repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSFailedAppMaster.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.applications.distributedshell;

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.exceptions.YarnException;

/**
 * Test ApplicationMaster that deliberately kills its first attempt (exit 100
 * from {@code main}) so that the second attempt can verify container-retention
 * behavior: the second attempt must find exactly one already-running container
 * and must not have requested any additional ones.
 */
public class TestDSFailedAppMaster extends ApplicationMaster {

  private static final Log LOG =
      LogFactory.getLog(TestDSFailedAppMaster.class);

  @Override
  public void run() throws YarnException, IOException, InterruptedException {
    super.run();

    // Checks only apply to the 2nd attempt.
    if (appAttemptID.getAttemptId() == 2) {
      // Should reuse the earlier running container, so numAllocatedContainers
      // should be set to 1. And should ask no more containers, so
      // numRequestedContainers should be the same as numTotalContainers.
      // The only container is the container requested by the AM in the first
      // attempt.
      if (numAllocatedContainers.get() != 1
          || numRequestedContainers.get() != numTotalContainers) {
        // BUG FIX: the original logged numAllocatedContainers twice; the
        // second value must be numRequestedContainers so the message matches
        // the condition that actually failed.
        LOG.info("NumAllocatedContainers is " + numAllocatedContainers.get()
            + " and NumRequestedContainers is " + numRequestedContainers.get()
            + ". Application Master failed. exiting");
        System.exit(200);
      }
    }
  }

  /**
   * Entry point. Fails the first attempt on purpose after giving the AM time
   * to launch a container; lets subsequent attempts run to completion.
   *
   * @param args command line arguments forwarded to {@link ApplicationMaster#init}
   */
  public static void main(String[] args) {
    boolean result = false;
    try {
      TestDSFailedAppMaster appMaster = new TestDSFailedAppMaster();
      boolean doRun = appMaster.init(args);
      if (!doRun) {
        System.exit(0);
      }
      appMaster.run();
      if (appMaster.appAttemptID.getAttemptId() == 1) {
        try {
          // Sleep some time, wait for the AM to launch a container.
          Thread.sleep(3000);
        } catch (InterruptedException e) {
          // Do not swallow the interrupt: restore the thread's interrupt
          // status so the imminent exit path observes it if it cares.
          Thread.currentThread().interrupt();
        }
        // Fail the first AM on purpose.
        System.exit(100);
      }
      result = appMaster.finish();
    } catch (Throwable t) {
      System.exit(1);
    }
    if (result) {
      LOG.info("Application Master completed successfully. exiting");
      System.exit(0);
    } else {
      LOG.info("Application Master failed. exiting");
      System.exit(2);
    }
  }
}
2,898
34.353659
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Log4jPropertyHelper.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.applications.distributedshell;

import java.io.FileInputStream;
import java.io.InputStream;
import java.util.Map.Entry;
import java.util.Properties;

import org.apache.commons.io.IOUtils;
import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator;

/**
 * Helper that merges a user-supplied log4j properties file on top of the
 * bundled {@code /log4j.properties} resource and re-applies the combined
 * configuration to the running log4j LogManager.
 */
public class Log4jPropertyHelper {

  /**
   * Loads {@code log4jPath}, overlays its entries onto the default
   * {@code /log4j.properties} found on {@code targetClass}'s classpath, then
   * resets and reconfigures log4j with the merged properties.
   *
   * @param targetClass class whose classloader supplies the default resource
   * @param log4jPath   path of the custom log4j properties file
   * @throws Exception on any I/O or configuration failure
   */
  public static void updateLog4jConfiguration(Class<?> targetClass,
      String log4jPath) throws Exception {
    Properties customProperties = new Properties();
    // try-with-resources guarantees both streams are closed even when a load
    // or configure call throws; the original relied on the deprecated
    // IOUtils.closeQuietly in a finally block.
    try (FileInputStream fs = new FileInputStream(log4jPath);
        InputStream is = targetClass.getResourceAsStream("/log4j.properties")) {
      customProperties.load(fs);
      Properties originalProperties = new Properties();
      originalProperties.load(is);
      // Custom entries win over the bundled defaults.
      for (Entry<Object, Object> entry : customProperties.entrySet()) {
        originalProperties.setProperty(entry.getKey().toString(),
            entry.getValue().toString());
      }
      LogManager.resetConfiguration();
      PropertyConfigurator.configure(originalProperties);
    }
  }
}
1,987
34.5
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.applications.distributedshell; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Vector; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.io.IOUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import 
org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain; import org.apache.hadoop.yarn.client.api.TimelineClient; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.client.api.YarnClientApplication; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; /** * Client for Distributed Shell application submission to YARN. 
* * <p> The distributed shell client allows an application master to be launched that in turn would run * the provided shell command on a set of containers. </p> * * <p>This client is meant to act as an example on how to write yarn-based applications. </p> * * <p> To submit an application, a client first needs to connect to the <code>ResourceManager</code> * aka ApplicationsManager or ASM via the {@link ApplicationClientProtocol}. The {@link ApplicationClientProtocol} * provides a way for the client to get access to cluster information and to request for a * new {@link ApplicationId}. <p> * * <p> For the actual job submission, the client first has to create an {@link ApplicationSubmissionContext}. * The {@link ApplicationSubmissionContext} defines the application details such as {@link ApplicationId} * and application name, the priority assigned to the application and the queue * to which this application needs to be assigned. In addition to this, the {@link ApplicationSubmissionContext} * also defines the {@link ContainerLaunchContext} which describes the <code>Container</code> with which * the {@link ApplicationMaster} is launched. </p> * * <p> The {@link ContainerLaunchContext} in this scenario defines the resources to be allocated for the * {@link ApplicationMaster}'s container, the local resources (jars, configuration files) to be made available * and the environment to be set for the {@link ApplicationMaster} and the commands to be executed to run the * {@link ApplicationMaster}. <p> * * <p> Using the {@link ApplicationSubmissionContext}, the client submits the application to the * <code>ResourceManager</code> and then monitors the application by requesting the <code>ResourceManager</code> * for an {@link ApplicationReport} at regular time intervals. In case of the application taking too long, the client * kills the application by submitting a {@link KillApplicationRequest} to the <code>ResourceManager</code>. 
</p> * */ @InterfaceAudience.Public @InterfaceStability.Unstable public class Client { private static final Log LOG = LogFactory.getLog(Client.class); // Configuration private Configuration conf; private YarnClient yarnClient; // Application master specific info to register a new Application with RM/ASM private String appName = ""; // App master priority private int amPriority = 0; // Queue for App master private String amQueue = ""; // Amt. of memory resource to request for to run the App Master private int amMemory = 10; // Amt. of virtual core resource to request for to run the App Master private int amVCores = 1; // Application master jar file private String appMasterJar = ""; // Main class to invoke application master private final String appMasterMainClass; // Shell command to be executed private String shellCommand = ""; // Location of shell script private String shellScriptPath = ""; // Args to be passed to the shell command private String[] shellArgs = new String[] {}; // Env variables to be setup for the shell command private Map<String, String> shellEnv = new HashMap<String, String>(); // Shell Command Container priority private int shellCmdPriority = 0; // Amt of memory to request for container in which shell script will be executed private int containerMemory = 10; // Amt. of virtual cores to request for container in which shell script will be executed private int containerVirtualCores = 1; // No. of containers in which the shell script needs to be executed private int numContainers = 1; private String nodeLabelExpression = null; // log4j.properties file // if available, add to local resources and set into classpath private String log4jPropFile = ""; // Start time for client private final long clientStartTime = System.currentTimeMillis(); // Timeout threshold for client. Kill app after time interval expires. private long clientTimeout = 600000; // flag to indicate whether to keep containers across application attempts. 
private boolean keepContainers = false; private long attemptFailuresValidityInterval = -1; // Debug flag boolean debugFlag = false; // Timeline domain ID private String domainId = null; // Flag to indicate whether to create the domain of the given ID private boolean toCreateDomain = false; // Timeline domain reader access control private String viewACLs = null; // Timeline domain writer access control private String modifyACLs = null; // Command line options private Options opts; private static final String shellCommandPath = "shellCommands"; private static final String shellArgsPath = "shellArgs"; private static final String appMasterJarPath = "AppMaster.jar"; // Hardcoded path to custom log_properties private static final String log4jPath = "log4j.properties"; public static final String SCRIPT_PATH = "ExecScript"; /** * @param args Command line arguments */ public static void main(String[] args) { boolean result = false; try { Client client = new Client(); LOG.info("Initializing Client"); try { boolean doRun = client.init(args); if (!doRun) { System.exit(0); } } catch (IllegalArgumentException e) { System.err.println(e.getLocalizedMessage()); client.printUsage(); System.exit(-1); } result = client.run(); } catch (Throwable t) { LOG.fatal("Error running Client", t); System.exit(1); } if (result) { LOG.info("Application completed successfully"); System.exit(0); } LOG.error("Application failed to complete successfully"); System.exit(2); } /** */ public Client(Configuration conf) throws Exception { this( "org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster", conf); } Client(String appMasterMainClass, Configuration conf) { this.conf = conf; this.appMasterMainClass = appMasterMainClass; yarnClient = YarnClient.createYarnClient(); yarnClient.init(conf); opts = new Options(); opts.addOption("appname", true, "Application Name. Default value - DistributedShell"); opts.addOption("priority", true, "Application Priority. 
Default 0"); opts.addOption("queue", true, "RM Queue in which this application is to be submitted"); opts.addOption("timeout", true, "Application timeout in milliseconds"); opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master"); opts.addOption("master_vcores", true, "Amount of virtual cores to be requested to run the application master"); opts.addOption("jar", true, "Jar file containing the application master"); opts.addOption("shell_command", true, "Shell command to be executed by " + "the Application Master. Can only specify either --shell_command " + "or --shell_script"); opts.addOption("shell_script", true, "Location of the shell script to be " + "executed. Can only specify either --shell_command or --shell_script"); opts.addOption("shell_args", true, "Command line args for the shell script." + "Multiple args can be separated by empty space."); opts.getOption("shell_args").setArgs(Option.UNLIMITED_VALUES); opts.addOption("shell_env", true, "Environment for shell script. Specified as env_key=env_val pairs"); opts.addOption("shell_cmd_priority", true, "Priority for the shell command containers"); opts.addOption("container_memory", true, "Amount of memory in MB to be requested to run the shell command"); opts.addOption("container_vcores", true, "Amount of virtual cores to be requested to run the shell command"); opts.addOption("num_containers", true, "No. of containers on which the shell command needs to be executed"); opts.addOption("log_properties", true, "log4j.properties file"); opts.addOption("keep_containers_across_application_attempts", false, "Flag to indicate whether to keep containers across application attempts." 
+ " If the flag is true, running containers will not be killed when" + " application attempt fails and these containers will be retrieved by" + " the new application attempt "); opts.addOption("attempt_failures_validity_interval", true, "when attempt_failures_validity_interval in milliseconds is set to > 0," + "the failure number will not take failures which happen out of " + "the validityInterval into failure count. " + "If failure count reaches to maxAppAttempts, " + "the application will be failed."); opts.addOption("debug", false, "Dump out debug information"); opts.addOption("domain", true, "ID of the timeline domain where the " + "timeline entities will be put"); opts.addOption("view_acls", true, "Users and groups that allowed to " + "view the timeline entities in the given domain"); opts.addOption("modify_acls", true, "Users and groups that allowed to " + "modify the timeline entities in the given domain"); opts.addOption("create", false, "Flag to indicate whether to create the " + "domain specified with -domain."); opts.addOption("help", false, "Print usage"); opts.addOption("node_label_expression", true, "Node label expression to determine the nodes" + " where all the containers of this application" + " will be allocated, \"\" means containers" + " can be allocated anywhere, if you don't specify the option," + " default node_label_expression of queue will be used."); } /** */ public Client() throws Exception { this(new YarnConfiguration()); } /** * Helper function to print out usage */ private void printUsage() { new HelpFormatter().printHelp("Client", opts); } /** * Parse command line options * @param args Parsed command line options * @return Whether the init was successful to run the client * @throws ParseException */ public boolean init(String[] args) throws ParseException { CommandLine cliParser = new GnuParser().parse(opts, args); if (args.length == 0) { throw new IllegalArgumentException("No args specified for client to initialize"); } if 
(cliParser.hasOption("log_properties")) { String log4jPath = cliParser.getOptionValue("log_properties"); try { Log4jPropertyHelper.updateLog4jConfiguration(Client.class, log4jPath); } catch (Exception e) { LOG.warn("Can not set up custom log4j properties. " + e); } } if (cliParser.hasOption("help")) { printUsage(); return false; } if (cliParser.hasOption("debug")) { debugFlag = true; } if (cliParser.hasOption("keep_containers_across_application_attempts")) { LOG.info("keep_containers_across_application_attempts"); keepContainers = true; } appName = cliParser.getOptionValue("appname", "DistributedShell"); amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0")); amQueue = cliParser.getOptionValue("queue", "default"); amMemory = Integer.parseInt(cliParser.getOptionValue("master_memory", "10")); amVCores = Integer.parseInt(cliParser.getOptionValue("master_vcores", "1")); if (amMemory < 0) { throw new IllegalArgumentException("Invalid memory specified for application master, exiting." + " Specified memory=" + amMemory); } if (amVCores < 0) { throw new IllegalArgumentException("Invalid virtual cores specified for application master, exiting." 
+ " Specified virtual cores=" + amVCores); } if (!cliParser.hasOption("jar")) { throw new IllegalArgumentException("No jar file specified for application master"); } appMasterJar = cliParser.getOptionValue("jar"); if (!cliParser.hasOption("shell_command") && !cliParser.hasOption("shell_script")) { throw new IllegalArgumentException( "No shell command or shell script specified to be executed by application master"); } else if (cliParser.hasOption("shell_command") && cliParser.hasOption("shell_script")) { throw new IllegalArgumentException("Can not specify shell_command option " + "and shell_script option at the same time"); } else if (cliParser.hasOption("shell_command")) { shellCommand = cliParser.getOptionValue("shell_command"); } else { shellScriptPath = cliParser.getOptionValue("shell_script"); } if (cliParser.hasOption("shell_args")) { shellArgs = cliParser.getOptionValues("shell_args"); } if (cliParser.hasOption("shell_env")) { String envs[] = cliParser.getOptionValues("shell_env"); for (String env : envs) { env = env.trim(); int index = env.indexOf('='); if (index == -1) { shellEnv.put(env, ""); continue; } String key = env.substring(0, index); String val = ""; if (index < (env.length()-1)) { val = env.substring(index+1); } shellEnv.put(key, val); } } shellCmdPriority = Integer.parseInt(cliParser.getOptionValue("shell_cmd_priority", "0")); containerMemory = Integer.parseInt(cliParser.getOptionValue("container_memory", "10")); containerVirtualCores = Integer.parseInt(cliParser.getOptionValue("container_vcores", "1")); numContainers = Integer.parseInt(cliParser.getOptionValue("num_containers", "1")); if (containerMemory < 0 || containerVirtualCores < 0 || numContainers < 1) { throw new IllegalArgumentException("Invalid no. of containers or container memory/vcores specified," + " exiting." 
+ " Specified containerMemory=" + containerMemory + ", containerVirtualCores=" + containerVirtualCores + ", numContainer=" + numContainers); } nodeLabelExpression = cliParser.getOptionValue("node_label_expression", null); clientTimeout = Integer.parseInt(cliParser.getOptionValue("timeout", "600000")); attemptFailuresValidityInterval = Long.parseLong(cliParser.getOptionValue( "attempt_failures_validity_interval", "-1")); log4jPropFile = cliParser.getOptionValue("log_properties", ""); // Get timeline domain options if (cliParser.hasOption("domain")) { domainId = cliParser.getOptionValue("domain"); toCreateDomain = cliParser.hasOption("create"); if (cliParser.hasOption("view_acls")) { viewACLs = cliParser.getOptionValue("view_acls"); } if (cliParser.hasOption("modify_acls")) { modifyACLs = cliParser.getOptionValue("modify_acls"); } } return true; } /** * Main run function for the client * @return true if application completed successfully * @throws IOException * @throws YarnException */ public boolean run() throws IOException, YarnException { LOG.info("Running Client"); yarnClient.start(); YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics(); LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers()); List<NodeReport> clusterNodeReports = yarnClient.getNodeReports( NodeState.RUNNING); LOG.info("Got Cluster node info from ASM"); for (NodeReport node : clusterNodeReports) { LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress" + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers" + node.getNumContainers()); } QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue); LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity() + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", 
queueChildQueueCount=" + queueInfo.getChildQueues().size()); List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo(); for (QueueUserACLInfo aclInfo : listAclInfo) { for (QueueACL userAcl : aclInfo.getUserAcls()) { LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl=" + userAcl.name()); } } if (domainId != null && domainId.length() > 0 && toCreateDomain) { prepareTimelineDomain(); } // Get a new application id YarnClientApplication app = yarnClient.createApplication(); GetNewApplicationResponse appResponse = app.getNewApplicationResponse(); // TODO get min/max resource capabilities from RM and change memory ask if needed // If we do not have min/max, we may not be able to correctly request // the required resources from the RM for the app master // Memory ask has to be a multiple of min and less than max. // Dump out information about cluster capability as seen by the resource manager int maxMem = appResponse.getMaximumResourceCapability().getMemory(); LOG.info("Max mem capability of resources in this cluster " + maxMem); // A resource ask cannot exceed the max. if (amMemory > maxMem) { LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified=" + amMemory + ", max=" + maxMem); amMemory = maxMem; } int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores(); LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores); if (amVCores > maxVCores) { LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value." 
+ ", specified=" + amVCores + ", max=" + maxVCores); amVCores = maxVCores; } // set the application name ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext(); ApplicationId appId = appContext.getApplicationId(); appContext.setKeepContainersAcrossApplicationAttempts(keepContainers); appContext.setApplicationName(appName); if (attemptFailuresValidityInterval >= 0) { appContext .setAttemptFailuresValidityInterval(attemptFailuresValidityInterval); } // set local resources for the application master // local files or archives as needed // In this scenario, the jar file for the application master is part of the local resources Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); LOG.info("Copy App Master jar from local filesystem and add to local environment"); // Copy the application master jar to the filesystem // Create a local resource to point to the destination jar path FileSystem fs = FileSystem.get(conf); addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.toString(), localResources, null); // Set the log4j properties if needed if (!log4jPropFile.isEmpty()) { addToLocalResources(fs, log4jPropFile, log4jPath, appId.toString(), localResources, null); } // The shell script has to be made available on the final container(s) // where it will be executed. // To do this, we need to first copy into the filesystem that is visible // to the yarn framework. // We do not need to set this as a local resource for the application // master as the application master does not need it. 
String hdfsShellScriptLocation = ""; long hdfsShellScriptLen = 0; long hdfsShellScriptTimestamp = 0; if (!shellScriptPath.isEmpty()) { Path shellSrc = new Path(shellScriptPath); String shellPathSuffix = appName + "/" + appId.toString() + "/" + SCRIPT_PATH; Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix); fs.copyFromLocalFile(false, true, shellSrc, shellDst); hdfsShellScriptLocation = shellDst.toUri().toString(); FileStatus shellFileStatus = fs.getFileStatus(shellDst); hdfsShellScriptLen = shellFileStatus.getLen(); hdfsShellScriptTimestamp = shellFileStatus.getModificationTime(); } if (!shellCommand.isEmpty()) { addToLocalResources(fs, null, shellCommandPath, appId.toString(), localResources, shellCommand); } if (shellArgs.length > 0) { addToLocalResources(fs, null, shellArgsPath, appId.toString(), localResources, StringUtils.join(shellArgs, " ")); } // Set the necessary security tokens as needed //amContainer.setContainerTokens(containerToken); // Set the env variables to be setup in the env where the application master will be run LOG.info("Set the environment for the application master"); Map<String, String> env = new HashMap<String, String>(); // put location of shell script into env // using the env info, the application master will create the correct local resource for the // eventual containers that will be launched to execute the shell scripts env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation); env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp)); env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen)); if (domainId != null && domainId.length() > 0) { env.put(DSConstants.DISTRIBUTEDSHELLTIMELINEDOMAIN, domainId); } // Add AppMaster.jar location to classpath // At some point we should not be required to add // the hadoop specific classpaths to the env. // It should be provided out of the box. 
// For now setting all required classpaths including // the classpath to "." for the application jar StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$()) .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*"); for (String c : conf.getStrings( YarnConfiguration.YARN_APPLICATION_CLASSPATH, YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) { classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR); classPathEnv.append(c.trim()); } classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append( "./log4j.properties"); // add the runtime classpath needed for tests to work if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) { classPathEnv.append(':'); classPathEnv.append(System.getProperty("java.class.path")); } env.put("CLASSPATH", classPathEnv.toString()); // Set the necessary command to execute the application master Vector<CharSequence> vargs = new Vector<CharSequence>(30); // Set java executable command LOG.info("Setting up app master command"); vargs.add(Environment.JAVA_HOME.$$() + "/bin/java"); // Set Xmx based on am memory size vargs.add("-Xmx" + amMemory + "m"); // Set class name vargs.add(appMasterMainClass); // Set params for Application Master vargs.add("--container_memory " + String.valueOf(containerMemory)); vargs.add("--container_vcores " + String.valueOf(containerVirtualCores)); vargs.add("--num_containers " + String.valueOf(numContainers)); if (null != nodeLabelExpression) { appContext.setNodeLabelExpression(nodeLabelExpression); } vargs.add("--priority " + String.valueOf(shellCmdPriority)); for (Map.Entry<String, String> entry : shellEnv.entrySet()) { vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue()); } if (debugFlag) { vargs.add("--debug"); } vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout"); vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr"); // Get final commmand StringBuilder command = 
new StringBuilder(); for (CharSequence str : vargs) { command.append(str).append(" "); } LOG.info("Completed setting up app master command " + command.toString()); List<String> commands = new ArrayList<String>(); commands.add(command.toString()); // Set up the container launch context for the application master ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance( localResources, env, commands, null, null, null); // Set up resource type requirements // For now, both memory and vcores are supported, so we set memory and // vcores requirements Resource capability = Resource.newInstance(amMemory, amVCores); appContext.setResource(capability); // Service data is a binary blob that can be passed to the application // Not needed in this scenario // amContainer.setServiceData(serviceData); // Setup security tokens if (UserGroupInformation.isSecurityEnabled()) { // Note: Credentials class is marked as LimitedPrivate for HDFS and MapReduce Credentials credentials = new Credentials(); String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL); if (tokenRenewer == null || tokenRenewer.length() == 0) { throw new IOException( "Can't get Master Kerberos principal for the RM to use as renewer"); } // For now, only getting tokens for the default file-system. final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials); if (tokens != null) { for (Token<?> token : tokens) { LOG.info("Got dt for " + fs.getUri() + "; " + token); } } DataOutputBuffer dob = new DataOutputBuffer(); credentials.writeTokenStorageToStream(dob); ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); amContainer.setTokens(fsTokens); } appContext.setAMContainerSpec(amContainer); // Set the priority for the application master // TODO - what is the range for priority? how to decide? 
Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    return monitorApplication(appId);

  }

  /**
   * Monitor the submitted application for completion.
   * Kill application if time expires.
   * @param appId Application Id of application to be monitored
   * @return true if application completed successfully
   * @throws YarnException
   * @throws IOException
   */
  private boolean monitorApplication(ApplicationId appId)
      throws YarnException, IOException {

    // Poll the RM until the application reaches a terminal state or the
    // client-side timeout expires.
    while (true) {

      // Check app status every 1 second.
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
        // NOTE(review): the interrupt status is swallowed here; consider
        // calling Thread.currentThread().interrupt() to preserve it.
        LOG.debug("Thread sleep in monitoring loop interrupted");
      }

      // Get application report for the appId we are interested in
      ApplicationReport report = yarnClient.getApplicationReport(appId);

      LOG.info("Got application report from ASM for"
          + ", appId=" + appId.getId()
          + ", clientToAMToken=" + report.getClientToAMToken()
          + ", appDiagnostics=" + report.getDiagnostics()
          + ", appMasterHost=" + report.getHost()
          + ", appQueue=" + report.getQueue()
          + ", appMasterRpcPort=" + report.getRpcPort()
          + ", appStartTime=" + report.getStartTime()
          + ", yarnAppState=" + report.getYarnApplicationState().toString()
          + ", distributedFinalState="
          + report.getFinalApplicationStatus().toString()
          + ", appTrackingUrl=" + report.getTrackingUrl()
          + ", appUser=" + report.getUser());

      YarnApplicationState state = report.getYarnApplicationState();
      FinalApplicationStatus dsStatus = report.getFinalApplicationStatus();
      if (YarnApplicationState.FINISHED == state) {
        // FINISHED only means the app completed; success is determined by
        // the final application status reported by the AM.
        if (FinalApplicationStatus.SUCCEEDED == dsStatus) {
          LOG.info("Application has completed successfully. Breaking monitoring loop");
          return true;
        } else {
          LOG.info("Application did finished unsuccessfully."
              + " YarnState=" + state.toString() + ", DSFinalStatus=" + dsStatus.toString()
              + ". Breaking monitoring loop");
          return false;
        }
      } else if (YarnApplicationState.KILLED == state
          || YarnApplicationState.FAILED == state) {
        LOG.info("Application did not finish."
            + " YarnState=" + state.toString() + ", DSFinalStatus=" + dsStatus.toString()
            + ". Breaking monitoring loop");
        return false;
      }

      // Client-side watchdog: kill the application once the configured
      // timeout (relative to client start time) has elapsed.
      if (System.currentTimeMillis() > (clientStartTime + clientTimeout)) {
        LOG.info("Reached client specified timeout for application. Killing application");
        forceKillApplication(appId);
        return false;
      }
    }
  }

  /**
   * Kill a submitted application by sending a call to the ASM
   * @param appId Application Id to be killed.
* @throws YarnException
   * @throws IOException
   */
  private void forceKillApplication(ApplicationId appId)
      throws YarnException, IOException {
    // TODO clarify whether multiple jobs with the same app id can be submitted and be running at
    // the same time.
    // If yes, can we kill a particular attempt only?

    // Response can be ignored as it is non-null on success or
    // throws an exception in case of failures
    yarnClient.killApplication(appId);
  }

  /**
   * Copy a local file (or, when {@code fileSrcPath} is null, the literal
   * {@code resources} string) into the application's staging directory on
   * the default filesystem and register it in {@code localResources} as an
   * APPLICATION-visibility file resource keyed by {@code fileDstPath}.
   *
   * @param fs filesystem to stage the resource on
   * @param fileSrcPath local source path, or null to write {@code resources}
   * @param fileDstPath destination name (also the local-resource key)
   * @param appId application id used to build the staging path
   * @param localResources map to receive the created LocalResource
   * @param resources inline content written when {@code fileSrcPath} is null
   * @throws IOException on any filesystem failure
   */
  private void addToLocalResources(FileSystem fs, String fileSrcPath,
      String fileDstPath, String appId, Map<String, LocalResource> localResources,
      String resources) throws IOException {
    String suffix = appName + "/" + appId + "/" + fileDstPath;
    Path dst = new Path(fs.getHomeDirectory(), suffix);
    if (fileSrcPath == null) {
      // No source file given: materialize the provided string as the
      // resource content (0710 so only owner/group can read it).
      FSDataOutputStream ostream = null;
      try {
        ostream = FileSystem.create(fs, dst, new FsPermission((short) 0710));
        ostream.writeUTF(resources);
      } finally {
        // NOTE(review): closeQuietly swallows close() failures, which could
        // hide a truncated write; consider surfacing the IOException.
        IOUtils.closeQuietly(ostream);
      }
    } else {
      fs.copyFromLocalFile(new Path(fileSrcPath), dst);
    }
    // Length and modification time must match the staged file exactly or
    // NodeManagers will reject the resource at localization time.
    FileStatus scFileStatus = fs.getFileStatus(dst);
    LocalResource scRsrc =
        LocalResource.newInstance(
            ConverterUtils.getYarnUrlFromURI(dst.toUri()),
            LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
            scFileStatus.getLen(), scFileStatus.getModificationTime());
    localResources.put(fileDstPath, scRsrc);
  }

  /**
   * Create the timeline domain {@code domainId} with the configured
   * reader/writer ACLs, if the timeline service is enabled; otherwise log a
   * warning and return. Failures are logged, not propagated.
   */
  private void prepareTimelineDomain() {
    TimelineClient timelineClient = null;
    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) {
      timelineClient = TimelineClient.createTimelineClient();
      timelineClient.init(conf);
      timelineClient.start();
    } else {
      LOG.warn("Cannot put the domain " + domainId +
          " because the timeline service is not enabled");
      return;
    }
    try {
      //TODO: we need to check and combine the existing timeline domain ACLs,
      //but let's do it once we have client java library to query domains.
      TimelineDomain domain = new TimelineDomain();
      domain.setId(domainId);
      // A single space means "no one" rather than the default ACL.
      domain.setReaders(
          viewACLs != null && viewACLs.length() > 0 ? viewACLs : " ");
      domain.setWriters(
          modifyACLs != null && modifyACLs.length() > 0 ? modifyACLs : " ");
      timelineClient.putDomain(domain);
      LOG.info("Put the timeline domain: " +
          TimelineUtils.dumpTimelineRecordtoJSON(domain));
    } catch (Exception e) {
      LOG.error("Error when putting the timeline domain", e);
    } finally {
      timelineClient.stop();
    }
  }
}
36,296
41.107889
118
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.applications.distributedshell; import java.io.BufferedReader; import java.io.DataInputStream; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.StringReader; import java.lang.reflect.UndeclaredThrowableException; import java.net.URI; import java.net.URISyntaxException; import java.nio.ByteBuffer; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.Vector; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import 
org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import 
org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity; import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent; import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse; import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; import org.apache.hadoop.yarn.client.api.TimelineClient; import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; import org.apache.hadoop.yarn.client.api.async.NMClientAsync; import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.log4j.LogManager; import com.google.common.annotations.VisibleForTesting; /** * An ApplicationMaster for executing shell commands on a set of launched * containers using the YARN framework. * * <p> * This class is meant to act as an example on how to write yarn-based * application masters. * </p> * * <p> * The ApplicationMaster is started on a container by the * <code>ResourceManager</code>'s launcher. The first thing that the * <code>ApplicationMaster</code> needs to do is to connect and register itself * with the <code>ResourceManager</code>. The registration sets up information * within the <code>ResourceManager</code> regarding what host:port the * ApplicationMaster is listening on to provide any form of functionality to a * client as well as a tracking url that a client can use to keep track of * status/job history if needed. However, in the distributedshell, trackingurl * and appMasterHost:appMasterRpcPort are not supported. 
* </p> * * <p> * The <code>ApplicationMaster</code> needs to send a heartbeat to the * <code>ResourceManager</code> at regular intervals to inform the * <code>ResourceManager</code> that it is up and alive. The * {@link ApplicationMasterProtocol#allocate} to the <code>ResourceManager</code> from the * <code>ApplicationMaster</code> acts as a heartbeat. * * <p> * For the actual handling of the job, the <code>ApplicationMaster</code> has to * request the <code>ResourceManager</code> via {@link AllocateRequest} for the * required no. of containers using {@link ResourceRequest} with the necessary * resource specifications such as node location, computational * (memory/disk/cpu) resource requirements. The <code>ResourceManager</code> * responds with an {@link AllocateResponse} that informs the * <code>ApplicationMaster</code> of the set of newly allocated containers, * completed containers as well as current state of available resources. * </p> * * <p> * For each allocated container, the <code>ApplicationMaster</code> can then set * up the necessary launch context via {@link ContainerLaunchContext} to specify * the allocated container id, local resources required by the executable, the * environment to be setup for the executable, commands to execute, etc. and * submit a {@link StartContainerRequest} to the {@link ContainerManagementProtocol} to * launch and execute the defined commands on the given allocated container. * </p> * * <p> * The <code>ApplicationMaster</code> can monitor the launched container by * either querying the <code>ResourceManager</code> using * {@link ApplicationMasterProtocol#allocate} to get updates on completed containers or via * the {@link ContainerManagementProtocol} by querying for the status of the allocated * container's {@link ContainerId}. 
*
 * <p>
 * After the job has been completed, the <code>ApplicationMaster</code> has to
 * send a {@link FinishApplicationMasterRequest} to the
 * <code>ResourceManager</code> to inform it that the
 * <code>ApplicationMaster</code> has been completed.
 */
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class ApplicationMaster {

  private static final Log LOG = LogFactory.getLog(ApplicationMaster.class);

  // Timeline event types published for app attempts and containers.
  @VisibleForTesting
  @Private
  public static enum DSEvent {
    DS_APP_ATTEMPT_START, DS_APP_ATTEMPT_END, DS_CONTAINER_START, DS_CONTAINER_END
  }

  // Timeline entity types published for app attempts and containers.
  @VisibleForTesting
  @Private
  public static enum DSEntity {
    DS_APP_ATTEMPT, DS_CONTAINER
  }

  private static final String YARN_SHELL_ID = "YARN_SHELL_ID";

  // Configuration
  private Configuration conf;

  // Handle to communicate with the Resource Manager
  @SuppressWarnings("rawtypes")
  private AMRMClientAsync amRMClient;

  // In both secure and non-secure modes, this points to the job-submitter.
  @VisibleForTesting
  UserGroupInformation appSubmitterUgi;

  // Handle to communicate with the Node Manager
  private NMClientAsync nmClientAsync;

  // Listen to process the response from the Node Manager
  private NMCallbackHandler containerListener;

  // Application Attempt Id ( combination of attemptId and fail count )
  @VisibleForTesting
  protected ApplicationAttemptId appAttemptID;

  // TODO
  // For status update for clients - yet to be implemented
  // Hostname of the container
  private String appMasterHostname = "";
  // Port on which the app master listens for status updates from clients
  private int appMasterRpcPort = -1;
  // Tracking url to which app master publishes info for clients to monitor
  private String appMasterTrackingUrl = "";

  // App Master configuration
  // No. of containers to run shell command on
  @VisibleForTesting
  protected int numTotalContainers = 1;
  // Memory to request for the container on which the shell command will run
  private int containerMemory = 10;
  // VirtualCores to request for the container on which the shell command will run
  private int containerVirtualCores = 1;
  // Priority of the request
  private int requestPriority;

  // Counter for completed containers ( complete denotes successful or failed )
  private AtomicInteger numCompletedContainers = new AtomicInteger();
  // Allocated container count so that we know how many containers has the RM
  // allocated to us
  @VisibleForTesting
  protected AtomicInteger numAllocatedContainers = new AtomicInteger();
  // Count of failed containers
  private AtomicInteger numFailedContainers = new AtomicInteger();
  // Count of containers already requested from the RM
  // Needed as once requested, we should not request for containers again.
  // Only request for more if the original requirement changes.
  @VisibleForTesting
  protected AtomicInteger numRequestedContainers = new AtomicInteger();

  // Shell command to be executed
  private String shellCommand = "";
  // Args to be passed to the shell command
  private String shellArgs = "";
  // Env variables to be setup for the shell command
  private Map<String, String> shellEnv = new HashMap<String, String>();

  // Location of shell script ( obtained from info set in env )
  // Shell script path in fs
  private String scriptPath = "";
  // Timestamp needed for creating a local resource
  private long shellScriptPathTimestamp = 0;
  // File length needed for local resource
  private long shellScriptPathLen = 0;

  // Timeline domain ID
  private String domainId = null;

  // Hardcoded path to shell script in launch container's local env
  private static final String ExecShellStringPath = Client.SCRIPT_PATH + ".sh";
  private static final String ExecBatScripStringtPath = Client.SCRIPT_PATH
      + ".bat";

  // Hardcoded path to custom log_properties
  private static final String log4jPath = "log4j.properties";

  private static final String shellCommandPath = "shellCommands";
  private static final String shellArgsPath = "shellArgs";

  private volatile boolean done;

  private ByteBuffer allTokens;

  // Launch threads
  private List<Thread> launchThreads = new ArrayList<Thread>();

  // Timeline Client
  @VisibleForTesting
  TimelineClient timelineClient;

  private final String linux_bash_command = "bash";
  private final String windows_command = "cmd /c";

  private int yarnShellIdCounter = 1;

  // Ids of containers launched by this (or a previous) attempt; concurrent
  // set because it is touched from async RM/NM callback threads.
  @VisibleForTesting
  protected final Set<ContainerId> launchedContainers =
      Collections.newSetFromMap(new ConcurrentHashMap<ContainerId, Boolean>());

  /**
   * Entry point: init from args, run the AM, and exit 0 on success,
   * 2 on an unsuccessful run, or 1 on an unexpected error.
   *
   * @param args Command line args
   */
  public static void main(String[] args) {
    boolean result = false;
    try {
      ApplicationMaster appMaster = new ApplicationMaster();
      LOG.info("Initializing ApplicationMaster");
      boolean doRun = appMaster.init(args);
      if (!doRun) {
        // e.g. --help was requested; nothing to run.
        System.exit(0);
      }
      appMaster.run();
      result = appMaster.finish();
    } catch (Throwable t) {
      LOG.fatal("Error running ApplicationMaster", t);
      // Flush log4j before terminating so the failure is not lost.
      LogManager.shutdown();
      ExitUtil.terminate(1, t);
    }
    if (result) {
      LOG.info("Application Master completed successfully. exiting");
      System.exit(0);
    } else {
      LOG.info("Application Master failed. exiting");
      System.exit(2);
    }
  }

  /**
   * Dump out contents of $CWD and the environment to stdout for debugging
   */
  private void dumpOutDebugInfo() {

    LOG.info("Dump debug output");
    Map<String, String> envs = System.getenv();
    for (Map.Entry<String, String> env : envs.entrySet()) {
      LOG.info("System env: key=" + env.getKey() + ", val=" + env.getValue());
      System.out.println("System env: key=" + env.getKey() + ", val="
          + env.getValue());
    }

    BufferedReader buf = null;
    try {
      // Platform-specific listing of the container working directory.
      String lines = Shell.WINDOWS ?
Shell.execCommand("cmd", "/c", "dir") :
          Shell.execCommand("ls", "-al");
      buf = new BufferedReader(new StringReader(lines));
      String line = "";
      while ((line = buf.readLine()) != null) {
        LOG.info("System CWD content: " + line);
        System.out.println("System CWD content: " + line);
      }
    } catch (IOException e) {
      e.printStackTrace();
    } finally {
      IOUtils.cleanup(LOG, buf);
    }
  }

  public ApplicationMaster() {
    // Set up the configuration
    conf = new YarnConfiguration();
  }

  /**
   * Parse command line options
   *
   * @param args Command line args
   * @return Whether init successful and run should be invoked
   * @throws ParseException
   * @throws IOException
   */
  public boolean init(String[] args) throws ParseException, IOException {
    Options opts = new Options();
    opts.addOption("app_attempt_id", true,
        "App Attempt ID. Not to be used unless for testing purposes");
    opts.addOption("shell_env", true,
        "Environment for shell script. Specified as env_key=env_val pairs");
    opts.addOption("container_memory", true,
        "Amount of memory in MB to be requested to run the shell command");
    opts.addOption("container_vcores", true,
        "Amount of virtual cores to be requested to run the shell command");
    opts.addOption("num_containers", true,
        "No. of containers on which the shell command needs to be executed");
    opts.addOption("priority", true, "Application Priority. Default 0");
    opts.addOption("debug", false, "Dump out debug information");
    opts.addOption("help", false, "Print usage");
    CommandLine cliParser = new GnuParser().parse(opts, args);

    if (args.length == 0) {
      printUsage(opts);
      throw new IllegalArgumentException(
          "No args specified for application master to initialize");
    }

    //Check whether customer log4j.properties file exists
    if (fileExist(log4jPath)) {
      try {
        Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class,
            log4jPath);
      } catch (Exception e) {
        // Best effort: fall back to the default log4j config.
        LOG.warn("Can not set up custom log4j properties. " + e);
      }
    }

    if (cliParser.hasOption("help")) {
      printUsage(opts);
      return false;
    }

    if (cliParser.hasOption("debug")) {
      dumpOutDebugInfo();
    }

    Map<String, String> envs = System.getenv();

    // The attempt id normally comes from the container id env var; the
    // explicit CLI option exists only for testing outside a real container.
    if (!envs.containsKey(Environment.CONTAINER_ID.name())) {
      if (cliParser.hasOption("app_attempt_id")) {
        String appIdStr = cliParser.getOptionValue("app_attempt_id", "");
        appAttemptID = ConverterUtils.toApplicationAttemptId(appIdStr);
      } else {
        throw new IllegalArgumentException(
            "Application Attempt Id not set in the environment");
      }
    } else {
      ContainerId containerId = ConverterUtils.toContainerId(envs
          .get(Environment.CONTAINER_ID.name()));
      appAttemptID = containerId.getApplicationAttemptId();
    }

    // Fail fast if the NM did not provide the standard container env vars.
    if (!envs.containsKey(ApplicationConstants.APP_SUBMIT_TIME_ENV)) {
      throw new RuntimeException(ApplicationConstants.APP_SUBMIT_TIME_ENV
          + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_HOST.name())) {
      throw new RuntimeException(Environment.NM_HOST.name()
          + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_HTTP_PORT.name())) {
      throw new RuntimeException(Environment.NM_HTTP_PORT
          + " not set in the environment");
    }
    if (!envs.containsKey(Environment.NM_PORT.name())) {
      throw new RuntimeException(Environment.NM_PORT.name()
          + " not set in the environment");
    }

    LOG.info("Application master for app" + ", appId="
        + appAttemptID.getApplicationId().getId() + ", clustertimestamp="
        + appAttemptID.getApplicationId().getClusterTimestamp()
        + ", attemptId=" + appAttemptID.getAttemptId());

    // NOTE(review): envs.get(...) below throws NPE if the script-location
    // env var is absent entirely; a containsKey guard would be safer.
    if (!fileExist(shellCommandPath)
        && envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION).isEmpty()) {
      throw new IllegalArgumentException(
          "No shell command or shell script specified to be executed by application master");
    }

    if (fileExist(shellCommandPath)) {
      shellCommand = readContent(shellCommandPath);
    }

    if (fileExist(shellArgsPath)) {
      shellArgs = readContent(shellArgsPath);
    }

    if (cliParser.hasOption("shell_env")) {
      String shellEnvs[] = cliParser.getOptionValues("shell_env");
      for (String env : shellEnvs) {
        env = env.trim();
        int index = env.indexOf('=');
        if (index == -1) {
          // No '=': treat the whole token as a key with an empty value.
          shellEnv.put(env, "");
          continue;
        }
        String key = env.substring(0, index);
        String val = "";
        if (index < (env.length() - 1)) {
          val = env.substring(index + 1);
        }
        shellEnv.put(key, val);
      }
    }

    if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION)) {
      scriptPath = envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION);

      if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP)) {
        shellScriptPathTimestamp = Long.parseLong(envs
            .get(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP));
      }
      if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN)) {
        shellScriptPathLen = Long.parseLong(envs
            .get(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN));
      }
      // A script without valid length/timestamp cannot be localized later.
      if (!scriptPath.isEmpty()
          && (shellScriptPathTimestamp <= 0 || shellScriptPathLen <= 0)) {
        LOG.error("Illegal values in env for shell script path" + ", path="
            + scriptPath + ", len=" + shellScriptPathLen + ", timestamp="
            + shellScriptPathTimestamp);
        throw new IllegalArgumentException(
            "Illegal values in env for shell script path");
      }
    }

    if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLTIMELINEDOMAIN)) {
      domainId = envs.get(DSConstants.DISTRIBUTEDSHELLTIMELINEDOMAIN);
    }

    containerMemory = Integer.parseInt(cliParser.getOptionValue(
        "container_memory", "10"));
    containerVirtualCores = Integer.parseInt(cliParser.getOptionValue(
        "container_vcores", "1"));
    numTotalContainers = Integer.parseInt(cliParser.getOptionValue(
        "num_containers", "1"));
    if (numTotalContainers == 0) {
      throw new IllegalArgumentException(
          "Cannot run distributed shell with no containers");
    }
    requestPriority = Integer.parseInt(cliParser
        .getOptionValue("priority", "0"));

    return true;
  }

  /**
   * Helper function to print usage
   *
   * @param opts Parsed command line options
   */
  private void printUsage(Options opts) {
    new HelpFormatter().printHelp("ApplicationMaster", opts);
  }

  /**
   * Main run function for the application master
   *
   * @throws YarnException
   * @throws IOException
   */
@SuppressWarnings({ "unchecked" }) public void run() throws YarnException, IOException, InterruptedException { LOG.info("Starting ApplicationMaster"); // Note: Credentials, Token, UserGroupInformation, DataOutputBuffer class // are marked as LimitedPrivate Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials(); DataOutputBuffer dob = new DataOutputBuffer(); credentials.writeTokenStorageToStream(dob); // Now remove the AM->RM token so that containers cannot access it. Iterator<Token<?>> iter = credentials.getAllTokens().iterator(); LOG.info("Executing with tokens:"); while (iter.hasNext()) { Token<?> token = iter.next(); LOG.info(token); if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) { iter.remove(); } } allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); // Create appSubmitterUgi and add original tokens to it String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name()); appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName); appSubmitterUgi.addCredentials(credentials); AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler(); amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener); amRMClient.init(conf); amRMClient.start(); containerListener = createNMCallbackHandler(); nmClientAsync = new NMClientAsyncImpl(containerListener); nmClientAsync.init(conf); nmClientAsync.start(); startTimelineClient(conf); if(timelineClient != null) { publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_START, domainId, appSubmitterUgi); } // Setup local RPC Server to accept status requests directly from clients // TODO need to setup a protocol for client to be able to communicate to // the RPC server // TODO use the rpc port info to register with the RM for the client to // send requests to this app master // Register self with ResourceManager // This will start heartbeating to the RM appMasterHostname = 
NetUtils.getHostname(); RegisterApplicationMasterResponse response = amRMClient .registerApplicationMaster(appMasterHostname, appMasterRpcPort, appMasterTrackingUrl); // Dump out information about cluster capability as seen by the // resource manager int maxMem = response.getMaximumResourceCapability().getMemory(); LOG.info("Max mem capability of resources in this cluster " + maxMem); int maxVCores = response.getMaximumResourceCapability().getVirtualCores(); LOG.info("Max vcores capability of resources in this cluster " + maxVCores); // A resource ask cannot exceed the max. if (containerMemory > maxMem) { LOG.info("Container memory specified above max threshold of cluster." + " Using max value." + ", specified=" + containerMemory + ", max=" + maxMem); containerMemory = maxMem; } if (containerVirtualCores > maxVCores) { LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value." + ", specified=" + containerVirtualCores + ", max=" + maxVCores); containerVirtualCores = maxVCores; } List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts(); LOG.info(appAttemptID + " received " + previousAMRunningContainers.size() + " previous attempts' running containers on AM registration."); for(Container container: previousAMRunningContainers) { launchedContainers.add(container.getId()); } numAllocatedContainers.addAndGet(previousAMRunningContainers.size()); int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size(); // Setup ask for containers from RM // Send request for containers to RM // Until we get our fully allocated quota, we keep on polling RM for // containers // Keep looping until all the containers are launched and shell script // executed on them ( regardless of success/failure). 
for (int i = 0; i < numTotalContainersToRequest; ++i) { ContainerRequest containerAsk = setupContainerAskForRM(); amRMClient.addContainerRequest(containerAsk); } numRequestedContainers.set(numTotalContainers); } @VisibleForTesting void startTimelineClient(final Configuration conf) throws YarnException, IOException, InterruptedException { try { appSubmitterUgi.doAs(new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) { // Creating the Timeline Client timelineClient = TimelineClient.createTimelineClient(); timelineClient.init(conf); timelineClient.start(); } else { timelineClient = null; LOG.warn("Timeline service is not enabled"); } return null; } }); } catch (UndeclaredThrowableException e) { throw new YarnException(e.getCause()); } } @VisibleForTesting NMCallbackHandler createNMCallbackHandler() { return new NMCallbackHandler(this); } @VisibleForTesting protected boolean finish() { // wait for completion. while (!done && (numCompletedContainers.get() != numTotalContainers)) { try { Thread.sleep(200); } catch (InterruptedException ex) {} } if(timelineClient != null) { publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(), DSEvent.DS_APP_ATTEMPT_END, domainId, appSubmitterUgi); } // Join all launched threads // needed for when we time out // and we need to release containers for (Thread launchThread : launchThreads) { try { launchThread.join(10000); } catch (InterruptedException e) { LOG.info("Exception thrown in thread join: " + e.getMessage()); e.printStackTrace(); } } // When the application completes, it should stop all running containers LOG.info("Application completed. Stopping running containers"); nmClientAsync.stop(); // When the application completes, it should send a finish application // signal to the RM LOG.info("Application completed. 
Signalling finish to RM"); FinalApplicationStatus appStatus; String appMessage = null; boolean success = true; if (numFailedContainers.get() == 0 && numCompletedContainers.get() == numTotalContainers) { appStatus = FinalApplicationStatus.SUCCEEDED; } else { appStatus = FinalApplicationStatus.FAILED; appMessage = "Diagnostics." + ", total=" + numTotalContainers + ", completed=" + numCompletedContainers.get() + ", allocated=" + numAllocatedContainers.get() + ", failed=" + numFailedContainers.get(); LOG.info(appMessage); success = false; } try { amRMClient.unregisterApplicationMaster(appStatus, appMessage, null); } catch (YarnException ex) { LOG.error("Failed to unregister application", ex); } catch (IOException e) { LOG.error("Failed to unregister application", e); } amRMClient.stop(); // Stop Timeline Client if(timelineClient != null) { timelineClient.stop(); } return success; } @VisibleForTesting class RMCallbackHandler implements AMRMClientAsync.CallbackHandler { @SuppressWarnings("unchecked") @Override public void onContainersCompleted(List<ContainerStatus> completedContainers) { LOG.info("Got response from RM for container ask, completedCnt=" + completedContainers.size()); for (ContainerStatus containerStatus : completedContainers) { LOG.info(appAttemptID + " got container status for containerID=" + containerStatus.getContainerId() + ", state=" + containerStatus.getState() + ", exitStatus=" + containerStatus.getExitStatus() + ", diagnostics=" + containerStatus.getDiagnostics()); // non complete containers should not be here assert (containerStatus.getState() == ContainerState.COMPLETE); // ignore containers we know nothing about - probably from a previous // attempt if (!launchedContainers.contains(containerStatus.getContainerId())) { LOG.info("Ignoring completed status of " + containerStatus.getContainerId() + "; unknown container(probably launched by previous attempt)"); continue; } // increment counters for completed/failed containers int exitStatus = 
containerStatus.getExitStatus(); if (0 != exitStatus) { // container failed if (ContainerExitStatus.ABORTED != exitStatus) { // shell script failed // counts as completed numCompletedContainers.incrementAndGet(); numFailedContainers.incrementAndGet(); } else { // container was killed by framework, possibly preempted // we should re-try as the container was lost for some reason numAllocatedContainers.decrementAndGet(); numRequestedContainers.decrementAndGet(); // we do not need to release the container as it would be done // by the RM } } else { // nothing to do // container completed successfully numCompletedContainers.incrementAndGet(); LOG.info("Container completed successfully." + ", containerId=" + containerStatus.getContainerId()); } if(timelineClient != null) { publishContainerEndEvent( timelineClient, containerStatus, domainId, appSubmitterUgi); } } // ask for more containers if any failed int askCount = numTotalContainers - numRequestedContainers.get(); numRequestedContainers.addAndGet(askCount); if (askCount > 0) { for (int i = 0; i < askCount; ++i) { ContainerRequest containerAsk = setupContainerAskForRM(); amRMClient.addContainerRequest(containerAsk); } } if (numCompletedContainers.get() == numTotalContainers) { done = true; } } @Override public void onContainersAllocated(List<Container> allocatedContainers) { LOG.info("Got response from RM for container ask, allocatedCnt=" + allocatedContainers.size()); numAllocatedContainers.addAndGet(allocatedContainers.size()); for (Container allocatedContainer : allocatedContainers) { String yarnShellId = Integer.toString(yarnShellIdCounter); yarnShellIdCounter++; LOG.info("Launching shell command on a new container." 
+ ", containerId=" + allocatedContainer.getId() + ", yarnShellId=" + yarnShellId + ", containerNode=" + allocatedContainer.getNodeId().getHost() + ":" + allocatedContainer.getNodeId().getPort() + ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory" + allocatedContainer.getResource().getMemory() + ", containerResourceVirtualCores" + allocatedContainer.getResource().getVirtualCores()); // + ", containerToken" // +allocatedContainer.getContainerToken().getIdentifier().toString()); Thread launchThread = createLaunchContainerThread(allocatedContainer, yarnShellId); // launch and start the container on a separate thread to keep // the main thread unblocked // as all containers may not be allocated at one go. launchThreads.add(launchThread); launchedContainers.add(allocatedContainer.getId()); launchThread.start(); } } @Override public void onShutdownRequest() { done = true; } @Override public void onNodesUpdated(List<NodeReport> updatedNodes) {} @Override public float getProgress() { // set progress to deliver to RM on next heartbeat float progress = (float) numCompletedContainers.get() / numTotalContainers; return progress; } @Override public void onError(Throwable e) { done = true; amRMClient.stop(); } } @VisibleForTesting static class NMCallbackHandler implements NMClientAsync.CallbackHandler { private ConcurrentMap<ContainerId, Container> containers = new ConcurrentHashMap<ContainerId, Container>(); private final ApplicationMaster applicationMaster; public NMCallbackHandler(ApplicationMaster applicationMaster) { this.applicationMaster = applicationMaster; } public void addContainer(ContainerId containerId, Container container) { containers.putIfAbsent(containerId, container); } @Override public void onContainerStopped(ContainerId containerId) { if (LOG.isDebugEnabled()) { LOG.debug("Succeeded to stop Container " + containerId); } containers.remove(containerId); } @Override public void onContainerStatusReceived(ContainerId 
containerId, ContainerStatus containerStatus) { if (LOG.isDebugEnabled()) { LOG.debug("Container Status: id=" + containerId + ", status=" + containerStatus); } } @Override public void onContainerStarted(ContainerId containerId, Map<String, ByteBuffer> allServiceResponse) { if (LOG.isDebugEnabled()) { LOG.debug("Succeeded to start Container " + containerId); } Container container = containers.get(containerId); if (container != null) { applicationMaster.nmClientAsync.getContainerStatusAsync(containerId, container.getNodeId()); } if(applicationMaster.timelineClient != null) { ApplicationMaster.publishContainerStartEvent( applicationMaster.timelineClient, container, applicationMaster.domainId, applicationMaster.appSubmitterUgi); } } @Override public void onStartContainerError(ContainerId containerId, Throwable t) { LOG.error("Failed to start Container " + containerId); containers.remove(containerId); applicationMaster.numCompletedContainers.incrementAndGet(); applicationMaster.numFailedContainers.incrementAndGet(); } @Override public void onGetContainerStatusError( ContainerId containerId, Throwable t) { LOG.error("Failed to query the status of Container " + containerId); } @Override public void onStopContainerError(ContainerId containerId, Throwable t) { LOG.error("Failed to stop Container " + containerId); containers.remove(containerId); } } /** * Thread to connect to the {@link ContainerManagementProtocol} and launch the container * that will execute the shell command. 
*/ private class LaunchContainerRunnable implements Runnable { // Allocated container private Container container; private String shellId; NMCallbackHandler containerListener; /** * @param lcontainer Allocated container * @param containerListener Callback handler of the container */ public LaunchContainerRunnable(Container lcontainer, NMCallbackHandler containerListener, String shellId) { this.container = lcontainer; this.containerListener = containerListener; this.shellId = shellId; } @Override /** * Connects to CM, sets up container launch context * for shell command and eventually dispatches the container * start request to the CM. */ public void run() { LOG.info("Setting up container launch container for containerid=" + container.getId() + " with shellid=" + shellId); // Set the local resources Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); // The container for the eventual shell commands needs its own local // resources too. // In this scenario, if a shell script is specified, we need to have it // copied and made available to the container. if (!scriptPath.isEmpty()) { Path renamedScriptPath = null; if (Shell.WINDOWS) { renamedScriptPath = new Path(scriptPath + ".bat"); } else { renamedScriptPath = new Path(scriptPath + ".sh"); } try { // rename the script file based on the underlying OS syntax. renameScriptFile(renamedScriptPath); } catch (Exception e) { LOG.error( "Not able to add suffix (.bat/.sh) to the shell script filename", e); // We know we cannot continue launching the container // so we should release it. 
numCompletedContainers.incrementAndGet(); numFailedContainers.incrementAndGet(); return; } URL yarnUrl = null; try { yarnUrl = ConverterUtils.getYarnUrlFromURI( new URI(renamedScriptPath.toString())); } catch (URISyntaxException e) { LOG.error("Error when trying to use shell script path specified" + " in env, path=" + renamedScriptPath, e); // A failure scenario on bad input such as invalid shell script path // We know we cannot continue launching the container // so we should release it. // TODO numCompletedContainers.incrementAndGet(); numFailedContainers.incrementAndGet(); return; } LocalResource shellRsrc = LocalResource.newInstance(yarnUrl, LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, shellScriptPathLen, shellScriptPathTimestamp); localResources.put(Shell.WINDOWS ? ExecBatScripStringtPath : ExecShellStringPath, shellRsrc); shellCommand = Shell.WINDOWS ? windows_command : linux_bash_command; } // Set the necessary command to execute on the allocated container Vector<CharSequence> vargs = new Vector<CharSequence>(5); // Set executable command vargs.add(shellCommand); // Set shell script path if (!scriptPath.isEmpty()) { vargs.add(Shell.WINDOWS ? ExecBatScripStringtPath : ExecShellStringPath); } // Set args for the shell command if any vargs.add(shellArgs); // Add log redirect params vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"); vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"); // Get final commmand StringBuilder command = new StringBuilder(); for (CharSequence str : vargs) { command.append(str).append(" "); } List<String> commands = new ArrayList<String>(); commands.add(command.toString()); // Set up ContainerLaunchContext, setting local resource, environment, // command and token for constructor. // Note for tokens: Set up tokens for the container too. Today, for normal // shell commands, the container in distribute-shell doesn't need any // tokens. 
We are populating them mainly for NodeManagers to be able to // download anyfiles in the distributed file-system. The tokens are // otherwise also useful in cases, for e.g., when one is running a // "hadoop dfs" command inside the distributed shell. Map<String, String> myShellEnv = new HashMap<String, String>(shellEnv); myShellEnv.put(YARN_SHELL_ID, shellId); ContainerLaunchContext ctx = ContainerLaunchContext.newInstance( localResources, myShellEnv, commands, null, allTokens.duplicate(), null); containerListener.addContainer(container.getId(), container); nmClientAsync.startContainerAsync(container, ctx); } } private void renameScriptFile(final Path renamedScriptPath) throws IOException, InterruptedException { appSubmitterUgi.doAs(new PrivilegedExceptionAction<Void>() { @Override public Void run() throws IOException { FileSystem fs = renamedScriptPath.getFileSystem(conf); fs.rename(new Path(scriptPath), renamedScriptPath); return null; } }); LOG.info("User " + appSubmitterUgi.getUserName() + " added suffix(.sh/.bat) to script file as " + renamedScriptPath); } /** * Setup the request that will be sent to the RM for the container ask. * * @return the setup ResourceRequest to be sent to RM */ private ContainerRequest setupContainerAskForRM() { // setup requirements for hosts // using * as any host will do for the distributed shell app // set the priority for the request // TODO - what is the range for priority? how to decide? 
Priority pri = Priority.newInstance(requestPriority); // Set up resource type requirements // For now, memory and CPU are supported so we set memory and cpu requirements Resource capability = Resource.newInstance(containerMemory, containerVirtualCores); ContainerRequest request = new ContainerRequest(capability, null, null, pri); LOG.info("Requested container ask: " + request.toString()); return request; } private boolean fileExist(String filePath) { return new File(filePath).exists(); } private String readContent(String filePath) throws IOException { DataInputStream ds = null; try { ds = new DataInputStream(new FileInputStream(filePath)); return ds.readUTF(); } finally { org.apache.commons.io.IOUtils.closeQuietly(ds); } } private static void publishContainerStartEvent( final TimelineClient timelineClient, Container container, String domainId, UserGroupInformation ugi) { final TimelineEntity entity = new TimelineEntity(); entity.setEntityId(container.getId().toString()); entity.setEntityType(DSEntity.DS_CONTAINER.toString()); entity.setDomainId(domainId); entity.addPrimaryFilter("user", ugi.getShortUserName()); TimelineEvent event = new TimelineEvent(); event.setTimestamp(System.currentTimeMillis()); event.setEventType(DSEvent.DS_CONTAINER_START.toString()); event.addEventInfo("Node", container.getNodeId().toString()); event.addEventInfo("Resources", container.getResource().toString()); entity.addEvent(event); try { ugi.doAs(new PrivilegedExceptionAction<TimelinePutResponse>() { @Override public TimelinePutResponse run() throws Exception { return timelineClient.putEntities(entity); } }); } catch (Exception e) { LOG.error("Container start event could not be published for " + container.getId().toString(), e instanceof UndeclaredThrowableException ? 
e.getCause() : e); } } private static void publishContainerEndEvent( final TimelineClient timelineClient, ContainerStatus container, String domainId, UserGroupInformation ugi) { final TimelineEntity entity = new TimelineEntity(); entity.setEntityId(container.getContainerId().toString()); entity.setEntityType(DSEntity.DS_CONTAINER.toString()); entity.setDomainId(domainId); entity.addPrimaryFilter("user", ugi.getShortUserName()); TimelineEvent event = new TimelineEvent(); event.setTimestamp(System.currentTimeMillis()); event.setEventType(DSEvent.DS_CONTAINER_END.toString()); event.addEventInfo("State", container.getState().name()); event.addEventInfo("Exit Status", container.getExitStatus()); entity.addEvent(event); try { timelineClient.putEntities(entity); } catch (YarnException | IOException e) { LOG.error("Container end event could not be published for " + container.getContainerId().toString(), e); } } private static void publishApplicationAttemptEvent( final TimelineClient timelineClient, String appAttemptId, DSEvent appEvent, String domainId, UserGroupInformation ugi) { final TimelineEntity entity = new TimelineEntity(); entity.setEntityId(appAttemptId); entity.setEntityType(DSEntity.DS_APP_ATTEMPT.toString()); entity.setDomainId(domainId); entity.addPrimaryFilter("user", ugi.getShortUserName()); TimelineEvent event = new TimelineEvent(); event.setEventType(appEvent.toString()); event.setTimestamp(System.currentTimeMillis()); entity.addEvent(event); try { timelineClient.putEntities(entity); } catch (YarnException | IOException e) { LOG.error("App Attempt " + (appEvent.equals(DSEvent.DS_APP_ATTEMPT_START) ? 
"start" : "end") + " event could not be published for " + appAttemptId.toString(), e); } } RMCallbackHandler getRMCallbackHandler() { return new RMCallbackHandler(); } @VisibleForTesting void setAmRMClient(AMRMClientAsync client) { this.amRMClient = client; } @VisibleForTesting int getNumCompletedContainers() { return numCompletedContainers.get(); } @VisibleForTesting boolean getDone() { return done; } @VisibleForTesting Thread createLaunchContainerThread(Container allocatedContainer, String shellId) { LaunchContainerRunnable runnableLaunchContainer = new LaunchContainerRunnable(allocatedContainer, containerListener, shellId); return new Thread(runnableLaunchContainer); } }
46,155
37.051113
100
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/DSConstants.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.applications.distributedshell;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * Constants used in both the Client and the Application Master.
 * Each constant doubles as the name of the environment variable under
 * which the corresponding value is passed from one side to the other.
 */
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class DSConstants {

  /**
   * Environment key name pointing to the shell script's location.
   */
  public static final String DISTRIBUTEDSHELLSCRIPTLOCATION = "DISTRIBUTEDSHELLSCRIPTLOCATION";

  /**
   * Environment key name denoting the file timestamp for the shell script.
   * Used to validate the local resource.
   */
  public static final String DISTRIBUTEDSHELLSCRIPTTIMESTAMP = "DISTRIBUTEDSHELLSCRIPTTIMESTAMP";

  /**
   * Environment key name denoting the file content length for the shell
   * script. Used to validate the local resource.
   */
  public static final String DISTRIBUTEDSHELLSCRIPTLEN = "DISTRIBUTEDSHELLSCRIPTLEN";

  /**
   * Environment key name denoting the timeline domain ID.
   */
  public static final String DISTRIBUTEDSHELLTIMELINEDOMAIN = "DISTRIBUTEDSHELLTIMELINEDOMAIN";
}
1,937
35.566038
97
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/test/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/TestUnmanagedAMLauncher.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.applications.unmanagedamlauncher; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.net.URL; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.MiniYARNCluster; 
import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; public class TestUnmanagedAMLauncher { private static final Log LOG = LogFactory .getLog(TestUnmanagedAMLauncher.class); protected static MiniYARNCluster yarnCluster = null; protected static Configuration conf = new YarnConfiguration(); @BeforeClass public static void setup() throws InterruptedException, IOException { LOG.info("Starting up YARN cluster"); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128); if (yarnCluster == null) { yarnCluster = new MiniYARNCluster( TestUnmanagedAMLauncher.class.getSimpleName(), 1, 1, 1); yarnCluster.init(conf); yarnCluster.start(); //get the address Configuration yarnClusterConfig = yarnCluster.getConfig(); LOG.info("MiniYARN ResourceManager published address: " + yarnClusterConfig.get(YarnConfiguration.RM_ADDRESS)); LOG.info("MiniYARN ResourceManager published web address: " + yarnClusterConfig.get(YarnConfiguration.RM_WEBAPP_ADDRESS)); String webapp = yarnClusterConfig.get(YarnConfiguration.RM_WEBAPP_ADDRESS); assertTrue("Web app address still unbound to a host at " + webapp, !webapp.startsWith("0.0.0.0")); LOG.info("Yarn webapp is at "+ webapp); URL url = Thread.currentThread().getContextClassLoader() .getResource("yarn-site.xml"); if (url == null) { throw new RuntimeException( "Could not find 'yarn-site.xml' dummy file in classpath"); } //write the document to a buffer (not directly to the file, as that //can cause the file being written to get read -which will then fail. ByteArrayOutputStream bytesOut = new ByteArrayOutputStream(); yarnClusterConfig.writeXml(bytesOut); bytesOut.close(); //write the bytes to the file in the classpath OutputStream os = new FileOutputStream(new File(url.getPath())); os.write(bytesOut.toByteArray()); os.close(); } try { Thread.sleep(2000); } catch (InterruptedException e) { LOG.info("setup thread sleep interrupted. 
message=" + e.getMessage()); } } @AfterClass public static void tearDown() throws IOException { if (yarnCluster != null) { try { yarnCluster.stop(); } finally { yarnCluster = null; } } } private static String getTestRuntimeClasspath() { LOG.info("Trying to generate classpath for app master from current thread's classpath"); String envClassPath = ""; String cp = System.getProperty("java.class.path"); if (cp != null) { envClassPath += cp.trim() + File.pathSeparator; } // yarn-site.xml at this location contains proper config for mini cluster ClassLoader thisClassLoader = Thread.currentThread() .getContextClassLoader(); URL url = thisClassLoader.getResource("yarn-site.xml"); envClassPath += new File(url.getFile()).getParent(); return envClassPath; } @Test(timeout=30000) public void testUMALauncher() throws Exception { String classpath = getTestRuntimeClasspath(); String javaHome = System.getenv("JAVA_HOME"); if (javaHome == null) { LOG.fatal("JAVA_HOME not defined. Test not running."); return; } String[] args = { "--classpath", classpath, "--queue", "default", "--cmd", javaHome + "/bin/java -Xmx512m " + TestUnmanagedAMLauncher.class.getCanonicalName() + " success" }; LOG.info("Initializing Launcher"); UnmanagedAMLauncher launcher = new UnmanagedAMLauncher(new Configuration(yarnCluster.getConfig())) { public void launchAM(ApplicationAttemptId attemptId) throws IOException, YarnException { YarnApplicationAttemptState attemptState = rmClient.getApplicationAttemptReport(attemptId) .getYarnApplicationAttemptState(); Assert.assertTrue(attemptState .equals(YarnApplicationAttemptState.LAUNCHED)); super.launchAM(attemptId); } }; boolean initSuccess = launcher.init(args); Assert.assertTrue(initSuccess); LOG.info("Running Launcher"); boolean result = launcher.run(); LOG.info("Launcher run completed. 
Result=" + result); Assert.assertTrue(result); } @Test(timeout=30000) public void testUMALauncherError() throws Exception { String classpath = getTestRuntimeClasspath(); String javaHome = System.getenv("JAVA_HOME"); if (javaHome == null) { LOG.fatal("JAVA_HOME not defined. Test not running."); return; } String[] args = { "--classpath", classpath, "--queue", "default", "--cmd", javaHome + "/bin/java -Xmx512m " + TestUnmanagedAMLauncher.class.getCanonicalName() + " failure" }; LOG.info("Initializing Launcher"); UnmanagedAMLauncher launcher = new UnmanagedAMLauncher(new Configuration( yarnCluster.getConfig())); boolean initSuccess = launcher.init(args); Assert.assertTrue(initSuccess); LOG.info("Running Launcher"); try { launcher.run(); fail("Expected an exception to occur as launch should have failed"); } catch (RuntimeException e) { // Expected } } // provide main method so this class can act as AM public static void main(String[] args) throws Exception { if (args[0].equals("success")) { ApplicationMasterProtocol client = ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class); client.registerApplicationMaster(RegisterApplicationMasterRequest .newInstance(NetUtils.getHostname(), -1, "")); Thread.sleep(1000); FinishApplicationMasterResponse resp = client.finishApplicationMaster(FinishApplicationMasterRequest .newInstance(FinalApplicationStatus.SUCCEEDED, "success", null)); assertTrue(resp.getIsUnregistered()); System.exit(0); } else { System.exit(1); } } }
8,225
36.390909
92
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.applications.unmanagedamlauncher; import java.io.BufferedReader; import java.io.DataOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStreamReader; import java.net.InetAddress; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.EnumSet; import java.util.Map; import java.util.Set; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.api.ApplicationConstants.Environment; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import 
org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.util.Records; /** * The UnmanagedLauncher is a simple client that launches and unmanaged AM. An * unmanagedAM is an AM that is not launched and managed by the RM. The client * creates a new application on the RM and negotiates a new attempt id. Then it * waits for the RM app state to reach be YarnApplicationState.ACCEPTED after * which it spawns the AM in another process and passes it the container id via * env variable Environment.CONTAINER_ID. The AM can be in any * language. The AM can register with the RM using the attempt id obtained * from the container id and proceed as normal. * The client redirects app stdout and stderr to its own stdout and * stderr and waits for the AM process to exit. Then it waits for the RM to * report app completion. 
*/ public class UnmanagedAMLauncher { private static final Log LOG = LogFactory.getLog(UnmanagedAMLauncher.class); private Configuration conf; // Handle to talk to the Resource Manager/Applications Manager protected YarnClient rmClient; // Application master specific info to register a new Application with RM/ASM private String appName = ""; // App master priority private int amPriority = 0; // Queue for App master private String amQueue = ""; // cmd to start AM private String amCmd = null; // set the classpath explicitly private String classpath = null; private volatile boolean amCompleted = false; private static final long AM_STATE_WAIT_TIMEOUT_MS = 10000; /** * @param args * Command line arguments */ public static void main(String[] args) { try { UnmanagedAMLauncher client = new UnmanagedAMLauncher(); LOG.info("Initializing Client"); boolean doRun = client.init(args); if (!doRun) { System.exit(0); } client.run(); } catch (Throwable t) { LOG.fatal("Error running Client", t); System.exit(1); } } /** */ public UnmanagedAMLauncher(Configuration conf) throws Exception { // Set up RPC this.conf = conf; } public UnmanagedAMLauncher() throws Exception { this(new Configuration()); } private void printUsage(Options opts) { new HelpFormatter().printHelp("Client", opts); } public boolean init(String[] args) throws ParseException { Options opts = new Options(); opts.addOption("appname", true, "Application Name. Default value - UnmanagedAM"); opts.addOption("priority", true, "Application Priority. 
Default 0"); opts.addOption("queue", true, "RM Queue in which this application is to be submitted"); opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master"); opts.addOption("cmd", true, "command to start unmanaged AM (required)"); opts.addOption("classpath", true, "additional classpath"); opts.addOption("help", false, "Print usage"); CommandLine cliParser = new GnuParser().parse(opts, args); if (args.length == 0) { printUsage(opts); throw new IllegalArgumentException( "No args specified for client to initialize"); } if (cliParser.hasOption("help")) { printUsage(opts); return false; } appName = cliParser.getOptionValue("appname", "UnmanagedAM"); amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0")); amQueue = cliParser.getOptionValue("queue", "default"); classpath = cliParser.getOptionValue("classpath", null); amCmd = cliParser.getOptionValue("cmd"); if (amCmd == null) { printUsage(opts); throw new IllegalArgumentException( "No cmd specified for application master"); } YarnConfiguration yarnConf = new YarnConfiguration(conf); rmClient = YarnClient.createYarnClient(); rmClient.init(yarnConf); return true; } public void launchAM(ApplicationAttemptId attemptId) throws IOException, YarnException { Credentials credentials = new Credentials(); Token<AMRMTokenIdentifier> token = rmClient.getAMRMToken(attemptId.getApplicationId()); // Service will be empty but that's okay, we are just passing down only // AMRMToken down to the real AM which eventually sets the correct // service-address. 
credentials.addToken(token.getService(), token); File tokenFile = File.createTempFile("unmanagedAMRMToken","", new File(System.getProperty("user.dir"))); try { FileUtil.chmod(tokenFile.getAbsolutePath(), "600"); } catch (InterruptedException ex) { throw new RuntimeException(ex); } tokenFile.deleteOnExit(); DataOutputStream os = new DataOutputStream(new FileOutputStream(tokenFile, true)); credentials.writeTokenStorageToStream(os); os.close(); Map<String, String> env = System.getenv(); ArrayList<String> envAMList = new ArrayList<String>(); boolean setClasspath = false; for (Map.Entry<String, String> entry : env.entrySet()) { String key = entry.getKey(); String value = entry.getValue(); if(key.equals("CLASSPATH")) { setClasspath = true; if(classpath != null) { value = value + File.pathSeparator + classpath; } } envAMList.add(key + "=" + value); } if(!setClasspath && classpath!=null) { envAMList.add("CLASSPATH="+classpath); } ContainerId containerId = ContainerId.newContainerId(attemptId, 0); String hostname = InetAddress.getLocalHost().getHostName(); envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId); envAMList.add(Environment.NM_HOST.name() + "=" + hostname); envAMList.add(Environment.NM_HTTP_PORT.name() + "=0"); envAMList.add(Environment.NM_PORT.name() + "=0"); envAMList.add(Environment.LOCAL_DIRS.name() + "= /tmp"); envAMList.add(ApplicationConstants.APP_SUBMIT_TIME_ENV + "=" + System.currentTimeMillis()); envAMList.add(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME + "=" + tokenFile.getAbsolutePath()); String[] envAM = new String[envAMList.size()]; Process amProc = Runtime.getRuntime().exec(amCmd, envAMList.toArray(envAM)); final BufferedReader errReader = new BufferedReader(new InputStreamReader( amProc.getErrorStream(), Charset.forName("UTF-8"))); final BufferedReader inReader = new BufferedReader(new InputStreamReader( amProc.getInputStream(), Charset.forName("UTF-8"))); // read error and input streams as this would free up the buffers // 
free the error stream buffer Thread errThread = new Thread() { @Override public void run() { try { String line = errReader.readLine(); while((line != null) && !isInterrupted()) { System.err.println(line); line = errReader.readLine(); } } catch(IOException ioe) { LOG.warn("Error reading the error stream", ioe); } } }; Thread outThread = new Thread() { @Override public void run() { try { String line = inReader.readLine(); while((line != null) && !isInterrupted()) { System.out.println(line); line = inReader.readLine(); } } catch(IOException ioe) { LOG.warn("Error reading the out stream", ioe); } } }; try { errThread.start(); outThread.start(); } catch (IllegalStateException ise) { } // wait for the process to finish and check the exit code try { int exitCode = amProc.waitFor(); LOG.info("AM process exited with value: " + exitCode); } catch (InterruptedException e) { e.printStackTrace(); } finally { amCompleted = true; } try { // make sure that the error thread exits // on Windows these threads sometimes get stuck and hang the execution // timeout and join later after destroying the process. 
errThread.join(); outThread.join(); errReader.close(); inReader.close(); } catch (InterruptedException ie) { LOG.info("ShellExecutor: Interrupted while reading the error/out stream", ie); } catch (IOException ioe) { LOG.warn("Error while closing the error/out stream", ioe); } amProc.destroy(); } public boolean run() throws IOException, YarnException { LOG.info("Starting Client"); // Connect to ResourceManager rmClient.start(); try { // Create launch context for app master LOG.info("Setting up application submission context for ASM"); ApplicationSubmissionContext appContext = rmClient.createApplication() .getApplicationSubmissionContext(); ApplicationId appId = appContext.getApplicationId(); // set the application name appContext.setApplicationName(appName); // Set the priority for the application master Priority pri = Records.newRecord(Priority.class); pri.setPriority(amPriority); appContext.setPriority(pri); // Set the queue to which this application is to be submitted in the RM appContext.setQueue(amQueue); // Set up the container launch context for the application master ContainerLaunchContext amContainer = Records .newRecord(ContainerLaunchContext.class); appContext.setAMContainerSpec(amContainer); // unmanaged AM appContext.setUnmanagedAM(true); LOG.info("Setting unmanaged AM"); // Submit the application to the applications manager LOG.info("Submitting application to ASM"); rmClient.submitApplication(appContext); ApplicationReport appReport = monitorApplication(appId, EnumSet.of(YarnApplicationState.ACCEPTED, YarnApplicationState.KILLED, YarnApplicationState.FAILED, YarnApplicationState.FINISHED)); if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) { // Monitor the application attempt to wait for launch state ApplicationAttemptReport attemptReport = monitorCurrentAppAttempt(appId, YarnApplicationAttemptState.LAUNCHED); ApplicationAttemptId attemptId = attemptReport.getApplicationAttemptId(); LOG.info("Launching AM with application 
attempt id " + attemptId); // launch AM launchAM(attemptId); // Monitor the application for end state appReport = monitorApplication(appId, EnumSet.of(YarnApplicationState.KILLED, YarnApplicationState.FAILED, YarnApplicationState.FINISHED)); } YarnApplicationState appState = appReport.getYarnApplicationState(); FinalApplicationStatus appStatus = appReport.getFinalApplicationStatus(); LOG.info("App ended with state: " + appReport.getYarnApplicationState() + " and status: " + appStatus); boolean success; if (YarnApplicationState.FINISHED == appState && FinalApplicationStatus.SUCCEEDED == appStatus) { LOG.info("Application has completed successfully."); success = true; } else { LOG.info("Application did finished unsuccessfully." + " YarnState=" + appState.toString() + ", FinalStatus=" + appStatus.toString()); success = false; } return success; } finally { rmClient.stop(); } } private ApplicationAttemptReport monitorCurrentAppAttempt( ApplicationId appId, YarnApplicationAttemptState attemptState) throws YarnException, IOException { long startTime = System.currentTimeMillis(); ApplicationAttemptId attemptId = null; while (true) { if (attemptId == null) { attemptId = rmClient.getApplicationReport(appId) .getCurrentApplicationAttemptId(); } ApplicationAttemptReport attemptReport = null; if (attemptId != null) { attemptReport = rmClient.getApplicationAttemptReport(attemptId); if (attemptState.equals(attemptReport.getYarnApplicationAttemptState())) { return attemptReport; } } LOG.info("Current attempt state of " + appId + " is " + (attemptReport == null ? 
" N/A " : attemptReport.getYarnApplicationAttemptState()) + ", waiting for current attempt to reach " + attemptState); try { Thread.sleep(1000); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting for current attempt of " + appId + " to reach " + attemptState); } if (System.currentTimeMillis() - startTime > AM_STATE_WAIT_TIMEOUT_MS) { String errmsg = "Timeout for waiting current attempt of " + appId + " to reach " + attemptState; LOG.error(errmsg); throw new RuntimeException(errmsg); } } } /** * Monitor the submitted application for completion. Kill application if time * expires. * * @param appId * Application Id of application to be monitored * @return true if application completed successfully * @throws YarnException * @throws IOException */ private ApplicationReport monitorApplication(ApplicationId appId, Set<YarnApplicationState> finalState) throws YarnException, IOException { long foundAMCompletedTime = 0; StringBuilder expectedFinalState = new StringBuilder(); boolean first = true; for (YarnApplicationState state : finalState) { if (first) { first = false; expectedFinalState.append(state.name()); } else { expectedFinalState.append("," + state.name()); } } while (true) { // Check app status every 1 second. 
try { Thread.sleep(1000); } catch (InterruptedException e) { LOG.debug("Thread sleep in monitoring loop interrupted"); } // Get application report for the appId we are interested in ApplicationReport report = rmClient.getApplicationReport(appId); LOG.info("Got application report from ASM for" + ", appId=" + appId.getId() + ", appAttemptId=" + report.getCurrentApplicationAttemptId() + ", clientToAMToken=" + report.getClientToAMToken() + ", appDiagnostics=" + report.getDiagnostics() + ", appMasterHost=" + report.getHost() + ", appQueue=" + report.getQueue() + ", appMasterRpcPort=" + report.getRpcPort() + ", appStartTime=" + report.getStartTime() + ", yarnAppState=" + report.getYarnApplicationState().toString() + ", distributedFinalState=" + report.getFinalApplicationStatus().toString() + ", appTrackingUrl=" + report.getTrackingUrl() + ", appUser=" + report.getUser()); YarnApplicationState state = report.getYarnApplicationState(); if (finalState.contains(state)) { return report; } // wait for 10 seconds after process has completed for app report to // come back if (amCompleted) { if (foundAMCompletedTime == 0) { foundAMCompletedTime = System.currentTimeMillis(); } else if ((System.currentTimeMillis() - foundAMCompletedTime) > AM_STATE_WAIT_TIMEOUT_MS) { LOG.warn("Waited " + AM_STATE_WAIT_TIMEOUT_MS/1000 + " seconds after process completed for AppReport" + " to reach desired final state. Not waiting anymore." + "CurrentState = " + state + ", ExpectedStates = " + expectedFinalState.toString()); throw new RuntimeException("Failed to receive final expected state" + " in ApplicationReport" + ", CurrentState=" + state + ", ExpectedStates=" + expectedFinalState.toString()); } } } } }
18,622
36.470825
84
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationMasterServiceProtocolOnHA.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client; import java.io.IOException; import java.util.ArrayList; import org.junit.Assert; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; 
import org.junit.After; import org.junit.Before; import org.junit.Test; public class TestApplicationMasterServiceProtocolOnHA extends ProtocolHATestBase { private ApplicationMasterProtocol amClient; private ApplicationAttemptId attemptId ; @Before public void initialize() throws Exception { startHACluster(0, false, false, true); attemptId = this.cluster.createFakeApplicationAttemptId(); Token<AMRMTokenIdentifier> appToken = this.cluster.getResourceManager().getRMContext() .getAMRMTokenSecretManager().createAndGetAMRMToken(attemptId); appToken.setService(ClientRMProxy.getAMRMTokenService(this.conf)); UserGroupInformation.setLoginUser(UserGroupInformation .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName())); UserGroupInformation.getCurrentUser().addToken(appToken); syncToken(appToken); amClient = ClientRMProxy .createRMProxy(this.conf, ApplicationMasterProtocol.class); } @After public void shutDown() { if(this.amClient != null) { RPC.stopProxy(this.amClient); } } @Test(timeout = 15000) public void testRegisterApplicationMasterOnHA() throws YarnException, IOException { RegisterApplicationMasterRequest request = RegisterApplicationMasterRequest.newInstance("localhost", 0, ""); RegisterApplicationMasterResponse response = amClient.registerApplicationMaster(request); Assert.assertEquals(response, this.cluster.createFakeRegisterApplicationMasterResponse()); } @Test(timeout = 15000) public void testFinishApplicationMasterOnHA() throws YarnException, IOException { FinishApplicationMasterRequest request = FinishApplicationMasterRequest.newInstance( FinalApplicationStatus.SUCCEEDED, "", ""); FinishApplicationMasterResponse response = amClient.finishApplicationMaster(request); Assert.assertEquals(response, this.cluster.createFakeFinishApplicationMasterResponse()); } @Test(timeout = 15000) public void testAllocateOnHA() throws YarnException, IOException { AllocateRequest request = AllocateRequest.newInstance(0, 50f, new ArrayList<ResourceRequest>(), new 
ArrayList<ContainerId>(), ResourceBlacklistRequest.newInstance(new ArrayList<String>(), new ArrayList<String>())); AllocateResponse response = amClient.allocate(request); Assert.assertEquals(response, this.cluster.createFakeAllocateResponse()); } private void syncToken(Token<AMRMTokenIdentifier> token) throws IOException { for (int i = 0; i < this.cluster.getNumOfResourceManager(); i++) { this.cluster.getResourceManager(i).getRMContext() .getAMRMTokenSecretManager().addPersistedPassword(token); } } }
4,810
39.428571
84
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client; import java.io.IOException; import java.io.PrintStream; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.tools.GetGroupsTestBase; import org.apache.hadoop.util.Tool; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; public class TestGetGroups extends GetGroupsTestBase { private static final Log LOG = LogFactory.getLog(TestGetGroups.class); private static ResourceManager resourceManager; private static Configuration conf; @BeforeClass public static void setUpResourceManager() throws IOException, InterruptedException { conf = new YarnConfiguration(); resourceManager = new ResourceManager() { @Override protected void doSecureLogin() throws IOException { }; }; resourceManager.init(conf); new Thread() { public void run() { resourceManager.start(); }; }.start(); int waitCount = 0; while (resourceManager.getServiceState() == STATE.INITED && waitCount++ < 10) 
{ LOG.info("Waiting for RM to start..."); Thread.sleep(1000); } if (resourceManager.getServiceState() != STATE.STARTED) { throw new IOException( "ResourceManager failed to start. Final state is " + resourceManager.getServiceState()); } LOG.info("ResourceManager RMAdmin address: " + conf.get(YarnConfiguration.RM_ADMIN_ADDRESS)); } @SuppressWarnings("static-access") @Before public void setUpConf() { super.conf = this.conf; } @AfterClass public static void tearDownResourceManager() throws InterruptedException { if (resourceManager != null) { LOG.info("Stopping ResourceManager..."); resourceManager.stop(); } } @Override protected Tool getTool(PrintStream o) { return new GetGroupsForTesting(conf, o); } }
2,962
30.860215
86
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client; import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil; import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.ClientBaseWithFixes; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import 
org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; import 
org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse; import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NMToken; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import 
org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse; import org.apache.hadoop.yarn.server.resourcemanager.AdminService; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService; import org.apache.hadoop.yarn.server.resourcemanager.NMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.NodesListManager; import org.apache.hadoop.yarn.server.resourcemanager.RMAppManager; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.util.Records; import org.junit.After; import org.junit.Before; /** * Test Base for ResourceManager's Protocol on HA. 
* * Limited scope: * For all the test cases, we only test whether the method will be re-entered * when failover happens. Does not cover the entire logic of test. * * Test strategy: * Create a separate failover thread with a trigger flag, * override all APIs that are added trigger flag. * When the APIs are called, we will set trigger flag as true to kick off * the failover. So We can make sure the failover happens during process * of the method. If this API is marked as @Idempotent or @AtMostOnce, * the test cases will pass; otherwise, they will throw the exception. * */ public abstract class ProtocolHATestBase extends ClientBaseWithFixes { protected static final HAServiceProtocol.StateChangeRequestInfo req = new HAServiceProtocol.StateChangeRequestInfo( HAServiceProtocol.RequestSource.REQUEST_BY_USER); protected static final String RM1_NODE_ID = "rm1"; protected static final int RM1_PORT_BASE = 10000; protected static final String RM2_NODE_ID = "rm2"; protected static final int RM2_PORT_BASE = 20000; protected Configuration conf; protected MiniYARNClusterForHATesting cluster; protected Thread failoverThread = null; private volatile boolean keepRunning; @Before public void setup() throws IOException { failoverThread = null; keepRunning = true; conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true); conf.setInt(YarnConfiguration.CLIENT_FAILOVER_MAX_ATTEMPTS, 5); conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID); HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE, conf); HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE, conf); conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L); conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true); conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true); } @After public void teardown() throws Exception { keepRunning = false; if (failoverThread != null) { failoverThread.interrupt(); try { failoverThread.join(); } catch 
(InterruptedException ex) { LOG.error("Error joining with failover thread", ex); } } cluster.stop(); } protected AdminService getAdminService(int index) { return cluster.getResourceManager(index).getRMContext() .getRMAdminService(); } protected void explicitFailover() throws IOException { int activeRMIndex = cluster.getActiveRMIndex(); int newActiveRMIndex = (activeRMIndex + 1) % 2; getAdminService(activeRMIndex).transitionToStandby(req); getAdminService(newActiveRMIndex).transitionToActive(req); assertEquals("Failover failed", newActiveRMIndex, cluster.getActiveRMIndex()); } protected YarnClient createAndStartYarnClient(Configuration conf) { Configuration configuration = new YarnConfiguration(conf); YarnClient client = YarnClient.createYarnClient(); client.init(configuration); client.start(); return client; } protected void verifyConnections() throws InterruptedException, YarnException { assertTrue("NMs failed to connect to the RM", cluster.waitForNodeManagersToConnect(20000)); verifyClientConnection(); } protected void verifyClientConnection() { int numRetries = 3; while(numRetries-- > 0) { Configuration conf = new YarnConfiguration(this.conf); YarnClient client = createAndStartYarnClient(conf); try { Thread.sleep(100); client.getApplications(); return; } catch (Exception e) { LOG.error(e.getMessage()); } finally { client.stop(); } } fail("Client couldn't connect to the Active RM"); } protected Thread createAndStartFailoverThread() { Thread failoverThread = new Thread() { public void run() { keepRunning = true; while (keepRunning) { if (cluster.getStartFailoverFlag()) { try { explicitFailover(); keepRunning = false; cluster.resetFailoverTriggeredFlag(true); } catch (Exception e) { // Do Nothing } finally { keepRunning = false; } } try { Thread.sleep(50); } catch (InterruptedException e) { // DO NOTHING } } } }; failoverThread.start(); return failoverThread; } protected void startHACluster(int numOfNMs, boolean overrideClientRMService, boolean overrideRTS, boolean 
overrideApplicationMasterService) throws Exception { conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true); conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false); cluster = new MiniYARNClusterForHATesting(TestRMFailover.class.getName(), 2, numOfNMs, 1, 1, false, overrideClientRMService, overrideRTS, overrideApplicationMasterService); cluster.resetStartFailoverFlag(false); cluster.init(conf); cluster.start(); getAdminService(0).transitionToActive(req); assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex()); verifyConnections(); // Do the failover explicitFailover(); verifyConnections(); failoverThread = createAndStartFailoverThread(); } protected ResourceManager getActiveRM() { return cluster.getResourceManager(cluster.getActiveRMIndex()); } public class MiniYARNClusterForHATesting extends MiniYARNCluster { private boolean overrideClientRMService; private boolean overrideRTS; private boolean overrideApplicationMasterService; private final AtomicBoolean startFailover = new AtomicBoolean(false); private final AtomicBoolean failoverTriggered = new AtomicBoolean(false); public MiniYARNClusterForHATesting(String testName, int numResourceManagers, int numNodeManagers, int numLocalDirs, int numLogDirs, boolean enableAHS, boolean overrideClientRMService, boolean overrideRTS, boolean overrideApplicationMasterService) { super(testName, numResourceManagers, numNodeManagers, numLocalDirs, numLogDirs, enableAHS); this.overrideClientRMService = overrideClientRMService; this.overrideRTS = overrideRTS; this.overrideApplicationMasterService = overrideApplicationMasterService; } public boolean getStartFailoverFlag() { return startFailover.get(); } public void resetStartFailoverFlag(boolean flag) { startFailover.set(flag); } public void resetFailoverTriggeredFlag(boolean flag) { failoverTriggered.set(flag); } private boolean waittingForFailOver() { int maximumWaittingTime = 50; int count = 0; while (!failoverTriggered.get() && count >= 
maximumWaittingTime) { try { Thread.sleep(100); } catch (InterruptedException e) { // DO NOTHING } count++; } if (count >= maximumWaittingTime) { return false; } try { Thread.sleep(100); } catch (InterruptedException e) { // DO NOTHING } return true; } @Override protected ResourceManager createResourceManager() { return new ResourceManager() { @Override protected void doSecureLogin() throws IOException { // Don't try to login using keytab in the testcases. } @Override protected ClientRMService createClientRMService() { if (overrideClientRMService) { return new CustomedClientRMService(this.rmContext, this.scheduler, this.rmAppManager, this.applicationACLsManager, this.queueACLsManager, this.rmContext.getRMDelegationTokenSecretManager()); } return super.createClientRMService(); } @Override protected ResourceTrackerService createResourceTrackerService() { if (overrideRTS) { return new CustomedResourceTrackerService(this.rmContext, this.nodesListManager, this.nmLivelinessMonitor, this.rmContext.getContainerTokenSecretManager(), this.rmContext.getNMTokenSecretManager()); } return super.createResourceTrackerService(); } @Override protected ApplicationMasterService createApplicationMasterService() { if (overrideApplicationMasterService) { return new CustomedApplicationMasterService(this.rmContext, this.scheduler); } return super.createApplicationMasterService(); } }; } private class CustomedClientRMService extends ClientRMService { public CustomedClientRMService(RMContext rmContext, YarnScheduler scheduler, RMAppManager rmAppManager, ApplicationACLsManager applicationACLsManager, QueueACLsManager queueACLsManager, RMDelegationTokenSecretManager rmDTSecretManager) { super(rmContext, scheduler, rmAppManager, applicationACLsManager, queueACLsManager, rmDTSecretManager); } @Override public GetNewApplicationResponse getNewApplication( GetNewApplicationRequest request) throws YarnException { resetStartFailoverFlag(true); // make sure failover has been triggered 
Assert.assertTrue(waittingForFailOver()); // create the GetNewApplicationResponse with fake applicationId GetNewApplicationResponse response = GetNewApplicationResponse.newInstance( createFakeAppId(), null, null); return response; } @Override public GetApplicationReportResponse getApplicationReport( GetApplicationReportRequest request) throws YarnException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); // create a fake application report ApplicationReport report = createFakeAppReport(); GetApplicationReportResponse response = GetApplicationReportResponse.newInstance(report); return response; } @Override public GetClusterMetricsResponse getClusterMetrics( GetClusterMetricsRequest request) throws YarnException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); // create GetClusterMetricsResponse with fake YarnClusterMetrics GetClusterMetricsResponse response = GetClusterMetricsResponse.newInstance( createFakeYarnClusterMetrics()); return response; } @Override public GetApplicationsResponse getApplications( GetApplicationsRequest request) throws YarnException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); // create GetApplicationsResponse with fake applicationList GetApplicationsResponse response = GetApplicationsResponse.newInstance(createFakeAppReports()); return response; } @Override public GetClusterNodesResponse getClusterNodes( GetClusterNodesRequest request) throws YarnException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); // create GetClusterNodesResponse with fake ClusterNodeLists GetClusterNodesResponse response = GetClusterNodesResponse.newInstance(createFakeNodeReports()); return response; } @Override public GetQueueInfoResponse getQueueInfo(GetQueueInfoRequest request) throws YarnException 
{ resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); // return fake QueueInfo return GetQueueInfoResponse.newInstance(createFakeQueueInfo()); } @Override public GetQueueUserAclsInfoResponse getQueueUserAcls( GetQueueUserAclsInfoRequest request) throws YarnException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); // return fake queueUserAcls return GetQueueUserAclsInfoResponse .newInstance(createFakeQueueUserACLInfoList()); } @Override public GetApplicationAttemptReportResponse getApplicationAttemptReport( GetApplicationAttemptReportRequest request) throws YarnException, IOException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); // return fake ApplicationAttemptReport return GetApplicationAttemptReportResponse .newInstance(createFakeApplicationAttemptReport()); } @Override public GetApplicationAttemptsResponse getApplicationAttempts( GetApplicationAttemptsRequest request) throws YarnException, IOException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); // return fake ApplicationAttemptReports return GetApplicationAttemptsResponse .newInstance(createFakeApplicationAttemptReports()); } @Override public GetContainerReportResponse getContainerReport( GetContainerReportRequest request) throws YarnException, IOException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); // return fake containerReport return GetContainerReportResponse .newInstance(createFakeContainerReport()); } @Override public GetContainersResponse getContainers(GetContainersRequest request) throws YarnException, IOException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); // return fake ContainerReports return 
GetContainersResponse.newInstance(createFakeContainerReports()); } @Override public SubmitApplicationResponse submitApplication( SubmitApplicationRequest request) throws YarnException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); return super.submitApplication(request); } @Override public KillApplicationResponse forceKillApplication( KillApplicationRequest request) throws YarnException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); return KillApplicationResponse.newInstance(true); } @Override public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues( MoveApplicationAcrossQueuesRequest request) throws YarnException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); return Records.newRecord(MoveApplicationAcrossQueuesResponse.class); } @Override public GetDelegationTokenResponse getDelegationToken( GetDelegationTokenRequest request) throws YarnException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); return GetDelegationTokenResponse.newInstance(createFakeToken()); } @Override public RenewDelegationTokenResponse renewDelegationToken( RenewDelegationTokenRequest request) throws YarnException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); return RenewDelegationTokenResponse .newInstance(createNextExpirationTime()); } @Override public CancelDelegationTokenResponse cancelDelegationToken( CancelDelegationTokenRequest request) throws YarnException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); return CancelDelegationTokenResponse.newInstance(); } } public ApplicationReport createFakeAppReport() { ApplicationId appId = ApplicationId.newInstance(1000l, 1); 
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1); // create a fake application report ApplicationReport report = ApplicationReport.newInstance(appId, attemptId, "fakeUser", "fakeQueue", "fakeApplicationName", "localhost", 0, null, YarnApplicationState.FINISHED, "fake an application report", "", 1000l, 1200l, FinalApplicationStatus.FAILED, null, "", 50f, "fakeApplicationType", null); return report; } public List<ApplicationReport> createFakeAppReports() { List<ApplicationReport> reports = new ArrayList<ApplicationReport>(); reports.add(createFakeAppReport()); return reports; } public ApplicationId createFakeAppId() { return ApplicationId.newInstance(1000l, 1); } public ApplicationAttemptId createFakeApplicationAttemptId() { return ApplicationAttemptId.newInstance(createFakeAppId(), 0); } public ContainerId createFakeContainerId() { return ContainerId.newContainerId(createFakeApplicationAttemptId(), 0); } public YarnClusterMetrics createFakeYarnClusterMetrics() { return YarnClusterMetrics.newInstance(1); } public List<NodeReport> createFakeNodeReports() { NodeId nodeId = NodeId.newInstance("localhost", 0); NodeReport report = NodeReport.newInstance(nodeId, NodeState.RUNNING, "localhost", "rack1", null, null, 4, null, 1000l, null); List<NodeReport> reports = new ArrayList<NodeReport>(); reports.add(report); return reports; } public QueueInfo createFakeQueueInfo() { return QueueInfo.newInstance("root", 100f, 100f, 50f, null, createFakeAppReports(), QueueState.RUNNING, null, null, null); } public List<QueueUserACLInfo> createFakeQueueUserACLInfoList() { List<QueueACL> queueACL = new ArrayList<QueueACL>(); queueACL.add(QueueACL.SUBMIT_APPLICATIONS); QueueUserACLInfo info = QueueUserACLInfo.newInstance("root", queueACL); List<QueueUserACLInfo> infos = new ArrayList<QueueUserACLInfo>(); infos.add(info); return infos; } public ApplicationAttemptReport createFakeApplicationAttemptReport() { return ApplicationAttemptReport.newInstance( 
createFakeApplicationAttemptId(), "localhost", 0, "", "", "", YarnApplicationAttemptState.RUNNING, createFakeContainerId(), 1000l, 1200l); } public List<ApplicationAttemptReport> createFakeApplicationAttemptReports() { List<ApplicationAttemptReport> reports = new ArrayList<ApplicationAttemptReport>(); reports.add(createFakeApplicationAttemptReport()); return reports; } public ContainerReport createFakeContainerReport() { return ContainerReport.newInstance(createFakeContainerId(), null, NodeId.newInstance("localhost", 0), null, 1000l, 1200l, "", "", 0, ContainerState.COMPLETE, "http://" + NodeId.newInstance("localhost", 0).toString()); } public List<ContainerReport> createFakeContainerReports() { List<ContainerReport> reports = new ArrayList<ContainerReport>(); reports.add(createFakeContainerReport()); return reports; } public Token createFakeToken() { String identifier = "fake Token"; String password = "fake token passwd"; Token token = Token.newInstance( identifier.getBytes(), " ", password.getBytes(), " "); return token; } public long createNextExpirationTime() { return "fake Token".getBytes().length; } private class CustomedResourceTrackerService extends ResourceTrackerService { public CustomedResourceTrackerService(RMContext rmContext, NodesListManager nodesListManager, NMLivelinessMonitor nmLivelinessMonitor, RMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInRM nmTokenSecretManager) { super(rmContext, nodesListManager, nmLivelinessMonitor, containerTokenSecretManager, nmTokenSecretManager); } @Override public RegisterNodeManagerResponse registerNodeManager( RegisterNodeManagerRequest request) throws YarnException, IOException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); return super.registerNodeManager(request); } @Override public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request) throws YarnException, IOException { resetStartFailoverFlag(true); 
// make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); return super.nodeHeartbeat(request); } } private class CustomedApplicationMasterService extends ApplicationMasterService { public CustomedApplicationMasterService(RMContext rmContext, YarnScheduler scheduler) { super(rmContext, scheduler); } @Override public AllocateResponse allocate(AllocateRequest request) throws YarnException, IOException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); return createFakeAllocateResponse(); } @Override public RegisterApplicationMasterResponse registerApplicationMaster( RegisterApplicationMasterRequest request) throws YarnException, IOException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); return createFakeRegisterApplicationMasterResponse(); } @Override public FinishApplicationMasterResponse finishApplicationMaster( FinishApplicationMasterRequest request) throws YarnException, IOException { resetStartFailoverFlag(true); // make sure failover has been triggered Assert.assertTrue(waittingForFailOver()); return createFakeFinishApplicationMasterResponse(); } } public RegisterApplicationMasterResponse createFakeRegisterApplicationMasterResponse() { Resource minCapability = Resource.newInstance(2048, 2); Resource maxCapability = Resource.newInstance(4096, 4); Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>(); acls.put(ApplicationAccessType.MODIFY_APP, "*"); ByteBuffer key = ByteBuffer.wrap("fake_key".getBytes()); return RegisterApplicationMasterResponse.newInstance(minCapability, maxCapability, acls, key, new ArrayList<Container>(), "root", new ArrayList<NMToken>()); } public FinishApplicationMasterResponse createFakeFinishApplicationMasterResponse() { return FinishApplicationMasterResponse.newInstance(true); } public AllocateResponse createFakeAllocateResponse() { return 
AllocateResponse.newInstance(-1, new ArrayList<ContainerStatus>(), new ArrayList<Container>(), new ArrayList<NodeReport>(), Resource.newInstance(1024, 2), null, 1, null, new ArrayList<NMToken>()); } } }
31,958
38.117503
93
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/GetGroupsForTesting.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client; import java.io.IOException; import java.io.PrintStream; import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.tools.GetGroupsBase; import org.apache.hadoop.tools.GetUserMappingsProtocol; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; public class GetGroupsForTesting extends GetGroupsBase { public GetGroupsForTesting(Configuration conf) { super(conf); } public GetGroupsForTesting(Configuration conf, PrintStream out) { super(conf, out); } @Override protected InetSocketAddress getProtocolAddress(Configuration conf) throws IOException { return conf.getSocketAddr(YarnConfiguration.RM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_RM_ADMIN_PORT); } @Override public void setConf(Configuration conf) { conf = new YarnConfiguration(conf); super.setConf(conf); } @Override protected GetUserMappingsProtocol getUgmProtocol() throws IOException { Configuration conf = getConf(); final InetSocketAddress addr = 
conf.getSocketAddr( YarnConfiguration.RM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_RM_ADMIN_PORT); final YarnRPC rpc = YarnRPC.create(conf); ResourceManagerAdministrationProtocol adminProtocol = (ResourceManagerAdministrationProtocol) rpc.getProxy( ResourceManagerAdministrationProtocol.class, addr, getConf()); return adminProtocol; } public static void main(String[] argv) throws Exception { int res = ToolRunner.run(new GetGroupsForTesting(new YarnConfiguration()), argv); System.exit(res); } }
2,717
33.846154
111
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestYarnApiClasses.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.CancelDelegationTokenRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.junit.Test; import static org.junit.Assert.*; public class TestYarnApiClasses { private final org.apache.hadoop.yarn.factories.RecordFactory recordFactory = RecordFactoryProvider .getRecordFactory(null); /** * Simple test Resource request. * Test hashCode, equals and compare. 
*/ @Test public void testResourceRequest() { Resource resource = recordFactory.newRecordInstance(Resource.class); Priority priority = recordFactory.newRecordInstance(Priority.class); ResourceRequest original = ResourceRequest.newInstance(priority, "localhost", resource, 2) ; ResourceRequest copy = ResourceRequest.newInstance(priority, "localhost", resource, 2); assertTrue(original.equals(copy)); assertEquals(0, original.compareTo(copy)); assertTrue(original.hashCode() == copy.hashCode()); copy.setNumContainers(1); assertFalse(original.equals(copy)); assertNotSame(0, original.compareTo(copy)); assertFalse(original.hashCode() == copy.hashCode()); } /** * Test CancelDelegationTokenRequestPBImpl. * Test a transformation to prototype and back */ @Test public void testCancelDelegationTokenRequestPBImpl() { Token token = getDelegationToken(); CancelDelegationTokenRequestPBImpl original = new CancelDelegationTokenRequestPBImpl(); original.setDelegationToken(token); CancelDelegationTokenRequestProto protoType = original.getProto(); CancelDelegationTokenRequestPBImpl copy = new CancelDelegationTokenRequestPBImpl(protoType); assertNotNull(copy.getDelegationToken()); //compare source and converted assertEquals(token, copy.getDelegationToken()); } /** * Test RenewDelegationTokenRequestPBImpl. * Test a transformation to prototype and back */ @Test public void testRenewDelegationTokenRequestPBImpl() { Token token = getDelegationToken(); RenewDelegationTokenRequestPBImpl original = new RenewDelegationTokenRequestPBImpl(); original.setDelegationToken(token); RenewDelegationTokenRequestProto protoType = original.getProto(); RenewDelegationTokenRequestPBImpl copy = new RenewDelegationTokenRequestPBImpl(protoType); assertNotNull(copy.getDelegationToken()); //compare source and converted assertEquals(token, copy.getDelegationToken()); } private Token getDelegationToken() { return Token.newInstance(new byte[0], "", new byte[0], ""); } }
3,935
33.526316
100
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.client.api.YarnClient; import 
org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.util.Records; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; public class TestApplicationClientProtocolOnHA extends ProtocolHATestBase { private YarnClient client = null; @Before public void initiate() throws Exception { startHACluster(1, true, false, false); Configuration conf = new YarnConfiguration(this.conf); client = createAndStartYarnClient(conf); } @After public void shutDown() { if (client != null) { client.stop(); } } @Test(timeout = 15000) public void testGetApplicationReportOnHA() throws Exception { ApplicationReport report = client.getApplicationReport(cluster.createFakeAppId()); Assert.assertTrue(report != null); Assert.assertEquals(cluster.createFakeAppReport(), report); } @Test(timeout = 15000) public void testGetNewApplicationOnHA() throws Exception { ApplicationId appId = client.createApplication().getApplicationSubmissionContext() .getApplicationId(); Assert.assertTrue(appId != null); Assert.assertEquals(cluster.createFakeAppId(), appId); } @Test(timeout = 15000) public void testGetClusterMetricsOnHA() throws Exception { YarnClusterMetrics clusterMetrics = client.getYarnClusterMetrics(); Assert.assertTrue(clusterMetrics != null); Assert.assertEquals(cluster.createFakeYarnClusterMetrics(), clusterMetrics); } @Test(timeout = 15000) public void testGetApplicationsOnHA() throws Exception { List<ApplicationReport> reports = client.getApplications(); Assert.assertTrue(reports != null); Assert.assertFalse(reports.isEmpty()); Assert.assertEquals(cluster.createFakeAppReports(), reports); } @Test(timeout = 15000) public void testGetClusterNodesOnHA() throws Exception { List<NodeReport> reports = client.getNodeReports(NodeState.RUNNING); Assert.assertTrue(reports != null); Assert.assertFalse(reports.isEmpty()); Assert.assertEquals(cluster.createFakeNodeReports(), reports); } @Test(timeout = 15000) public void testGetQueueInfoOnHA() 
throws Exception { QueueInfo queueInfo = client.getQueueInfo("root"); Assert.assertTrue(queueInfo != null); Assert.assertEquals(cluster.createFakeQueueInfo(), queueInfo); } @Test(timeout = 15000) public void testGetQueueUserAclsOnHA() throws Exception { List<QueueUserACLInfo> queueUserAclsList = client.getQueueAclsInfo(); Assert.assertTrue(queueUserAclsList != null); Assert.assertFalse(queueUserAclsList.isEmpty()); Assert.assertEquals(cluster.createFakeQueueUserACLInfoList(), queueUserAclsList); } @Test(timeout = 15000) public void testGetApplicationAttemptReportOnHA() throws Exception { ApplicationAttemptReport report = client.getApplicationAttemptReport(cluster .createFakeApplicationAttemptId()); Assert.assertTrue(report != null); Assert.assertEquals(cluster.createFakeApplicationAttemptReport(), report); } @Test(timeout = 15000) public void testGetApplicationAttemptsOnHA() throws Exception { List<ApplicationAttemptReport> reports = client.getApplicationAttempts(cluster.createFakeAppId()); Assert.assertTrue(reports != null); Assert.assertFalse(reports.isEmpty()); Assert.assertEquals(cluster.createFakeApplicationAttemptReports(), reports); } @Test(timeout = 15000) public void testGetContainerReportOnHA() throws Exception { ContainerReport report = client.getContainerReport(cluster.createFakeContainerId()); Assert.assertTrue(report != null); Assert.assertEquals(cluster.createFakeContainerReport(), report); } @Test(timeout = 15000) public void testGetContainersOnHA() throws Exception { List<ContainerReport> reports = client.getContainers(cluster.createFakeApplicationAttemptId()); Assert.assertTrue(reports != null); Assert.assertFalse(reports.isEmpty()); Assert.assertEquals(cluster.createFakeContainerReports(), reports); } @Test(timeout = 15000) public void testSubmitApplicationOnHA() throws Exception { ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class); appContext.setApplicationId(cluster.createFakeAppId()); 
ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class); appContext.setAMContainerSpec(amContainer); Resource capability = Records.newRecord(Resource.class); capability.setMemory(10); capability.setVirtualCores(1); appContext.setResource(capability); ApplicationId appId = client.submitApplication(appContext); Assert.assertTrue(getActiveRM().getRMContext().getRMApps() .containsKey(appId)); } @Test(timeout = 15000) public void testMoveApplicationAcrossQueuesOnHA() throws Exception{ client.moveApplicationAcrossQueues(cluster.createFakeAppId(), "root"); } @Test(timeout = 15000) public void testForceKillApplicationOnHA() throws Exception { client.killApplication(cluster.createFakeAppId()); } @Test(timeout = 15000) public void testGetDelegationTokenOnHA() throws Exception { Token token = client.getRMDelegationToken(new Text(" ")); Assert.assertEquals(token, cluster.createFakeToken()); } @Test(timeout = 15000) public void testRenewDelegationTokenOnHA() throws Exception { RenewDelegationTokenRequest request = RenewDelegationTokenRequest.newInstance(cluster.createFakeToken()); long newExpirationTime = ClientRMProxy.createRMProxy(this.conf, ApplicationClientProtocol.class) .renewDelegationToken(request).getNextExpirationTime(); Assert.assertEquals(newExpirationTime, cluster.createNextExpirationTime()); } @Test(timeout = 15000) public void testCancelDelegationTokenOnHA() throws Exception { CancelDelegationTokenRequest request = CancelDelegationTokenRequest.newInstance(cluster.createFakeToken()); ClientRMProxy.createRMProxy(this.conf, ApplicationClientProtocol.class) .cancelDelegationToken(request); } }
8,230
37.106481
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceTrackerOnHA.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client; import java.io.IOException; import org.junit.Assert; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.api.ResourceTracker; import org.apache.hadoop.yarn.server.api.ServerRMProxy; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest; import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.util.YarnVersionInfo; import org.junit.After; import org.junit.Before; import org.junit.Test; public class TestResourceTrackerOnHA extends ProtocolHATestBase{ private ResourceTracker resourceTracker = null; @Before public void initiate() throws Exception { startHACluster(0, false, true, false); this.resourceTracker = getRMClient(); } @After public void shutDown() { if(this.resourceTracker != null) { RPC.stopProxy(this.resourceTracker); } } @Test(timeout = 15000) public void testResourceTrackerOnHA() throws Exception { NodeId nodeId = NodeId.newInstance("localhost", 0); Resource resource = 
Resource.newInstance(2048, 4); // make sure registerNodeManager works when failover happens RegisterNodeManagerRequest request = RegisterNodeManagerRequest.newInstance(nodeId, 0, resource, YarnVersionInfo.getVersion(), null, null); resourceTracker.registerNodeManager(request); Assert.assertTrue(waitForNodeManagerToConnect(10000, nodeId)); // restart the failover thread, and make sure nodeHeartbeat works failoverThread = createAndStartFailoverThread(); NodeStatus status = NodeStatus.newInstance(NodeId.newInstance("localhost", 0), 0, null, null, null, null); NodeHeartbeatRequest request2 = NodeHeartbeatRequest.newInstance(status, null, null,null); resourceTracker.nodeHeartbeat(request2); } private ResourceTracker getRMClient() throws IOException { return ServerRMProxy.createRMProxy(this.conf, ResourceTracker.class); } private boolean waitForNodeManagerToConnect(int timeout, NodeId nodeId) throws Exception { for (int i = 0; i < timeout / 100; i++) { if (getActiveRM().getRMContext().getRMNodes().containsKey(nodeId)) { return true; } Thread.sleep(100); } return false; } }
3,271
34.565217
84
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestRMFailover.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertNull; import static org.junit.Assert.fail; import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.ClientBaseWithFixes; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.resourcemanager.AdminService; import org.apache.hadoop.yarn.server.resourcemanager.HATestUtil; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import 
org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; public class TestRMFailover extends ClientBaseWithFixes { private static final Log LOG = LogFactory.getLog(TestRMFailover.class.getName()); private static final HAServiceProtocol.StateChangeRequestInfo req = new HAServiceProtocol.StateChangeRequestInfo( HAServiceProtocol.RequestSource.REQUEST_BY_USER); private static final String RM1_NODE_ID = "rm1"; private static final int RM1_PORT_BASE = 10000; private static final String RM2_NODE_ID = "rm2"; private static final int RM2_PORT_BASE = 20000; private Configuration conf; private MiniYARNCluster cluster; private ApplicationId fakeAppId; @Before public void setup() throws IOException { fakeAppId = ApplicationId.newInstance(System.currentTimeMillis(), 0); conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true); conf.set(YarnConfiguration.RM_HA_IDS, RM1_NODE_ID + "," + RM2_NODE_ID); HATestUtil.setRpcAddressForRM(RM1_NODE_ID, RM1_PORT_BASE, conf); HATestUtil.setRpcAddressForRM(RM2_NODE_ID, RM2_PORT_BASE, conf); conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L); conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true); conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_USE_RPC, true); cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 1, 1, 1); } @After public void teardown() { cluster.stop(); } private void verifyClientConnection() { int numRetries = 3; while(numRetries-- > 0) { Configuration conf = new YarnConfiguration(this.conf); YarnClient client = YarnClient.createYarnClient(); client.init(conf); client.start(); try { client.getApplications(); return; } catch (Exception e) { LOG.error(e); } finally { client.stop(); } } fail("Client couldn't connect to the Active RM"); } private void verifyConnections() throws InterruptedException, YarnException { assertTrue("NMs failed to connect to 
the RM", cluster.waitForNodeManagersToConnect(20000)); verifyClientConnection(); } private AdminService getAdminService(int index) { return cluster.getResourceManager(index).getRMContext().getRMAdminService(); } private void explicitFailover() throws IOException { int activeRMIndex = cluster.getActiveRMIndex(); int newActiveRMIndex = (activeRMIndex + 1) % 2; getAdminService(activeRMIndex).transitionToStandby(req); getAdminService(newActiveRMIndex).transitionToActive(req); assertEquals("Failover failed", newActiveRMIndex, cluster.getActiveRMIndex()); } private void failover() throws IOException, InterruptedException, YarnException { int activeRMIndex = cluster.getActiveRMIndex(); cluster.stopResourceManager(activeRMIndex); assertEquals("Failover failed", (activeRMIndex + 1) % 2, cluster.getActiveRMIndex()); cluster.restartResourceManager(activeRMIndex); } @Test public void testExplicitFailover() throws YarnException, InterruptedException, IOException { conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false); cluster.init(conf); cluster.start(); getAdminService(0).transitionToActive(req); assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex()); verifyConnections(); explicitFailover(); verifyConnections(); explicitFailover(); verifyConnections(); } @Test public void testAutomaticFailover() throws YarnException, InterruptedException, IOException { conf.set(YarnConfiguration.RM_CLUSTER_ID, "yarn-test-cluster"); conf.set(YarnConfiguration.RM_ZK_ADDRESS, hostPort); conf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, 2000); cluster.init(conf); cluster.start(); assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex()); verifyConnections(); failover(); verifyConnections(); failover(); verifyConnections(); // Make the current Active handle an RMFatalEvent, // so it transitions to standby. 
ResourceManager rm = cluster.getResourceManager( cluster.getActiveRMIndex()); rm.handleTransitionToStandBy(); int maxWaitingAttempts = 2000; while (maxWaitingAttempts-- > 0 ) { if (rm.getRMContext().getHAServiceState() == HAServiceState.STANDBY) { break; } Thread.sleep(1); } Assert.assertFalse("RM didn't transition to Standby ", maxWaitingAttempts == 0); verifyConnections(); } @Test public void testWebAppProxyInStandAloneMode() throws YarnException, InterruptedException, IOException { conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false); WebAppProxyServer webAppProxyServer = new WebAppProxyServer(); try { conf.set(YarnConfiguration.PROXY_ADDRESS, "0.0.0.0:9099"); cluster.init(conf); cluster.start(); getAdminService(0).transitionToActive(req); assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex()); verifyConnections(); webAppProxyServer.init(conf); // Start webAppProxyServer Assert.assertEquals(STATE.INITED, webAppProxyServer.getServiceState()); webAppProxyServer.start(); Assert.assertEquals(STATE.STARTED, webAppProxyServer.getServiceState()); // send httpRequest with fakeApplicationId // expect to get "Not Found" response and 404 response code URL wrongUrl = new URL("http://0.0.0.0:9099/proxy/" + fakeAppId); HttpURLConnection proxyConn = (HttpURLConnection) wrongUrl .openConnection(); proxyConn.connect(); verifyResponse(proxyConn); explicitFailover(); verifyConnections(); proxyConn.connect(); verifyResponse(proxyConn); } finally { webAppProxyServer.stop(); } } @Test public void testEmbeddedWebAppProxy() throws YarnException, InterruptedException, IOException { conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false); cluster.init(conf); cluster.start(); getAdminService(0).transitionToActive(req); assertFalse("RM never turned active", -1 == cluster.getActiveRMIndex()); verifyConnections(); // send httpRequest with fakeApplicationId // expect to get "Not Found" response and 404 response code URL wrongUrl = new 
URL("http://0.0.0.0:18088/proxy/" + fakeAppId); HttpURLConnection proxyConn = (HttpURLConnection) wrongUrl .openConnection(); proxyConn.connect(); verifyResponse(proxyConn); explicitFailover(); verifyConnections(); proxyConn.connect(); verifyResponse(proxyConn); } private void verifyResponse(HttpURLConnection response) throws IOException { assertEquals("Not Found", response.getResponseMessage()); assertEquals(404, response.getResponseCode()); } @Test public void testRMWebAppRedirect() throws YarnException, InterruptedException, IOException { cluster = new MiniYARNCluster(TestRMFailover.class.getName(), 2, 0, 1, 1); conf.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false); cluster.init(conf); cluster.start(); getAdminService(0).transitionToActive(req); String rm1Url = "http://0.0.0.0:18088"; String rm2Url = "http://0.0.0.0:28088"; String redirectURL = getRedirectURL(rm2Url); // if uri is null, RMWebAppFilter will append a slash at the trail of the redirection url assertEquals(redirectURL,rm1Url+"/"); redirectURL = getRedirectURL(rm2Url + "/metrics"); assertEquals(redirectURL,rm1Url + "/metrics"); redirectURL = getRedirectURL(rm2Url + "/jmx"); assertEquals(redirectURL,rm1Url + "/jmx"); // standby RM links /conf, /stacks, /logLevel, /static, /logs, // /cluster/cluster as well as webService // /ws/v1/cluster/info should not be redirected to active RM redirectURL = getRedirectURL(rm2Url + "/cluster/cluster"); assertNull(redirectURL); redirectURL = getRedirectURL(rm2Url + "/conf"); assertNull(redirectURL); redirectURL = getRedirectURL(rm2Url + "/stacks"); assertNull(redirectURL); redirectURL = getRedirectURL(rm2Url + "/logLevel"); assertNull(redirectURL); redirectURL = getRedirectURL(rm2Url + "/static"); assertNull(redirectURL); redirectURL = getRedirectURL(rm2Url + "/logs"); assertNull(redirectURL); redirectURL = getRedirectURL(rm2Url + "/ws/v1/cluster/info"); assertNull(redirectURL); redirectURL = getRedirectURL(rm2Url + "/ws/v1/cluster/apps"); 
assertEquals(redirectURL, rm1Url + "/ws/v1/cluster/apps"); redirectURL = getRedirectURL(rm2Url + "/proxy/" + fakeAppId); assertNull(redirectURL); } // set up http connection with the given url and get the redirection url from the response // return null if the url is not redirected static String getRedirectURL(String url) { String redirectUrl = null; try { HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection(); // do not automatically follow the redirection // otherwise we get too many redirections exception conn.setInstanceFollowRedirects(false); if(conn.getResponseCode() == HttpServletResponse.SC_TEMPORARY_REDIRECT) redirectUrl = conn.getHeaderField("Location"); } catch (Exception e) { // throw new RuntimeException(e); } return redirectUrl; } }
11,602
34.48318
93
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestResourceManagerAdministrationProtocolPBClientImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client; import java.io.IOException; import java.net.InetSocketAddress; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.yarn.api.records.DecommissionType; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; import org.apache.hadoop.yarn.server.api.impl.pb.client.ResourceManagerAdministrationProtocolPBClientImpl; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse; import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import static org.junit.Assert.*; /** * Test ResourceManagerAdministrationProtocolPBClientImpl. Test a methods and the proxy without logic. 
*/ public class TestResourceManagerAdministrationProtocolPBClientImpl { private static ResourceManager resourceManager; private static final Log LOG = LogFactory .getLog(TestResourceManagerAdministrationProtocolPBClientImpl.class); private final RecordFactory recordFactory = RecordFactoryProvider .getRecordFactory(null); private static ResourceManagerAdministrationProtocol client; /** * Start resource manager server */ @BeforeClass public static void setUpResourceManager() throws IOException, InterruptedException { Configuration.addDefaultResource("config-with-security.xml"); Configuration configuration = new YarnConfiguration(); resourceManager = new ResourceManager() { @Override protected void doSecureLogin() throws IOException { } }; resourceManager.init(configuration); new Thread() { public void run() { resourceManager.start(); } }.start(); int waitCount = 0; while (resourceManager.getServiceState() == STATE.INITED && waitCount++ < 10) { LOG.info("Waiting for RM to start..."); Thread.sleep(1000); } if (resourceManager.getServiceState() != STATE.STARTED) { throw new IOException("ResourceManager failed to start. Final state is " + resourceManager.getServiceState()); } LOG.info("ResourceManager RMAdmin address: " + configuration.get(YarnConfiguration.RM_ADMIN_ADDRESS)); client = new ResourceManagerAdministrationProtocolPBClientImpl(1L, getProtocolAddress(configuration), configuration); } /** * Test method refreshQueues. This method is present and it works. */ @Test public void testRefreshQueues() throws Exception { RefreshQueuesRequest request = recordFactory .newRecordInstance(RefreshQueuesRequest.class); RefreshQueuesResponse response = client.refreshQueues(request); assertNotNull(response); } /** * Test method refreshNodes. This method is present and it works. 
*/ @Test public void testRefreshNodes() throws Exception { resourceManager.getClientRMService(); RefreshNodesRequest request = RefreshNodesRequest .newInstance(DecommissionType.NORMAL); RefreshNodesResponse response = client.refreshNodes(request); assertNotNull(response); } /** * Test method refreshSuperUserGroupsConfiguration. This method present and it works. */ @Test public void testRefreshSuperUserGroupsConfiguration() throws Exception { RefreshSuperUserGroupsConfigurationRequest request = recordFactory .newRecordInstance(RefreshSuperUserGroupsConfigurationRequest.class); RefreshSuperUserGroupsConfigurationResponse response = client .refreshSuperUserGroupsConfiguration(request); assertNotNull(response); } /** * Test method refreshUserToGroupsMappings. This method is present and it works. */ @Test public void testRefreshUserToGroupsMappings() throws Exception { RefreshUserToGroupsMappingsRequest request = recordFactory .newRecordInstance(RefreshUserToGroupsMappingsRequest.class); RefreshUserToGroupsMappingsResponse response = client .refreshUserToGroupsMappings(request); assertNotNull(response); } /** * Test method refreshAdminAcls. This method is present and it works. 
*/ @Test public void testRefreshAdminAcls() throws Exception { RefreshAdminAclsRequest request = recordFactory .newRecordInstance(RefreshAdminAclsRequest.class); RefreshAdminAclsResponse response = client.refreshAdminAcls(request); assertNotNull(response); } @Test public void testUpdateNodeResource() throws Exception { UpdateNodeResourceRequest request = recordFactory .newRecordInstance(UpdateNodeResourceRequest.class); UpdateNodeResourceResponse response = client.updateNodeResource(request); assertNotNull(response); } @Test public void testRefreshServiceAcls() throws Exception { RefreshServiceAclsRequest request = recordFactory .newRecordInstance(RefreshServiceAclsRequest.class); RefreshServiceAclsResponse response = client.refreshServiceAcls(request); assertNotNull(response); } /** * Stop server */ @AfterClass public static void tearDownResourceManager() throws InterruptedException { if (resourceManager != null) { LOG.info("Stopping ResourceManager..."); resourceManager.stop(); } } private static InetSocketAddress getProtocolAddress(Configuration conf) throws IOException { return conf.getSocketAddr(YarnConfiguration.RM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_RM_ADMIN_PORT); } }
7,839
37.62069
106
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.cli; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; import java.util.HashSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.ha.HAServiceStatus; import org.apache.hadoop.ha.HAServiceTarget; import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.yarn.api.records.DecommissionType; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import 
org.apache.hadoop.yarn.nodelabels.DummyCommonNodeLabelsManager; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; import org.apache.hadoop.yarn.util.Records; import org.junit.Before; import org.junit.Test; import org.mockito.ArgumentMatcher; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import com.google.common.base.Charsets; import com.google.common.collect.ImmutableSet; public class TestRMAdminCLI { private ResourceManagerAdministrationProtocol admin; private HAServiceProtocol haadmin; private RMAdminCLI rmAdminCLI; private RMAdminCLI rmAdminCLIWithHAEnabled; private CommonNodeLabelsManager dummyNodeLabelsManager; private boolean remoteAdminServiceAccessed = false; @SuppressWarnings("static-access") @Before public void configure() throws IOException, YarnException { remoteAdminServiceAccessed = false; admin = mock(ResourceManagerAdministrationProtocol.class); when(admin.addToClusterNodeLabels(any(AddToClusterNodeLabelsRequest.class))) .thenAnswer(new Answer<AddToClusterNodeLabelsResponse>() { @Override public 
AddToClusterNodeLabelsResponse answer( InvocationOnMock invocation) throws Throwable { remoteAdminServiceAccessed = true; return AddToClusterNodeLabelsResponse.newInstance(); } }); haadmin = mock(HAServiceProtocol.class); when(haadmin.getServiceStatus()).thenReturn(new HAServiceStatus( HAServiceProtocol.HAServiceState.INITIALIZING)); final HAServiceTarget haServiceTarget = mock(HAServiceTarget.class); when(haServiceTarget.getProxy(any(Configuration.class), anyInt())) .thenReturn(haadmin); rmAdminCLI = new RMAdminCLI(new Configuration()) { @Override protected ResourceManagerAdministrationProtocol createAdminProtocol() throws IOException { return admin; } @Override protected HAServiceTarget resolveTarget(String rmId) { return haServiceTarget; } }; initDummyNodeLabelsManager(); rmAdminCLI.localNodeLabelsManager = dummyNodeLabelsManager; YarnConfiguration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true); conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2"); rmAdminCLIWithHAEnabled = new RMAdminCLI(conf) { @Override protected ResourceManagerAdministrationProtocol createAdminProtocol() throws IOException { return admin; } @Override protected HAServiceTarget resolveTarget(String rmId) { return haServiceTarget; } }; } private void initDummyNodeLabelsManager() { Configuration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true); dummyNodeLabelsManager = new DummyCommonNodeLabelsManager(); dummyNodeLabelsManager.init(conf); } @Test(timeout=500) public void testRefreshQueues() throws Exception { String[] args = { "-refreshQueues" }; assertEquals(0, rmAdminCLI.run(args)); verify(admin).refreshQueues(any(RefreshQueuesRequest.class)); } @Test(timeout=500) public void testRefreshUserToGroupsMappings() throws Exception { String[] args = { "-refreshUserToGroupsMappings" }; assertEquals(0, rmAdminCLI.run(args)); verify(admin).refreshUserToGroupsMappings( any(RefreshUserToGroupsMappingsRequest.class)); } 
@Test(timeout=500) public void testRefreshSuperUserGroupsConfiguration() throws Exception { String[] args = { "-refreshSuperUserGroupsConfiguration" }; assertEquals(0, rmAdminCLI.run(args)); verify(admin).refreshSuperUserGroupsConfiguration( any(RefreshSuperUserGroupsConfigurationRequest.class)); } @Test(timeout=500) public void testRefreshAdminAcls() throws Exception { String[] args = { "-refreshAdminAcls" }; assertEquals(0, rmAdminCLI.run(args)); verify(admin).refreshAdminAcls(any(RefreshAdminAclsRequest.class)); } @Test(timeout=500) public void testRefreshServiceAcl() throws Exception { String[] args = { "-refreshServiceAcl" }; assertEquals(0, rmAdminCLI.run(args)); verify(admin).refreshServiceAcls(any(RefreshServiceAclsRequest.class)); } @Test(timeout=500) public void testRefreshNodes() throws Exception { String[] args = { "-refreshNodes" }; assertEquals(0, rmAdminCLI.run(args)); verify(admin).refreshNodes(any(RefreshNodesRequest.class)); } @Test public void testRefreshNodesWithGracefulTimeout() throws Exception { // graceful decommission before timeout String[] args = { "-refreshNodes", "-g", "1" }; CheckForDecommissioningNodesResponse response = Records .newRecord(CheckForDecommissioningNodesResponse.class); HashSet<NodeId> decomNodes = new HashSet<NodeId>(); response.setDecommissioningNodes(decomNodes); when(admin.checkForDecommissioningNodes(any( CheckForDecommissioningNodesRequest.class))).thenReturn(response); assertEquals(0, rmAdminCLI.run(args)); // verify(admin).refreshNodes(any(RefreshNodesRequest.class)); verify(admin).refreshNodes( RefreshNodesRequest.newInstance(DecommissionType.GRACEFUL)); // Forceful decommission when timeout occurs String[] focefulDecomArgs = { "-refreshNodes", "-g", "1" }; decomNodes = new HashSet<NodeId>(); response.setDecommissioningNodes(decomNodes); decomNodes.add(NodeId.newInstance("node1", 100)); response.setDecommissioningNodes(decomNodes); when(admin.checkForDecommissioningNodes(any( 
CheckForDecommissioningNodesRequest.class))).thenReturn(response); assertEquals(0, rmAdminCLI.run(focefulDecomArgs)); verify(admin).refreshNodes( RefreshNodesRequest.newInstance(DecommissionType.FORCEFUL)); // invalid graceful timeout parameter String[] invalidArgs = { "-refreshNodes", "-ginvalid", "invalid" }; assertEquals(-1, rmAdminCLI.run(invalidArgs)); // invalid timeout String[] invalidTimeoutArgs = { "-refreshNodes", "-g", "invalid" }; assertEquals(-1, rmAdminCLI.run(invalidTimeoutArgs)); // negative timeout String[] negativeTimeoutArgs = { "-refreshNodes", "-g", "-1000" }; assertEquals(-1, rmAdminCLI.run(negativeTimeoutArgs)); } @Test(timeout=500) public void testGetGroups() throws Exception { when(admin.getGroupsForUser(eq("admin"))).thenReturn( new String[] {"group1", "group2"}); PrintStream origOut = System.out; PrintStream out = mock(PrintStream.class); System.setOut(out); try { String[] args = { "-getGroups", "admin" }; assertEquals(0, rmAdminCLI.run(args)); verify(admin).getGroupsForUser(eq("admin")); verify(out).println(argThat(new ArgumentMatcher<StringBuilder>() { @Override public boolean matches(Object argument) { return ("" + argument).equals("admin : group1 group2"); } })); } finally { System.setOut(origOut); } } @Test(timeout = 500) public void testTransitionToActive() throws Exception { String[] args = {"-transitionToActive", "rm1"}; // RM HA is disabled. // transitionToActive should not be executed assertEquals(-1, rmAdminCLI.run(args)); verify(haadmin, never()).transitionToActive( any(HAServiceProtocol.StateChangeRequestInfo.class)); // Now RM HA is enabled. // transitionToActive should be executed assertEquals(0, rmAdminCLIWithHAEnabled.run(args)); verify(haadmin).transitionToActive( any(HAServiceProtocol.StateChangeRequestInfo.class)); // HAAdmin#isOtherTargetNodeActive should check state of non-target node. 
verify(haadmin, times(1)).getServiceStatus(); } @Test(timeout = 500) public void testTransitionToStandby() throws Exception { String[] args = {"-transitionToStandby", "rm1"}; // RM HA is disabled. // transitionToStandby should not be executed assertEquals(-1, rmAdminCLI.run(args)); verify(haadmin, never()).transitionToStandby( any(HAServiceProtocol.StateChangeRequestInfo.class)); // Now RM HA is enabled. // transitionToActive should be executed assertEquals(0, rmAdminCLIWithHAEnabled.run(args)); verify(haadmin).transitionToStandby( any(HAServiceProtocol.StateChangeRequestInfo.class)); } @Test(timeout = 500) public void testGetServiceState() throws Exception { String[] args = {"-getServiceState", "rm1"}; // RM HA is disabled. // getServiceState should not be executed assertEquals(-1, rmAdminCLI.run(args)); verify(haadmin, never()).getServiceStatus(); // Now RM HA is enabled. // getServiceState should be executed assertEquals(0, rmAdminCLIWithHAEnabled.run(args)); verify(haadmin).getServiceStatus(); } @Test(timeout = 500) public void testCheckHealth() throws Exception { String[] args = {"-checkHealth", "rm1"}; // RM HA is disabled. // getServiceState should not be executed assertEquals(-1, rmAdminCLI.run(args)); verify(haadmin, never()).monitorHealth(); // Now RM HA is enabled. 
// getServiceState should be executed assertEquals(0, rmAdminCLIWithHAEnabled.run(args)); verify(haadmin).monitorHealth(); } /** * Test printing of help messages */ @Test(timeout=500) public void testHelp() throws Exception { PrintStream oldOutPrintStream = System.out; PrintStream oldErrPrintStream = System.err; ByteArrayOutputStream dataOut = new ByteArrayOutputStream(); ByteArrayOutputStream dataErr = new ByteArrayOutputStream(); System.setOut(new PrintStream(dataOut)); System.setErr(new PrintStream(dataErr)); try { String[] args = { "-help" }; assertEquals(0, rmAdminCLI.run(args)); oldOutPrintStream.println(dataOut); assertTrue(dataOut .toString() .contains( "rmadmin is the command to execute YARN administrative commands.")); assertTrue(dataOut .toString() .contains( "yarn rmadmin [-refreshQueues] [-refreshNodes [-g [timeout in seconds]]] [-refreshSuper" + "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup" + " [username]] [-addToClusterNodeLabels <\"label1(exclusive=true),label2(exclusive=false),label3\">]" + " [-removeFromClusterNodeLabels <label1,label2,label3>] [-replaceLabelsOnNode " + "<\"node1[:port]=label1,label2 node2[:port]=label1\">] [-directlyAccessNodeLabelStore]] " + "[-help [cmd]]")); assertTrue(dataOut .toString() .contains( "-refreshQueues: Reload the queues' acls, states and scheduler " + "specific properties.")); assertTrue(dataOut .toString() .contains( "-refreshNodes [-g [timeout in seconds]]: Refresh the hosts information at the " + "ResourceManager.")); assertTrue(dataOut.toString().contains( "-refreshUserToGroupsMappings: Refresh user-to-groups mappings")); assertTrue(dataOut .toString() .contains( "-refreshSuperUserGroupsConfiguration: Refresh superuser proxy" + " groups mappings")); assertTrue(dataOut .toString() .contains( "-refreshAdminAcls: Refresh acls for administration of " + "ResourceManager")); assertTrue(dataOut .toString() .contains( "-refreshServiceAcl: Reload the 
service-level authorization" + " policy file")); assertTrue(dataOut .toString() .contains( "-help [cmd]: Displays help for the given command or all " + "commands if none")); testError(new String[] { "-help", "-refreshQueues" }, "Usage: yarn rmadmin [-refreshQueues]", dataErr, 0); testError(new String[] { "-help", "-refreshNodes" }, "Usage: yarn rmadmin [-refreshNodes [-g [timeout in seconds]]]", dataErr, 0); testError(new String[] { "-help", "-refreshUserToGroupsMappings" }, "Usage: yarn rmadmin [-refreshUserToGroupsMappings]", dataErr, 0); testError( new String[] { "-help", "-refreshSuperUserGroupsConfiguration" }, "Usage: yarn rmadmin [-refreshSuperUserGroupsConfiguration]", dataErr, 0); testError(new String[] { "-help", "-refreshAdminAcls" }, "Usage: yarn rmadmin [-refreshAdminAcls]", dataErr, 0); testError(new String[] { "-help", "-refreshServiceAcl" }, "Usage: yarn rmadmin [-refreshServiceAcl]", dataErr, 0); testError(new String[] { "-help", "-getGroups" }, "Usage: yarn rmadmin [-getGroups [username]]", dataErr, 0); testError(new String[] { "-help", "-transitionToActive" }, "Usage: yarn rmadmin [-transitionToActive [--forceactive]" + " <serviceId>]", dataErr, 0); testError(new String[] { "-help", "-transitionToStandby" }, "Usage: yarn rmadmin [-transitionToStandby <serviceId>]", dataErr, 0); testError(new String[] { "-help", "-getServiceState" }, "Usage: yarn rmadmin [-getServiceState <serviceId>]", dataErr, 0); testError(new String[] { "-help", "-checkHealth" }, "Usage: yarn rmadmin [-checkHealth <serviceId>]", dataErr, 0); testError(new String[] { "-help", "-failover" }, "Usage: yarn rmadmin " + "[-failover [--forcefence] [--forceactive] " + "<serviceId> <serviceId>]", dataErr, 0); testError(new String[] { "-help", "-badParameter" }, "Usage: yarn rmadmin", dataErr, 0); testError(new String[] { "-badParameter" }, "badParameter: Unknown command", dataErr, -1); // Test -help when RM HA is enabled assertEquals(0, rmAdminCLIWithHAEnabled.run(args)); 
oldOutPrintStream.println(dataOut); String expectedHelpMsg = "yarn rmadmin [-refreshQueues] [-refreshNodes [-g [timeout in seconds]]] [-refreshSuper" + "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup" + " [username]] [-addToClusterNodeLabels <\"label1(exclusive=true)," + "label2(exclusive=false),label3\">]" + " [-removeFromClusterNodeLabels <label1,label2,label3>] [-replaceLabelsOnNode " + "<\"node1[:port]=label1,label2 node2[:port]=label1\">] [-directlyAccessNodeLabelStore]] " + "[-transitionToActive [--forceactive] <serviceId>] " + "[-transitionToStandby <serviceId>] " + "[-getServiceState <serviceId>] [-checkHealth <serviceId>] [-help [cmd]]"; String actualHelpMsg = dataOut.toString(); assertTrue(String.format("Help messages: %n " + actualHelpMsg + " %n doesn't include expected " + "messages: %n" + expectedHelpMsg), actualHelpMsg.contains(expectedHelpMsg )); } finally { System.setOut(oldOutPrintStream); System.setErr(oldErrPrintStream); } } @Test(timeout=500) public void testException() throws Exception { PrintStream oldErrPrintStream = System.err; ByteArrayOutputStream dataErr = new ByteArrayOutputStream(); System.setErr(new PrintStream(dataErr)); try { when(admin.refreshQueues(any(RefreshQueuesRequest.class))) .thenThrow(new IOException("test exception")); String[] args = { "-refreshQueues" }; assertEquals(-1, rmAdminCLI.run(args)); verify(admin).refreshQueues(any(RefreshQueuesRequest.class)); assertTrue(dataErr.toString().contains("refreshQueues: test exception")); } finally { System.setErr(oldErrPrintStream); } } @Test public void testAccessLocalNodeLabelManager() throws Exception { assertFalse(dummyNodeLabelsManager.getServiceState() == STATE.STOPPED); String[] args = { "-addToClusterNodeLabels", "x,y", "-directlyAccessNodeLabelStore" }; assertEquals(0, rmAdminCLI.run(args)); assertTrue(dummyNodeLabelsManager.getClusterNodeLabelNames().containsAll( ImmutableSet.of("x", "y"))); // reset 
localNodeLabelsManager dummyNodeLabelsManager.removeFromClusterNodeLabels(ImmutableSet.of("x", "y")); // change the sequence of "-directlyAccessNodeLabelStore" and labels, // should not matter args = new String[] { "-addToClusterNodeLabels", "-directlyAccessNodeLabelStore", "x,y" }; assertEquals(0, rmAdminCLI.run(args)); assertTrue(dummyNodeLabelsManager.getClusterNodeLabelNames().containsAll( ImmutableSet.of("x", "y"))); // local node labels manager will be close after running assertTrue(dummyNodeLabelsManager.getServiceState() == STATE.STOPPED); } @Test public void testAccessRemoteNodeLabelManager() throws Exception { String[] args = { "-addToClusterNodeLabels", "x,y" }; assertEquals(0, rmAdminCLI.run(args)); // localNodeLabelsManager shouldn't accessed assertTrue(dummyNodeLabelsManager.getClusterNodeLabelNames().isEmpty()); // remote node labels manager accessed assertTrue(remoteAdminServiceAccessed); } @Test public void testAddToClusterNodeLabels() throws Exception { // successfully add labels String[] args = { "-addToClusterNodeLabels", "x", "-directlyAccessNodeLabelStore" }; assertEquals(0, rmAdminCLI.run(args)); assertTrue(dummyNodeLabelsManager.getClusterNodeLabelNames().containsAll( ImmutableSet.of("x"))); // no labels, should fail args = new String[] { "-addToClusterNodeLabels" }; assertTrue(0 != rmAdminCLI.run(args)); // no labels, should fail args = new String[] { "-addToClusterNodeLabels", "-directlyAccessNodeLabelStore" }; assertTrue(0 != rmAdminCLI.run(args)); // no labels, should fail at client validation args = new String[] { "-addToClusterNodeLabels", " " }; assertTrue(0 != rmAdminCLI.run(args)); // no labels, should fail at client validation args = new String[] { "-addToClusterNodeLabels", " , " }; assertTrue(0 != rmAdminCLI.run(args)); // successfully add labels args = new String[] { "-addToClusterNodeLabels", ",x,,", "-directlyAccessNodeLabelStore" }; assertEquals(0, rmAdminCLI.run(args)); 
assertTrue(dummyNodeLabelsManager.getClusterNodeLabelNames().containsAll( ImmutableSet.of("x"))); } @Test public void testAddToClusterNodeLabelsWithExclusivitySetting() throws Exception { // Parenthese not match String[] args = new String[] { "-addToClusterNodeLabels", "x(" }; assertTrue(0 != rmAdminCLI.run(args)); args = new String[] { "-addToClusterNodeLabels", "x)" }; assertTrue(0 != rmAdminCLI.run(args)); // Not expected key=value specifying inner parentese args = new String[] { "-addToClusterNodeLabels", "x(key=value)" }; assertTrue(0 != rmAdminCLI.run(args)); // Not key is expected, but value not args = new String[] { "-addToClusterNodeLabels", "x(exclusive=)" }; assertTrue(0 != rmAdminCLI.run(args)); // key=value both set args = new String[] { "-addToClusterNodeLabels", "w,x(exclusive=true), y(exclusive=false),z()", "-directlyAccessNodeLabelStore" }; assertTrue(0 == rmAdminCLI.run(args)); assertTrue(dummyNodeLabelsManager.isExclusiveNodeLabel("w")); assertTrue(dummyNodeLabelsManager.isExclusiveNodeLabel("x")); assertFalse(dummyNodeLabelsManager.isExclusiveNodeLabel("y")); assertTrue(dummyNodeLabelsManager.isExclusiveNodeLabel("z")); // key=value both set, and some spaces need to be handled args = new String[] { "-addToClusterNodeLabels", "a (exclusive= true) , b( exclusive =false),c ", "-directlyAccessNodeLabelStore" }; assertTrue(0 == rmAdminCLI.run(args)); assertTrue(dummyNodeLabelsManager.isExclusiveNodeLabel("a")); assertFalse(dummyNodeLabelsManager.isExclusiveNodeLabel("b")); assertTrue(dummyNodeLabelsManager.isExclusiveNodeLabel("c")); } @Test public void testRemoveFromClusterNodeLabels() throws Exception { // Successfully remove labels dummyNodeLabelsManager.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y")); String[] args = { "-removeFromClusterNodeLabels", "x,,y", "-directlyAccessNodeLabelStore" }; assertEquals(0, rmAdminCLI.run(args)); assertTrue(dummyNodeLabelsManager.getClusterNodeLabelNames().isEmpty()); // no labels, should 
fail args = new String[] { "-removeFromClusterNodeLabels" }; assertTrue(0 != rmAdminCLI.run(args)); // no labels, should fail args = new String[] { "-removeFromClusterNodeLabels", "-directlyAccessNodeLabelStore" }; assertTrue(0 != rmAdminCLI.run(args)); // no labels, should fail at client validation args = new String[] { "-removeFromClusterNodeLabels", " " }; assertTrue(0 != rmAdminCLI.run(args)); // no labels, should fail at client validation args = new String[] { "-removeFromClusterNodeLabels", ", " }; assertTrue(0 != rmAdminCLI.run(args)); } @Test public void testReplaceLabelsOnNode() throws Exception { // Successfully replace labels dummyNodeLabelsManager .addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y", "Y")); String[] args = { "-replaceLabelsOnNode", "node1:8000,x node2:8000=y node3,x node4=Y", "-directlyAccessNodeLabelStore" }; assertEquals(0, rmAdminCLI.run(args)); assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( NodeId.newInstance("node1", 8000))); assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( NodeId.newInstance("node2", 8000))); assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( NodeId.newInstance("node3", 0))); assertTrue(dummyNodeLabelsManager.getNodeLabels().containsKey( NodeId.newInstance("node4", 0))); // no labels, should fail args = new String[] { "-replaceLabelsOnNode" }; assertTrue(0 != rmAdminCLI.run(args)); // no labels, should fail args = new String[] { "-replaceLabelsOnNode", "-directlyAccessNodeLabelStore" }; assertTrue(0 != rmAdminCLI.run(args)); // no labels, should fail args = new String[] { "-replaceLabelsOnNode", " " }; assertTrue(0 != rmAdminCLI.run(args)); args = new String[] { "-replaceLabelsOnNode", ", " }; assertTrue(0 != rmAdminCLI.run(args)); } @Test public void testReplaceMultipleLabelsOnSingleNode() throws Exception { // Successfully replace labels dummyNodeLabelsManager.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y")); String[] args = { 
"-replaceLabelsOnNode", "node1,x,y", "-directlyAccessNodeLabelStore" }; assertTrue(0 != rmAdminCLI.run(args)); } private void testError(String[] args, String template, ByteArrayOutputStream data, int resultCode) throws Exception { int actualResultCode = rmAdminCLI.run(args); assertEquals("Expected result code: " + resultCode + ", actual result code is: " + actualResultCode, resultCode, actualResultCode); assertTrue(String.format("Expected error message: %n" + template + " is not included in messages: %n" + data.toString()), data.toString().contains(template)); data.reset(); } @Test public void testRMHAErrorUsage() throws Exception { ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream(); rmAdminCLIWithHAEnabled.setErrOut(new PrintStream(errOutBytes)); try { String[] args = { "-failover" }; assertEquals(-1, rmAdminCLIWithHAEnabled.run(args)); String errOut = new String(errOutBytes.toByteArray(), Charsets.UTF_8); errOutBytes.reset(); assertTrue(errOut.contains("Usage: rmadmin")); } finally { rmAdminCLIWithHAEnabled.setErrOut(System.err); } } }
27,187
39.338279
116
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.cli; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import java.io.ByteArrayOutputStream; import java.io.DataOutputStream; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.io.PrintStream; import java.io.PrintWriter; import java.io.Writer; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ContainerId; import 
org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat; import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers; import org.junit.Before; import org.junit.Test; public class TestLogsCLI { ByteArrayOutputStream sysOutStream; private PrintStream sysOut; ByteArrayOutputStream sysErrStream; private PrintStream sysErr; @Before public void setUp() { sysOutStream = new ByteArrayOutputStream(); sysOut = new PrintStream(sysOutStream); System.setOut(sysOut); sysErrStream = new ByteArrayOutputStream(); sysErr = new PrintStream(sysErrStream); System.setErr(sysErr); } @Test(timeout = 5000l) public void testFailResultCodes() throws Exception { Configuration conf = new YarnConfiguration(); conf.setClass("fs.file.impl", LocalFileSystem.class, FileSystem.class); LogCLIHelpers cliHelper = new LogCLIHelpers(); cliHelper.setConf(conf); YarnClient mockYarnClient = createMockYarnClient(YarnApplicationState.FINISHED); LogsCLI dumper = new LogsCLIForTest(mockYarnClient); dumper.setConf(conf); // verify dumping a non-existent application's logs returns a failure code int exitCode = dumper.run( new String[] { "-applicationId", "application_0_0" } ); assertTrue("Should return an error code", exitCode != 0); // verify dumping a non-existent container log is a failure code exitCode = cliHelper.dumpAContainersLogs("application_0_0", "container_0_0", "nonexistentnode:1234", "nobody"); assertTrue("Should return an error code", exitCode != 
0); } @Test(timeout = 5000l) public void testInvalidApplicationId() throws Exception { Configuration conf = new YarnConfiguration(); YarnClient mockYarnClient = createMockYarnClient(YarnApplicationState.FINISHED); LogsCLI cli = new LogsCLIForTest(mockYarnClient); cli.setConf(conf); int exitCode = cli.run( new String[] { "-applicationId", "not_an_app_id"}); assertTrue(exitCode == -1); assertTrue(sysErrStream.toString().startsWith("Invalid ApplicationId specified")); } @Test(timeout = 5000l) public void testUnknownApplicationId() throws Exception { Configuration conf = new YarnConfiguration(); YarnClient mockYarnClient = createMockYarnClientUnknownApp(); LogsCLI cli = new LogsCLIForTest(mockYarnClient); cli.setConf(conf); int exitCode = cli.run(new String[] { "-applicationId", ApplicationId.newInstance(1, 1).toString() }); // Error since no logs present for the app. assertTrue(exitCode != 0); assertTrue(sysErrStream.toString().startsWith( "Unable to get ApplicationState")); } @Test(timeout = 5000l) public void testHelpMessage() throws Exception { Configuration conf = new YarnConfiguration(); YarnClient mockYarnClient = createMockYarnClient(YarnApplicationState.FINISHED); LogsCLI dumper = new LogsCLIForTest(mockYarnClient); dumper.setConf(conf); int exitCode = dumper.run(new String[]{}); assertTrue(exitCode == -1); ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); pw.println("Retrieve logs for completed YARN applications."); pw.println("usage: yarn logs -applicationId <application ID> [OPTIONS]"); pw.println(); pw.println("general options are:"); pw.println(" -am <AM Containers> Prints the AM Container logs for this"); pw.println(" application. Specify comma-separated"); pw.println(" value to get logs for related AM"); pw.println(" Container. For example, If we specify -am"); pw.println(" 1,2, we will get the logs for the first"); pw.println(" AM Container as well as the second AM"); pw.println(" Container. 
To get logs for all AM"); pw.println(" Containers, use -am ALL. To get logs for"); pw.println(" the latest AM Container, use -am -1. By"); pw.println(" default, it will only print out syslog."); pw.println(" Work with -logFiles to get other logs"); pw.println(" -appOwner <Application Owner> AppOwner (assumed to be current user if"); pw.println(" not specified)"); pw.println(" -containerId <Container ID> ContainerId. By default, it will only"); pw.println(" print syslog if the application is"); pw.println(" runing. Work with -logFiles to get other"); pw.println(" logs."); pw.println(" -help Displays help for all commands."); pw.println(" -logFiles <Log File Name> Work with -am/-containerId and specify"); pw.println(" comma-separated value to get specified"); pw.println(" Container log files"); pw.println(" -nodeAddress <Node Address> NodeAddress in the format nodename:port"); pw.close(); String appReportStr = baos.toString("UTF-8"); Assert.assertEquals(appReportStr, sysOutStream.toString()); } @Test (timeout = 15000) public void testFetchApplictionLogs() throws Exception { String remoteLogRootDir = "target/logs/"; Configuration configuration = new Configuration(); configuration.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true); configuration .set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogRootDir); configuration.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true); configuration.set(YarnConfiguration.YARN_ADMIN_ACL, "admin"); FileSystem fs = FileSystem.get(configuration); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); ApplicationId appId = ApplicationIdPBImpl.newInstance(0, 1); ApplicationAttemptId appAttemptId = ApplicationAttemptIdPBImpl.newInstance(appId, 1); ContainerId containerId0 = ContainerIdPBImpl.newContainerId(appAttemptId, 0); ContainerId containerId1 = ContainerIdPBImpl.newContainerId(appAttemptId, 1); ContainerId containerId2 = ContainerIdPBImpl.newContainerId(appAttemptId, 2); ContainerId containerId3 = 
ContainerIdPBImpl.newContainerId(appAttemptId, 3); NodeId nodeId = NodeId.newInstance("localhost", 1234); // create local logs String rootLogDir = "target/LocalLogs"; Path rootLogDirPath = new Path(rootLogDir); if (fs.exists(rootLogDirPath)) { fs.delete(rootLogDirPath, true); } assertTrue(fs.mkdirs(rootLogDirPath)); Path appLogsDir = new Path(rootLogDirPath, appId.toString()); if (fs.exists(appLogsDir)) { fs.delete(appLogsDir, true); } assertTrue(fs.mkdirs(appLogsDir)); List<String> rootLogDirs = Arrays.asList(rootLogDir); List<String> logTypes = new ArrayList<String>(); logTypes.add("syslog"); // create container logs in localLogDir createContainerLogInLocalDir(appLogsDir, containerId1, fs, logTypes); createContainerLogInLocalDir(appLogsDir, containerId2, fs, logTypes); // create two logs for container3 in localLogDir logTypes.add("stdout"); createContainerLogInLocalDir(appLogsDir, containerId3, fs, logTypes); Path path = new Path(remoteLogRootDir + ugi.getShortUserName() + "/logs/application_0_0001"); if (fs.exists(path)) { fs.delete(path, true); } assertTrue(fs.mkdirs(path)); // upload container logs into remote directory // the first two logs is empty. When we try to read first two logs, // we will meet EOF exception, but it will not impact other logs. // Other logs should be read successfully. 
uploadEmptyContainerLogIntoRemoteDir(ugi, configuration, rootLogDirs, nodeId, containerId0, path, fs); uploadEmptyContainerLogIntoRemoteDir(ugi, configuration, rootLogDirs, nodeId, containerId1, path, fs); uploadContainerLogIntoRemoteDir(ugi, configuration, rootLogDirs, nodeId, containerId1, path, fs); uploadContainerLogIntoRemoteDir(ugi, configuration, rootLogDirs, nodeId, containerId2, path, fs); uploadContainerLogIntoRemoteDir(ugi, configuration, rootLogDirs, nodeId, containerId3, path, fs); YarnClient mockYarnClient = createMockYarnClient(YarnApplicationState.FINISHED); LogsCLI cli = new LogsCLIForTest(mockYarnClient); cli.setConf(configuration); int exitCode = cli.run(new String[] { "-applicationId", appId.toString() }); assertTrue(exitCode == 0); assertTrue(sysOutStream.toString().contains( "Hello container_0_0001_01_000001 in syslog!")); assertTrue(sysOutStream.toString().contains( "Hello container_0_0001_01_000002 in syslog!")); assertTrue(sysOutStream.toString().contains( "Hello container_0_0001_01_000003 in syslog!")); assertTrue(sysOutStream.toString().contains( "Hello container_0_0001_01_000003 in stdout!")); sysOutStream.reset(); // uploaded two logs for container1. The first log is empty. // The second one is not empty. // We can still successfully read logs for container1. exitCode = cli.run(new String[] { "-applicationId", appId.toString(), "-nodeAddress", nodeId.toString(), "-containerId", containerId1.toString() }); assertTrue(exitCode == 0); assertTrue(sysOutStream.toString().contains( "Hello container_0_0001_01_000001 in syslog!")); assertTrue(sysOutStream.toString().contains("Log Upload Time")); assertTrue(!sysOutStream.toString().contains( "Logs for container " + containerId1.toString() + " are not present in this log-file.")); sysOutStream.reset(); // Uploaded the empty log for container0. // We should see the message showing the log for container0 // are not present. 
exitCode = cli.run(new String[] { "-applicationId", appId.toString(), "-nodeAddress", nodeId.toString(), "-containerId", containerId0.toString() }); assertTrue(exitCode == -1); assertTrue(sysOutStream.toString().contains( "Logs for container " + containerId0.toString() + " are not present in this log-file.")); sysOutStream.reset(); // uploaded two logs for container3. The first log is named as syslog. // The second one is named as stdout. exitCode = cli.run(new String[] { "-applicationId", appId.toString(), "-nodeAddress", nodeId.toString(), "-containerId", containerId3.toString() }); assertTrue(exitCode == 0); assertTrue(sysOutStream.toString().contains( "Hello container_0_0001_01_000003 in syslog!")); assertTrue(sysOutStream.toString().contains( "Hello container_0_0001_01_000003 in stdout!")); sysOutStream.reset(); // set -logFiles option as stdout // should only print log with the name as stdout exitCode = cli.run(new String[] { "-applicationId", appId.toString(), "-nodeAddress", nodeId.toString(), "-containerId", containerId3.toString() , "-logFiles", "stdout"}); assertTrue(exitCode == 0); assertTrue(sysOutStream.toString().contains( "Hello container_0_0001_01_000003 in stdout!")); assertTrue(!sysOutStream.toString().contains( "Hello container_0_0001_01_000003 in syslog!")); sysOutStream.reset(); fs.delete(new Path(remoteLogRootDir), true); fs.delete(new Path(rootLogDir), true); } private static void createContainerLogInLocalDir(Path appLogsDir, ContainerId containerId, FileSystem fs, List<String> logTypes) throws Exception { Path containerLogsDir = new Path(appLogsDir, containerId.toString()); if (fs.exists(containerLogsDir)) { fs.delete(containerLogsDir, true); } assertTrue(fs.mkdirs(containerLogsDir)); for (String logType : logTypes) { Writer writer = new FileWriter(new File(containerLogsDir.toString(), logType)); writer.write("Hello " + containerId + " in " + logType + "!"); writer.close(); } } private static void 
uploadContainerLogIntoRemoteDir(UserGroupInformation ugi, Configuration configuration, List<String> rootLogDirs, NodeId nodeId, ContainerId containerId, Path appDir, FileSystem fs) throws Exception { Path path = new Path(appDir, LogAggregationUtils.getNodeString(nodeId) + System.currentTimeMillis()); AggregatedLogFormat.LogWriter writer = new AggregatedLogFormat.LogWriter(configuration, path, ugi); writer.writeApplicationOwner(ugi.getUserName()); Map<ApplicationAccessType, String> appAcls = new HashMap<ApplicationAccessType, String>(); appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName()); writer.writeApplicationACLs(appAcls); writer.append(new AggregatedLogFormat.LogKey(containerId), new AggregatedLogFormat.LogValue(rootLogDirs, containerId, UserGroupInformation.getCurrentUser().getShortUserName())); writer.close(); } private static void uploadEmptyContainerLogIntoRemoteDir(UserGroupInformation ugi, Configuration configuration, List<String> rootLogDirs, NodeId nodeId, ContainerId containerId, Path appDir, FileSystem fs) throws Exception { Path path = new Path(appDir, LogAggregationUtils.getNodeString(nodeId) + System.currentTimeMillis()); AggregatedLogFormat.LogWriter writer = new AggregatedLogFormat.LogWriter(configuration, path, ugi); writer.writeApplicationOwner(ugi.getUserName()); Map<ApplicationAccessType, String> appAcls = new HashMap<ApplicationAccessType, String>(); appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName()); writer.writeApplicationACLs(appAcls); DataOutputStream out = writer.getWriter().prepareAppendKey(-1); new AggregatedLogFormat.LogKey(containerId).write(out); out.close(); out = writer.getWriter().prepareAppendValue(-1); new AggregatedLogFormat.LogValue(rootLogDirs, containerId, UserGroupInformation.getCurrentUser().getShortUserName()).write(out, new HashSet<File>()); out.close(); writer.close(); } private YarnClient createMockYarnClient(YarnApplicationState appState) throws YarnException, IOException { YarnClient 
mockClient = mock(YarnClient.class); ApplicationReport mockAppReport = mock(ApplicationReport.class); doReturn(appState).when(mockAppReport).getYarnApplicationState(); doReturn(mockAppReport).when(mockClient).getApplicationReport( any(ApplicationId.class)); return mockClient; } private YarnClient createMockYarnClientUnknownApp() throws YarnException, IOException { YarnClient mockClient = mock(YarnClient.class); doThrow(new YarnException("Unknown AppId")).when(mockClient) .getApplicationReport(any(ApplicationId.class)); return mockClient; } private static class LogsCLIForTest extends LogsCLI { private YarnClient yarnClient; public LogsCLIForTest(YarnClient yarnClient) { super(); this.yarnClient = yarnClient; } protected YarnClient createYarnClient() { return yarnClient; } } }
17,982
42.542373
93
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.cli; import static org.junit.Assert.assertEquals; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.isA; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; import java.io.PrintWriter; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; import org.apache.commons.cli.Options; import org.apache.commons.lang.time.DateFormatUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import 
org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mortbay.log.Log;

import com.google.common.collect.ImmutableSet;

/**
 * Unit tests for the YARN command-line interfaces (ApplicationCLI and
 * NodeCLI). A mocked {@link YarnClient} supplies canned reports and each
 * test compares the CLI's captured stdout, byte for byte, against an
 * expected text built with a PrintWriter.
 */
public class TestYarnCLI {

  // Mocked RPC client; each test stubs exactly the calls it needs.
  private YarnClient client = mock(YarnClient.class);
  // In-memory capture of stdout/stderr so exact CLI output can be asserted.
  ByteArrayOutputStream sysOutStream;
  private PrintStream sysOut;
  ByteArrayOutputStream sysErrStream;
  private PrintStream sysErr;

  /**
   * Redirects System.out to an in-memory stream before every test.
   * NOTE(review): sysErr is created but System.setErr is not called here;
   * stderr-capturing tests pass sysErr to the CLI explicitly.
   */
  @Before
  public void setup() {
    sysOutStream = new ByteArrayOutputStream();
    sysOut = spy(new PrintStream(sysOutStream));
    sysErrStream = new ByteArrayOutputStream();
    sysErr = spy(new PrintStream(sysErrStream));
    System.setOut(sysOut);
  }

  /**
   * "application -status" should print the full application report.
   * Iteration 0 uses a null usage report (aggregate resource allocation
   * rendered as N/A); iteration 1 uses a populated one.
   */
  @Test
  public void testGetApplicationReport() throws Exception {
    for (int i = 0; i < 2; ++i) {
      ApplicationCLI cli = createAndGetAppCLI();
      ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
      ApplicationResourceUsageReport usageReport = i == 0 ? null :
          ApplicationResourceUsageReport.newInstance(
              2, 0, null, null, null, 123456, 4567);
      ApplicationReport newApplicationReport = ApplicationReport.newInstance(
          applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
          "user", "queue", "appname", "host", 124, null,
          YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
          FinalApplicationStatus.SUCCEEDED, usageReport, "N/A", 0.53789f,
          "YARN", null, null, false);
      newApplicationReport.setLogAggregationStatus(
          LogAggregationStatus.SUCCEEDED);
      newApplicationReport.setPriority(Priority.newInstance(0));
      when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
          newApplicationReport);
      int result = cli.run(new String[] { "application", "-status",
          applicationId.toString() });
      assertEquals(0, result);
      // 1 + i: the same mock accumulates invocations across loop iterations.
      verify(client, times(1 + i)).getApplicationReport(applicationId);
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      PrintWriter pw = new PrintWriter(baos);
      pw.println("Application Report : ");
      pw.println("\tApplication-Id : application_1234_0005");
      pw.println("\tApplication-Name : appname");
      pw.println("\tApplication-Type : YARN");
      pw.println("\tUser : user");
      pw.println("\tQueue : queue");
      pw.println("\tApplication Priority : 0");
      pw.println("\tStart-Time : 0");
      pw.println("\tFinish-Time : 0");
      pw.println("\tProgress : 53.79%");
      pw.println("\tState : FINISHED");
      pw.println("\tFinal-State : SUCCEEDED");
      pw.println("\tTracking-URL : N/A");
      pw.println("\tRPC Port : 124");
      pw.println("\tAM Host : host");
      pw.println("\tAggregate Resource Allocation : " +
          (i == 0 ? "N/A" : "123456 MB-seconds, 4567 vcore-seconds"));
      pw.println("\tLog Aggregation Status : SUCCEEDED");
      pw.println("\tDiagnostics : diagnostics");
      pw.println("\tUnmanaged Application : false");
      pw.close();
      String appReportStr = baos.toString("UTF-8");
      Assert.assertEquals(appReportStr, sysOutStream.toString());
      sysOutStream.reset();
      verify(sysOut, times(1 + i)).println(isA(String.class));
    }
  }

  /** "applicationattempt -status" prints a single attempt report. */
  @Test
  public void testGetApplicationAttemptReport() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
        applicationId, 1);
    ApplicationAttemptReport attemptReport = ApplicationAttemptReport
        .newInstance(attemptId, "host", 124, "url", "oUrl", "diagnostics",
            YarnApplicationAttemptState.FINISHED,
            ContainerId.newContainerId(attemptId, 1), 1000l, 2000l);
    when(client.getApplicationAttemptReport(any(ApplicationAttemptId.class)))
        .thenReturn(attemptReport);
    int result = cli.run(new String[] { "applicationattempt", "-status",
        attemptId.toString() });
    assertEquals(0, result);
    verify(client).getApplicationAttemptReport(attemptId);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.println("Application Attempt Report : ");
    pw.println("\tApplicationAttempt-Id : appattempt_1234_0005_000001");
    pw.println("\tState : FINISHED");
    pw.println("\tAMContainer : container_1234_0005_01_000001");
    pw.println("\tTracking-URL : url");
    pw.println("\tRPC Port : 124");
    pw.println("\tAM Host : host");
    pw.println("\tDiagnostics : diagnostics");
    pw.close();
    String appReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appReportStr, sysOutStream.toString());
    verify(sysOut, times(1)).println(isA(String.class));
  }

  /** "applicationattempt -list" prints a table with one row per attempt. */
  @Test
  public void testGetApplicationAttempts() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
        applicationId, 1);
    ApplicationAttemptId attemptId1 = ApplicationAttemptId.newInstance(
        applicationId, 2);
    ApplicationAttemptReport attemptReport = ApplicationAttemptReport
        .newInstance(attemptId, "host", 124, "url", "oUrl", "diagnostics",
            YarnApplicationAttemptState.FINISHED, ContainerId.newContainerId(
                attemptId, 1));
    ApplicationAttemptReport attemptReport1 = ApplicationAttemptReport
        .newInstance(attemptId1, "host", 124, "url", "oUrl", "diagnostics",
            YarnApplicationAttemptState.FINISHED, ContainerId.newContainerId(
                attemptId1, 1));
    List<ApplicationAttemptReport> reports =
        new ArrayList<ApplicationAttemptReport>();
    reports.add(attemptReport);
    reports.add(attemptReport1);
    when(client.getApplicationAttempts(any(ApplicationId.class)))
        .thenReturn(reports);
    int result = cli.run(new String[] { "applicationattempt", "-list",
        applicationId.toString() });
    assertEquals(0, result);
    verify(client).getApplicationAttempts(applicationId);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.println("Total number of application attempts :2");
    pw.print(" ApplicationAttempt-Id");
    pw.print("\t State");
    pw.print("\t AM-Container-Id");
    pw.println("\t Tracking-URL");
    pw.print(" appattempt_1234_0005_000001");
    pw.print("\t FINISHED");
    pw.print("\t container_1234_0005_01_000001");
    pw.println("\t url");
    pw.print(" appattempt_1234_0005_000002");
    pw.print("\t FINISHED");
    pw.print("\t container_1234_0005_02_000001");
    pw.println("\t url");
    pw.close();
    String appReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appReportStr, sysOutStream.toString());
  }

  /** "container -status" prints a single container report. */
  @Test
  public void testGetContainerReport() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
        applicationId, 1);
    ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
    ContainerReport container =
ContainerReport.newInstance(containerId, null,
        NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678,
        "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE,
        "http://" + NodeId.newInstance("host", 2345).toString());
    when(client.getContainerReport(any(ContainerId.class))).thenReturn(
        container);
    int result = cli.run(new String[] { "container", "-status",
        containerId.toString() });
    assertEquals(0, result);
    verify(client).getContainerReport(containerId);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.println("Container Report : ");
    pw.println("\tContainer-Id : container_1234_0005_01_000001");
    pw.println("\tStart-Time : 1234");
    pw.println("\tFinish-Time : 5678");
    pw.println("\tState : COMPLETE");
    pw.println("\tLOG-URL : logURL");
    pw.println("\tHost : host:1234");
    pw.println("\tNodeHttpAddress : http://host:2345");
    pw.println("\tDiagnostics : diagnosticInfo");
    pw.close();
    String appReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appReportStr, sysOutStream.toString());
    verify(sysOut, times(1)).println(isA(String.class));
  }

  /**
   * "container -list" prints one row per container; the still-RUNNING
   * container shows N/A for its finish time and an empty log URL.
   */
  @Test
  public void testGetContainers() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
        applicationId, 1);
    ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
    ContainerId containerId1 = ContainerId.newContainerId(attemptId, 2);
    ContainerId containerId2 = ContainerId.newContainerId(attemptId, 3);
    long time1 = 1234, time2 = 5678;
    ContainerReport container = ContainerReport.newInstance(containerId, null,
        NodeId.newInstance("host", 1234), Priority.UNDEFINED, time1, time2,
        "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE,
        "http://" + NodeId.newInstance("host", 2345).toString());
    ContainerReport container1 = ContainerReport.newInstance(containerId1,
        null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, time1,
        time2, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE,
        "http://" + NodeId.newInstance("host", 2345).toString());
    ContainerReport container2 = ContainerReport.newInstance(containerId2,
        null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, time1, 0,
        "diagnosticInfo", "", 0, ContainerState.RUNNING,
        "http://" + NodeId.newInstance("host", 2345).toString());
    List<ContainerReport> reports = new ArrayList<ContainerReport>();
    reports.add(container);
    reports.add(container1);
    reports.add(container2);
    // Expected timestamps are rendered with the same pattern the CLI uses.
    DateFormat dateFormat = new SimpleDateFormat("EEE MMM dd HH:mm:ss Z yyyy");
    when(client.getContainers(any(ApplicationAttemptId.class))).thenReturn(
        reports);
    sysOutStream.reset();
    int result = cli.run(new String[] { "container", "-list",
        attemptId.toString() });
    assertEquals(0, result);
    verify(client).getContainers(attemptId);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.println("Total number of containers :3");
    pw.print(" Container-Id");
    pw.print("\t Start Time");
    pw.print("\t Finish Time");
    pw.print("\t State");
    pw.print("\t Host");
    pw.print("\t Node Http Address");
    pw.println("\t LOG-URL");
    pw.print(" container_1234_0005_01_000001");
    pw.print("\t" + dateFormat.format(new Date(time1)));
    pw.print("\t" + dateFormat.format(new Date(time2)));
    pw.print("\t COMPLETE");
    pw.print("\t host:1234");
    pw.print("\t http://host:2345");
    pw.println("\t logURL");
    pw.print(" container_1234_0005_01_000002");
    pw.print("\t" + dateFormat.format(new Date(time1)));
    pw.print("\t" + dateFormat.format(new Date(time2)));
    pw.print("\t COMPLETE");
    pw.print("\t host:1234");
    pw.print("\t http://host:2345");
    pw.println("\t logURL");
    pw.print(" container_1234_0005_01_000003");
    pw.print("\t" + dateFormat.format(new Date(time1)));
    pw.print("\t N/A");
    pw.print("\t RUNNING");
    pw.print("\t host:1234");
    pw.print("\t http://host:2345");
    pw.println("\t ");
    pw.close();
    String appReportStr = baos.toString("UTF-8");
    // Debug logging of expected vs. actual output to ease diagnosis of
    // whitespace-sensitive mismatches.
    Log.info("ExpectedOutput");
    Log.info("[" + appReportStr + "]");
    Log.info("OutputFrom command");
    String actualOutput = sysOutStream.toString();
    Log.info("[" + actualOutput + "]");
    Assert.assertEquals(appReportStr, sysOutStream.toString());
  }

  /**
   * An ApplicationNotFoundException from the client should yield a friendly
   * message and a non-zero exit code instead of a stack trace.
   */
  @Test
  public void testGetApplicationReportException() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    when(client.getApplicationReport(any(ApplicationId.class))).thenThrow(
        new ApplicationNotFoundException("History file for application"
            + applicationId + " is not found"));
    int exitCode = cli.run(new String[] { "application", "-status",
        applicationId.toString() });
    verify(sysOut).println(
        "Application with id '" + applicationId
            + "' doesn't exist in RM or Timeline Server.");
    Assert.assertNotSame("should return non-zero exit code.", 0, exitCode);
  }

  /**
   * Exercises "application -list" with every combination of -appTypes and
   * -appStates filters against six canned application reports, asserting
   * the exact table printed for each filter. The sysOut write() counts are
   * cumulative across the sections (times(1) .. times(6)), so the section
   * order must not change.
   */
  @Test
  public void testGetApplications() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    ApplicationReport newApplicationReport = ApplicationReport.newInstance(
        applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
        "user", "queue", "appname", "host", 124, null,
        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
    List<ApplicationReport> applicationReports =
        new ArrayList<ApplicationReport>();
    applicationReports.add(newApplicationReport);
    ApplicationId applicationId2 = ApplicationId.newInstance(1234, 6);
    ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
        applicationId2, ApplicationAttemptId.newInstance(applicationId2, 2),
        "user2", "queue2", "appname2", "host2", 125, null,
        YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2,
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f, "NON-YARN",
        null);
    applicationReports.add(newApplicationReport2);
    ApplicationId applicationId3 = ApplicationId.newInstance(1234, 7);
    ApplicationReport newApplicationReport3 = ApplicationReport.newInstance(
        applicationId3, ApplicationAttemptId.newInstance(applicationId3, 3),
        "user3", "queue3", "appname3", "host3", 126, null,
        YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE",
        null);
    applicationReports.add(newApplicationReport3);
    ApplicationId applicationId4 = ApplicationId.newInstance(1234, 8);
    ApplicationReport newApplicationReport4 = ApplicationReport.newInstance(
        applicationId4, ApplicationAttemptId.newInstance(applicationId4, 4),
        "user4", "queue4", "appname4", "host4", 127, null,
        YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4,
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f,
        "NON-MAPREDUCE", null);
    applicationReports.add(newApplicationReport4);
    ApplicationId applicationId5 = ApplicationId.newInstance(1234, 9);
    ApplicationReport newApplicationReport5 = ApplicationReport.newInstance(
        applicationId5, ApplicationAttemptId.newInstance(applicationId5, 5),
        "user5", "queue5", "appname5", "host5", 128, null,
        YarnApplicationState.ACCEPTED, "diagnostics5", "url5", 5, 5,
        FinalApplicationStatus.KILLED, null, "N/A", 0.93789f, "HIVE", null);
    applicationReports.add(newApplicationReport5);
    ApplicationId applicationId6 = ApplicationId.newInstance(1234, 10);
    ApplicationReport newApplicationReport6 = ApplicationReport.newInstance(
        applicationId6, ApplicationAttemptId.newInstance(applicationId6, 6),
        "user6", "queue6", "appname6", "host6", 129, null,
        YarnApplicationState.SUBMITTED, "diagnostics6", "url6", 6, 6,
        FinalApplicationStatus.KILLED, null, "N/A", 0.99789f, "PIG", null);
    applicationReports.add(newApplicationReport6);

    // Test command yarn application -list
    // if the set appStates is empty, RUNNING state will be automatically added
    // to the appStates list
    // the output of yarn application -list should be the same as
    // equals to yarn application -list --appStates RUNNING,ACCEPTED,SUBMITTED
    Set<String> appType1 = new HashSet<String>();
    EnumSet<YarnApplicationState> appState1 =
        EnumSet.noneOf(YarnApplicationState.class);
    appState1.add(YarnApplicationState.RUNNING);
    appState1.add(YarnApplicationState.ACCEPTED);
    appState1.add(YarnApplicationState.SUBMITTED);
    when(client.getApplications(appType1, appState1)).thenReturn(
        getApplicationReports(applicationReports, appType1, appState1, false));
    int result = cli.run(new String[] { "application", "-list" });
    assertEquals(0, result);
    verify(client).getApplications(appType1, appState1);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType1
        + " and states: " + appState1 + ")" + ":" + 4);
    pw.print(" Application-Id\t Application-Name");
    pw.print("\t Application-Type");
    pw.print("\t User\t Queue\t State\t ");
    pw.print("Final-State\t Progress");
    pw.println("\t Tracking-URL");
    pw.print(" application_1234_0005\t ");
    pw.print("appname\t YARN\t user\t ");
    pw.print("queue\t RUNNING\t ");
    pw.print("SUCCEEDED\t 53.79%");
    pw.println("\t N/A");
    pw.print(" application_1234_0007\t ");
    pw.print("appname3\t MAPREDUCE\t user3\t ");
    pw.print("queue3\t RUNNING\t ");
    pw.print("SUCCEEDED\t 73.79%");
    pw.println("\t N/A");
    pw.print(" application_1234_0009\t ");
    pw.print("appname5\t HIVE\t user5\t ");
    pw.print("queue5\t ACCEPTED\t ");
    pw.print("KILLED\t 93.79%");
    pw.println("\t N/A");
    pw.print(" application_1234_0010\t ");
    pw.print("appname6\t PIG\t user6\t ");
    pw.print("queue6\t SUBMITTED\t ");
    pw.print("KILLED\t 99.79%");
    pw.println("\t N/A");
    pw.close();
    String appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(1)).write(any(byte[].class), anyInt(), anyInt());

    //Test command yarn application -list --appTypes apptype1,apptype2
    //the output should be the same as
    // yarn application -list --appTypes apptyp1, apptype2 --appStates
    // RUNNING,ACCEPTED,SUBMITTED
    sysOutStream.reset();
    Set<String> appType2 = new HashSet<String>();
    appType2.add("YARN");
    appType2.add("NON-YARN");
    EnumSet<YarnApplicationState> appState2 =
        EnumSet.noneOf(YarnApplicationState.class);
    appState2.add(YarnApplicationState.RUNNING);
    appState2.add(YarnApplicationState.ACCEPTED);
    appState2.add(YarnApplicationState.SUBMITTED);
    when(client.getApplications(appType2, appState2)).thenReturn(
        getApplicationReports(applicationReports, appType2, appState2, false));
    // Deliberately messy type list: parser must trim blanks and empties.
    result = cli.run(new String[] { "application", "-list", "-appTypes",
        "YARN, ,, NON-YARN", " ,, ,," });
    assertEquals(0, result);
    verify(client).getApplications(appType2, appState2);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType2
        + " and states: " + appState2 + ")" + ":" + 1);
    pw.print(" Application-Id\t Application-Name");
    pw.print("\t Application-Type");
    pw.print("\t User\t Queue\t State\t ");
    pw.print("Final-State\t Progress");
    pw.println("\t Tracking-URL");
    pw.print(" application_1234_0005\t ");
    pw.print("appname\t YARN\t user\t ");
    pw.print("queue\t RUNNING\t ");
    pw.print("SUCCEEDED\t 53.79%");
    pw.println("\t N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(2)).write(any(byte[].class), anyInt(), anyInt());

    //Test command yarn application -list --appStates appState1,appState2
    sysOutStream.reset();
    Set<String> appType3 = new HashSet<String>();
    EnumSet<YarnApplicationState> appState3 =
        EnumSet.noneOf(YarnApplicationState.class);
    appState3.add(YarnApplicationState.FINISHED);
    appState3.add(YarnApplicationState.FAILED);
    when(client.getApplications(appType3, appState3)).thenReturn(
        getApplicationReports(applicationReports, appType3, appState3, false));
    result = cli.run(new String[] { "application", "-list", "--appStates",
        "FINISHED ,, , FAILED", ",,FINISHED" });
    assertEquals(0, result);
    verify(client).getApplications(appType3, appState3);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType3
        + " and states: " + appState3 + ")" + ":" + 2);
    pw.print(" Application-Id\t Application-Name");
    pw.print("\t Application-Type");
    pw.print("\t User\t Queue\t State\t ");
    pw.print("Final-State\t Progress");
    pw.println("\t Tracking-URL");
    pw.print(" application_1234_0006\t ");
    pw.print("appname2\t NON-YARN\t user2\t ");
    pw.print("queue2\t FINISHED\t ");
    pw.print("SUCCEEDED\t 63.79%");
    pw.println("\t N/A");
    pw.print(" application_1234_0008\t ");
    pw.print("appname4\t NON-MAPREDUCE\t user4\t ");
    pw.print("queue4\t FAILED\t ");
    pw.print("SUCCEEDED\t 83.79%");
    pw.println("\t N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(3)).write(any(byte[].class), anyInt(), anyInt());

    // Test command yarn application -list --appTypes apptype1,apptype2
    // --appStates appstate1,appstate2
    sysOutStream.reset();
    Set<String> appType4 = new HashSet<String>();
    appType4.add("YARN");
    appType4.add("NON-YARN");
    EnumSet<YarnApplicationState> appState4 =
        EnumSet.noneOf(YarnApplicationState.class);
    appState4.add(YarnApplicationState.FINISHED);
    appState4.add(YarnApplicationState.FAILED);
    when(client.getApplications(appType4, appState4)).thenReturn(
        getApplicationReports(applicationReports, appType4, appState4, false));
    result = cli.run(new String[] { "application", "-list", "--appTypes",
        "YARN,NON-YARN", "--appStates", "FINISHED ,, , FAILED" });
    assertEquals(0, result);
    // NOTE(review): verifies the earlier (appType2, appState2) invocation
    // again rather than (appType4, appState4); passes because Mockito verify
    // counts are cumulative — looks like a copy-paste slip, confirm intent.
    verify(client).getApplications(appType2, appState2);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType4
        + " and states: " + appState4 + ")" + ":" + 1);
    pw.print(" Application-Id\t Application-Name");
    pw.print("\t Application-Type");
    pw.print("\t User\t Queue\t State\t ");
    pw.print("Final-State\t Progress");
    pw.println("\t Tracking-URL");
    pw.print(" application_1234_0006\t ");
    pw.print("appname2\t NON-YARN\t user2\t ");
    pw.print("queue2\t FINISHED\t ");
    pw.print("SUCCEEDED\t 63.79%");
    pw.println("\t N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt());

    //Test command yarn application -list --appStates with invalid appStates
    sysOutStream.reset();
    result = cli.run(new String[] { "application", "-list", "--appStates",
        "FINISHED ,, , INVALID" });
    assertEquals(-1, result);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("The application state INVALID is invalid.");
    pw.print("The valid application state can be one of the following: ");
    StringBuilder sb = new StringBuilder();
    sb.append("ALL,");
    for (YarnApplicationState state : YarnApplicationState.values()) {
      sb.append(state + ",");
    }
    String output = sb.toString();
    // Drop the trailing comma from the joined state list.
    pw.println(output.substring(0, output.length() - 1));
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    // Still times(4): the error path above did not add a write() call.
    verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt());

    //Test command yarn application -list --appStates all
    sysOutStream.reset();
    Set<String> appType5 = new HashSet<String>();
    EnumSet<YarnApplicationState> appState5 =
        EnumSet.noneOf(YarnApplicationState.class);
    appState5.add(YarnApplicationState.FINISHED);
    when(client.getApplications(appType5, appState5)).thenReturn(
        getApplicationReports(applicationReports, appType5, appState5, true));
    result = cli.run(new String[] { "application", "-list", "--appStates",
        "FINISHED ,, , ALL" });
    assertEquals(0, result);
    verify(client).getApplications(appType5, appState5);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType5
        + " and states: " + appState5 + ")" + ":" + 6);
    pw.print(" Application-Id\t Application-Name");
    pw.print("\t Application-Type");
    pw.print("\t User\t Queue\t State\t ");
    pw.print("Final-State\t Progress");
    pw.println("\t Tracking-URL");
    pw.print(" application_1234_0005\t ");
    pw.print("appname\t YARN\t user\t ");
    pw.print("queue\t RUNNING\t ");
    pw.print("SUCCEEDED\t 53.79%");
    pw.println("\t N/A");
    pw.print(" application_1234_0006\t ");
    pw.print("appname2\t NON-YARN\t user2\t ");
    pw.print("queue2\t FINISHED\t ");
    pw.print("SUCCEEDED\t 63.79%");
    pw.println("\t N/A");
    pw.print(" application_1234_0007\t ");
    pw.print("appname3\t MAPREDUCE\t user3\t ");
    pw.print("queue3\t RUNNING\t ");
    pw.print("SUCCEEDED\t 73.79%");
    pw.println("\t N/A");
    pw.print(" application_1234_0008\t ");
    pw.print("appname4\t NON-MAPREDUCE\t user4\t ");
    pw.print("queue4\t FAILED\t ");
    pw.print("SUCCEEDED\t 83.79%");
    pw.println("\t N/A");
    pw.print(" application_1234_0009\t ");
    pw.print("appname5\t HIVE\t user5\t ");
    pw.print("queue5\t ACCEPTED\t ");
    pw.print("KILLED\t 93.79%");
    pw.println("\t N/A");
    pw.print(" application_1234_0010\t ");
    pw.print("appname6\t PIG\t user6\t ");
    pw.print("queue6\t SUBMITTED\t ");
    pw.print("KILLED\t 99.79%");
    pw.println("\t N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(5)).write(any(byte[].class), anyInt(), anyInt());

    // Test command yarn application user case insensitive
    sysOutStream.reset();
    Set<String> appType6 = new HashSet<String>();
    appType6.add("YARN");
    appType6.add("NON-YARN");
    EnumSet<YarnApplicationState> appState6 =
        EnumSet.noneOf(YarnApplicationState.class);
    appState6.add(YarnApplicationState.FINISHED);
    when(client.getApplications(appType6, appState6)).thenReturn(
        getApplicationReports(applicationReports, appType6, appState6, false));
    result = cli.run(new String[] { "application", "-list", "-appTypes",
        "YARN, ,, NON-YARN", "--appStates", "finished" });
    assertEquals(0, result);
    verify(client).getApplications(appType6, appState6);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total number of applications (application-types: " + appType6
        + " and states: " + appState6 + ")" + ":" + 1);
    pw.print(" Application-Id\t Application-Name");
    pw.print("\t Application-Type");
    pw.print("\t User\t Queue\t State\t ");
    pw.print("Final-State\t Progress");
    pw.println("\t Tracking-URL");
    pw.print(" application_1234_0006\t ");
    pw.print("appname2\t NON-YARN\t user2\t ");
    pw.print("queue2\t FINISHED\t ");
    pw.print("SUCCEEDED\t 63.79%");
    pw.println("\t N/A");
    pw.close();
    appsReportStr = baos.toString("UTF-8");
    Assert.assertEquals(appsReportStr, sysOutStream.toString());
    verify(sysOut, times(6)).write(any(byte[].class), anyInt(), anyInt());
  }

  /**
   * Filters the canned reports the way the stubbed RM call should: a report
   * is kept only if it matches the requested types (when non-empty) and
   * states (when non-empty). When allStates is true, every state is first
   * added to appStates (mutating the caller's set).
   */
  private List<ApplicationReport> getApplicationReports(
      List<ApplicationReport> applicationReports,
      Set<String> appTypes, EnumSet<YarnApplicationState> appStates,
      boolean allStates) {
    List<ApplicationReport> appReports = new ArrayList<ApplicationReport>();
    if (allStates) {
      for (YarnApplicationState state : YarnApplicationState.values()) {
        appStates.add(state);
      }
    }
    for (ApplicationReport appReport : applicationReports) {
      if (appTypes != null && !appTypes.isEmpty()) {
        if (!appTypes.contains(appReport.getApplicationType())) {
          continue;
        }
      }
      if (appStates != null && !appStates.isEmpty()) {
        if (!appStates.contains(appReport.getYarnApplicationState())) {
          continue;
        }
      }
      appReports.add(appReport);
    }
    return appReports;
  }

  /**
   * "application -help" and malformed argument lists should both print the
   * usage message. The later cli.run calls reuse verify(spyCli) checks that
   * were already satisfied by the first -help invocation.
   */
  @Test (timeout = 10000)
  public void testAppsHelpCommand() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    ApplicationCLI spyCli = spy(cli);
    int result = spyCli.run(new String[] { "application", "-help" });
    Assert.assertTrue(result == 0);
    verify(spyCli).printUsage(any(String.class), any(Options.class));
    Assert.assertEquals(createApplicationCLIHelpMessage(),
        sysOutStream.toString());
    sysOutStream.reset();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    result = cli.run(
        new String[] {"application", "-kill",
applicationId.toString(), "args" });
    verify(spyCli).printUsage(any(String.class), any(Options.class));
    Assert.assertEquals(createApplicationCLIHelpMessage(),
        sysOutStream.toString());
    sysOutStream.reset();
    NodeId nodeId = NodeId.newInstance("host0", 0);
    result = cli.run(
        new String[] { "application", "-status", nodeId.toString(), "args" });
    verify(spyCli).printUsage(any(String.class), any(Options.class));
    Assert.assertEquals(createApplicationCLIHelpMessage(),
        sysOutStream.toString());
  }

  /** Usage-message checks for the "applicationattempt" sub-command. */
  @Test (timeout = 10000)
  public void testAppAttemptsHelpCommand() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    ApplicationCLI spyCli = spy(cli);
    int result = spyCli.run(new String[] { "applicationattempt", "-help" });
    Assert.assertTrue(result == 0);
    verify(spyCli).printUsage(any(String.class), any(Options.class));
    Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),
        sysOutStream.toString());
    sysOutStream.reset();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    result = cli.run(
        new String[] {"applicationattempt", "-list", applicationId.toString(),
            "args" });
    verify(spyCli).printUsage(any(String.class), any(Options.class));
    Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),
        sysOutStream.toString());
    sysOutStream.reset();
    ApplicationAttemptId appAttemptId =
        ApplicationAttemptId.newInstance(applicationId, 6);
    result = cli.run(
        new String[] { "applicationattempt", "-status",
            appAttemptId.toString(), "args" });
    verify(spyCli).printUsage(any(String.class), any(Options.class));
    Assert.assertEquals(createApplicationAttemptCLIHelpMessage(),
        sysOutStream.toString());
  }

  /** Usage-message checks for the "container" sub-command. */
  @Test (timeout = 10000)
  public void testContainersHelpCommand() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    ApplicationCLI spyCli = spy(cli);
    int result = spyCli.run(new String[] { "container", "-help" });
    Assert.assertTrue(result == 0);
    verify(spyCli).printUsage(any(String.class), any(Options.class));
    Assert.assertEquals(createContainerCLIHelpMessage(),
        sysOutStream.toString());
    sysOutStream.reset();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    ApplicationAttemptId appAttemptId =
        ApplicationAttemptId.newInstance(applicationId, 6);
    result = cli.run(
        new String[] {"container", "-list", appAttemptId.toString(),
            "args" });
    verify(spyCli).printUsage(any(String.class), any(Options.class));
    Assert.assertEquals(createContainerCLIHelpMessage(),
        sysOutStream.toString());
    sysOutStream.reset();
    ContainerId containerId = ContainerId.newContainerId(appAttemptId, 7);
    result = cli.run(
        new String[] { "container", "-status", containerId.toString(),
            "args" });
    verify(spyCli).printUsage(any(String.class), any(Options.class));
    Assert.assertEquals(createContainerCLIHelpMessage(),
        sysOutStream.toString());
  }

  /** Running the node CLI with no arguments prints its help text. */
  @Test (timeout = 5000)
  public void testNodesHelpCommand() throws Exception {
    NodeCLI nodeCLI = new NodeCLI();
    nodeCLI.setClient(client);
    nodeCLI.setSysOutPrintStream(sysOut);
    nodeCLI.setSysErrPrintStream(sysErr);
    nodeCLI.run(new String[] {});
    Assert.assertEquals(createNodeCLIHelpMessage(),
        sysOutStream.toString());
  }

  /**
   * "application -kill": an already-finished app is not killed, a running
   * app is, and a missing app prints an error and returns non-zero without
   * letting ApplicationNotFoundException escape.
   */
  @Test
  public void testKillApplication() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
        applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
        "user", "queue", "appname", "host", 124, null,
        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
    when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
        newApplicationReport2);
    int result = cli.run(new String[] { "application","-kill",
        applicationId.toString() });
    assertEquals(0, result);
    // Finished application: no kill RPC may be issued.
    verify(client, times(0)).killApplication(any(ApplicationId.class));
    verify(sysOut).println(
        "Application " + applicationId + " has already finished ");
    ApplicationReport newApplicationReport = ApplicationReport.newInstance(
        applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
        "user", "queue", "appname", "host", 124, null,
        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
    when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
        newApplicationReport);
    result = cli.run(new String[] { "application","-kill",
        applicationId.toString() });
    assertEquals(0, result);
    verify(client).killApplication(any(ApplicationId.class));
    verify(sysOut).println("Killing application application_1234_0005");
    doThrow(new ApplicationNotFoundException("Application with id '"
        + applicationId + "' doesn't exist in RM.")).when(client)
        .getApplicationReport(applicationId);
    cli = createAndGetAppCLI();
    try {
      int exitCode = cli.run(new String[] { "application","-kill",
          applicationId.toString() });
      verify(sysOut).println("Application with id '" + applicationId
          + "' doesn't exist in RM.");
      Assert.assertNotSame("should return non-zero exit code.", 0, exitCode);
    } catch (ApplicationNotFoundException appEx) {
      Assert.fail("application -kill should not throw"
          + "ApplicationNotFoundException. " + appEx);
    } catch (Exception e) {
      Assert.fail("Unexpected exception: " + e);
    }
  }

  /**
   * "application -movetoqueue": a finished app is not moved, a running app
   * is, and a missing app propagates ApplicationNotFoundException to the
   * caller (unlike -kill).
   */
  @Test
  public void testMoveApplicationAcrossQueues() throws Exception {
    ApplicationCLI cli = createAndGetAppCLI();
    ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
    ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
        applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
        "user", "queue", "appname", "host", 124, null,
        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
    when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
        newApplicationReport2);
    int result = cli.run(new String[] { "application", "-movetoqueue",
        applicationId.toString(), "-queue", "targetqueue"});
    assertEquals(0, result);
    verify(client, times(0)).moveApplicationAcrossQueues(
        any(ApplicationId.class), any(String.class));
    verify(sysOut).println(
        "Application " + applicationId + " has already finished ");
    ApplicationReport newApplicationReport = ApplicationReport.newInstance(
        applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
        "user", "queue", "appname", "host", 124, null,
        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
        FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
    when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
        newApplicationReport);
    result = cli.run(new String[] { "application", "-movetoqueue",
        applicationId.toString(), "-queue", "targetqueue"});
    assertEquals(0, result);
    verify(client).moveApplicationAcrossQueues(any(ApplicationId.class),
        any(String.class));
    verify(sysOut).println("Moving application application_1234_0005 to queue targetqueue");
    verify(sysOut).println("Successfully completed move.");
    doThrow(new ApplicationNotFoundException("Application with id '"
        + applicationId + "' doesn't exist in RM.")).when(client)
        .moveApplicationAcrossQueues(applicationId, "targetqueue");
    cli = createAndGetAppCLI();
    try {
      result = cli.run(new String[] { "application", "-movetoqueue",
          applicationId.toString(), "-queue", "targetqueue"});
      Assert.fail();
    } catch (Exception ex) {
      Assert.assertTrue(ex instanceof ApplicationNotFoundException);
      Assert.assertEquals("Application with id '" + applicationId
          + "' doesn't exist in RM.", ex.getMessage());
    }
  }

  /**
   * "node -list": checks the node table printed for the NEW, RUNNING
   * (also the default with no --states flag), UNHEALTHY and DECOMMISSIONED
   * filters. Uses the getNodeReports helpers defined later in this file.
   */
  @Test
  public void testListClusterNodes() throws Exception {
    List<NodeReport> nodeReports = new ArrayList<NodeReport>();
    nodeReports.addAll(getNodeReports(1, NodeState.NEW));
    nodeReports.addAll(getNodeReports(2, NodeState.RUNNING));
    nodeReports.addAll(getNodeReports(1, NodeState.UNHEALTHY));
    nodeReports.addAll(getNodeReports(1, NodeState.DECOMMISSIONED));
    nodeReports.addAll(getNodeReports(1, NodeState.REBOOTED));
    nodeReports.addAll(getNodeReports(1, NodeState.LOST));
    NodeCLI cli = new NodeCLI();
    cli.setClient(client);
    cli.setSysOutPrintStream(sysOut);
    Set<NodeState> nodeStates = new HashSet<NodeState>();
    nodeStates.add(NodeState.NEW);
    NodeState[] states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states))
        .thenReturn(getNodeReports(nodeReports, nodeStates));
    int result = cli.run(new String[] { "-list", "--states", "NEW" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.println("Total Nodes:1");
    pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print(" host0:0\t NEW\t host1:8888\t");
    pw.println(" 0");
    pw.close();
    String nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(1)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    nodeStates.clear();
    nodeStates.add(NodeState.RUNNING);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states))
        .thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "--states", "RUNNING" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:2");
    pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print(" host0:0\t RUNNING\t host1:8888\t");
    pw.println(" 0");
    pw.print(" host1:0\t RUNNING\t host1:8888\t");
    pw.println(" 0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(2)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    // No --states flag: output matches the RUNNING listing above.
    result = cli.run(new String[] { "-list" });
    assertEquals(0, result);
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(3)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    nodeStates.clear();
    nodeStates.add(NodeState.UNHEALTHY);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states))
        .thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "--states", "UNHEALTHY" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:1");
    pw.print(" Node-Id\t Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print(" host0:0\t UNHEALTHY\t host1:8888\t");
    pw.println(" 0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt());
    sysOutStream.reset();
    nodeStates.clear();
    nodeStates.add(NodeState.DECOMMISSIONED);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states))
        .thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "--states", "DECOMMISSIONED" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
baos = new ByteArrayOutputStream(); pw = new PrintWriter(baos); pw.println("Total Nodes:1"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); pw.println("Number-of-Running-Containers"); pw.print(" host0:0\t DECOMMISSIONED\t host1:8888\t"); pw.println(" 0"); pw.close(); nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); verify(sysOut, times(5)).write(any(byte[].class), anyInt(), anyInt()); sysOutStream.reset(); nodeStates.clear(); nodeStates.add(NodeState.REBOOTED); states = nodeStates.toArray(new NodeState[0]); when(client.getNodeReports(states)) .thenReturn(getNodeReports(nodeReports, nodeStates)); result = cli.run(new String[] { "-list", "--states", "REBOOTED" }); assertEquals(0, result); verify(client).getNodeReports(states); baos = new ByteArrayOutputStream(); pw = new PrintWriter(baos); pw.println("Total Nodes:1"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); pw.println("Number-of-Running-Containers"); pw.print(" host0:0\t REBOOTED\t host1:8888\t"); pw.println(" 0"); pw.close(); nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); verify(sysOut, times(6)).write(any(byte[].class), anyInt(), anyInt()); sysOutStream.reset(); nodeStates.clear(); nodeStates.add(NodeState.LOST); states = nodeStates.toArray(new NodeState[0]); when(client.getNodeReports(states)) .thenReturn(getNodeReports(nodeReports, nodeStates)); result = cli.run(new String[] { "-list", "--states", "LOST" }); assertEquals(0, result); verify(client).getNodeReports(states); baos = new ByteArrayOutputStream(); pw = new PrintWriter(baos); pw.println("Total Nodes:1"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); pw.println("Number-of-Running-Containers"); pw.print(" host0:0\t LOST\t host1:8888\t"); pw.println(" 0"); pw.close(); nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); verify(sysOut, 
times(7)).write(any(byte[].class), anyInt(), anyInt()); sysOutStream.reset(); nodeStates.clear(); nodeStates.add(NodeState.NEW); nodeStates.add(NodeState.RUNNING); nodeStates.add(NodeState.LOST); nodeStates.add(NodeState.REBOOTED); states = nodeStates.toArray(new NodeState[0]); when(client.getNodeReports(states)) .thenReturn(getNodeReports(nodeReports, nodeStates)); result = cli.run(new String[] { "-list", "--states", "NEW,RUNNING,LOST,REBOOTED" }); assertEquals(0, result); verify(client).getNodeReports(states); baos = new ByteArrayOutputStream(); pw = new PrintWriter(baos); pw.println("Total Nodes:5"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); pw.println("Number-of-Running-Containers"); pw.print(" host0:0\t NEW\t host1:8888\t"); pw.println(" 0"); pw.print(" host0:0\t RUNNING\t host1:8888\t"); pw.println(" 0"); pw.print(" host1:0\t RUNNING\t host1:8888\t"); pw.println(" 0"); pw.print(" host0:0\t REBOOTED\t host1:8888\t"); pw.println(" 0"); pw.print(" host0:0\t LOST\t host1:8888\t"); pw.println(" 0"); pw.close(); nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); verify(sysOut, times(8)).write(any(byte[].class), anyInt(), anyInt()); sysOutStream.reset(); nodeStates.clear(); for (NodeState s : NodeState.values()) { nodeStates.add(s); } states = nodeStates.toArray(new NodeState[0]); when(client.getNodeReports(states)) .thenReturn(getNodeReports(nodeReports, nodeStates)); result = cli.run(new String[] { "-list", "--all" }); assertEquals(0, result); verify(client).getNodeReports(states); baos = new ByteArrayOutputStream(); pw = new PrintWriter(baos); pw.println("Total Nodes:7"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); pw.println("Number-of-Running-Containers"); pw.print(" host0:0\t NEW\t host1:8888\t"); pw.println(" 0"); pw.print(" host0:0\t RUNNING\t host1:8888\t"); pw.println(" 0"); pw.print(" host1:0\t RUNNING\t host1:8888\t"); pw.println(" 0"); pw.print(" host0:0\t UNHEALTHY\t 
host1:8888\t"); pw.println(" 0"); pw.print(" host0:0\t DECOMMISSIONED\t host1:8888\t"); pw.println(" 0"); pw.print(" host0:0\t REBOOTED\t host1:8888\t"); pw.println(" 0"); pw.print(" host0:0\t LOST\t host1:8888\t"); pw.println(" 0"); pw.close(); nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); verify(sysOut, times(9)).write(any(byte[].class), anyInt(), anyInt()); } private List<NodeReport> getNodeReports( List<NodeReport> nodeReports, Set<NodeState> nodeStates) { List<NodeReport> reports = new ArrayList<NodeReport>(); for (NodeReport nodeReport : nodeReports) { if (nodeStates.contains(nodeReport.getNodeState())) { reports.add(nodeReport); } } return reports; } @Test public void testNodeStatus() throws Exception { NodeId nodeId = NodeId.newInstance("host0", 0); NodeCLI cli = new NodeCLI(); when(client.getNodeReports()).thenReturn( getNodeReports(3, NodeState.RUNNING, false)); cli.setClient(client); cli.setSysOutPrintStream(sysOut); cli.setSysErrPrintStream(sysErr); int result = cli.run(new String[] { "-status", nodeId.toString() }); assertEquals(0, result); verify(client).getNodeReports(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); pw.println("Node Report : "); pw.println("\tNode-Id : host0:0"); pw.println("\tRack : rack1"); pw.println("\tNode-State : RUNNING"); pw.println("\tNode-Http-Address : host1:8888"); pw.println("\tLast-Health-Update : " + DateFormatUtils.format(new Date(0), "E dd/MMM/yy hh:mm:ss:SSzz")); pw.println("\tHealth-Report : "); pw.println("\tContainers : 0"); pw.println("\tMemory-Used : 0MB"); pw.println("\tMemory-Capacity : 0MB"); pw.println("\tCPU-Used : 0 vcores"); pw.println("\tCPU-Capacity : 0 vcores"); pw.println("\tNode-Labels : a,b,c,x,y,z"); pw.close(); String nodeStatusStr = baos.toString("UTF-8"); verify(sysOut, times(1)).println(isA(String.class)); verify(sysOut).println(nodeStatusStr); } @Test public void 
testNodeStatusWithEmptyNodeLabels() throws Exception { NodeId nodeId = NodeId.newInstance("host0", 0); NodeCLI cli = new NodeCLI(); when(client.getNodeReports()).thenReturn( getNodeReports(3, NodeState.RUNNING)); cli.setClient(client); cli.setSysOutPrintStream(sysOut); cli.setSysErrPrintStream(sysErr); int result = cli.run(new String[] { "-status", nodeId.toString() }); assertEquals(0, result); verify(client).getNodeReports(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); pw.println("Node Report : "); pw.println("\tNode-Id : host0:0"); pw.println("\tRack : rack1"); pw.println("\tNode-State : RUNNING"); pw.println("\tNode-Http-Address : host1:8888"); pw.println("\tLast-Health-Update : " + DateFormatUtils.format(new Date(0), "E dd/MMM/yy hh:mm:ss:SSzz")); pw.println("\tHealth-Report : "); pw.println("\tContainers : 0"); pw.println("\tMemory-Used : 0MB"); pw.println("\tMemory-Capacity : 0MB"); pw.println("\tCPU-Used : 0 vcores"); pw.println("\tCPU-Capacity : 0 vcores"); pw.println("\tNode-Labels : "); pw.close(); String nodeStatusStr = baos.toString("UTF-8"); verify(sysOut, times(1)).println(isA(String.class)); verify(sysOut).println(nodeStatusStr); } @Test public void testAbsentNodeStatus() throws Exception { NodeId nodeId = NodeId.newInstance("Absenthost0", 0); NodeCLI cli = new NodeCLI(); when(client.getNodeReports()).thenReturn( getNodeReports(0, NodeState.RUNNING)); cli.setClient(client); cli.setSysOutPrintStream(sysOut); cli.setSysErrPrintStream(sysErr); int result = cli.run(new String[] { "-status", nodeId.toString() }); assertEquals(0, result); verify(client).getNodeReports(); verify(sysOut, times(1)).println(isA(String.class)); verify(sysOut).println( "Could not find the node report for node id : " + nodeId.toString()); } @Test public void testAppCLIUsageInfo() throws Exception { verifyUsageInfo(new ApplicationCLI()); } @Test public void testNodeCLIUsageInfo() throws Exception { verifyUsageInfo(new 
NodeCLI()); } @Test public void testMissingArguments() throws Exception { ApplicationCLI cli = createAndGetAppCLI(); int result = cli.run(new String[] { "application", "-status" }); Assert.assertEquals(result, -1); Assert.assertEquals(String.format("Missing argument for options%n%1s", createApplicationCLIHelpMessage()), sysOutStream.toString()); sysOutStream.reset(); result = cli.run(new String[] { "applicationattempt", "-status" }); Assert.assertEquals(result, -1); Assert.assertEquals(String.format("Missing argument for options%n%1s", createApplicationAttemptCLIHelpMessage()), sysOutStream.toString()); sysOutStream.reset(); result = cli.run(new String[] { "container", "-status" }); Assert.assertEquals(result, -1); Assert.assertEquals(String.format("Missing argument for options%n%1s", createContainerCLIHelpMessage()), sysOutStream.toString()); sysOutStream.reset(); NodeCLI nodeCLI = new NodeCLI(); nodeCLI.setClient(client); nodeCLI.setSysOutPrintStream(sysOut); nodeCLI.setSysErrPrintStream(sysErr); result = nodeCLI.run(new String[] { "-status" }); Assert.assertEquals(result, -1); Assert.assertEquals(String.format("Missing argument for options%n%1s", createNodeCLIHelpMessage()), sysOutStream.toString()); } @Test public void testGetQueueInfo() throws Exception { QueueCLI cli = createAndGetQueueCLI(); Set<String> nodeLabels = new HashSet<String>(); nodeLabels.add("GPU"); nodeLabels.add("JDK_7"); QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f, null, null, QueueState.RUNNING, nodeLabels, "GPU", null); when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo); int result = cli.run(new String[] { "-status", "queueA" }); assertEquals(0, result); verify(client).getQueueInfo("queueA"); ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); pw.println("Queue Information : "); pw.println("Queue Name : " + "queueA"); pw.println("\tState : " + "RUNNING"); pw.println("\tCapacity : " + "40.0%"); 
pw.println("\tCurrent Capacity : " + "50.0%"); pw.println("\tMaximum Capacity : " + "80.0%"); pw.println("\tDefault Node Label expression : " + "GPU"); pw.println("\tAccessible Node Labels : " + "JDK_7,GPU"); pw.close(); String queueInfoStr = baos.toString("UTF-8"); Assert.assertEquals(queueInfoStr, sysOutStream.toString()); } @Test public void testGetQueueInfoWithEmptyNodeLabel() throws Exception { QueueCLI cli = createAndGetQueueCLI(); QueueInfo queueInfo = QueueInfo.newInstance("queueA", 0.4f, 0.8f, 0.5f, null, null, QueueState.RUNNING, null, null, null); when(client.getQueueInfo(any(String.class))).thenReturn(queueInfo); int result = cli.run(new String[] { "-status", "queueA" }); assertEquals(0, result); verify(client).getQueueInfo("queueA"); ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); pw.println("Queue Information : "); pw.println("Queue Name : " + "queueA"); pw.println("\tState : " + "RUNNING"); pw.println("\tCapacity : " + "40.0%"); pw.println("\tCurrent Capacity : " + "50.0%"); pw.println("\tMaximum Capacity : " + "80.0%"); pw.println("\tDefault Node Label expression : "); pw.println("\tAccessible Node Labels : "); pw.close(); String queueInfoStr = baos.toString("UTF-8"); Assert.assertEquals(queueInfoStr, sysOutStream.toString()); } @Test public void testGetQueueInfoWithNonExistedQueue() throws Exception { String queueName = "non-existed-queue"; QueueCLI cli = createAndGetQueueCLI(); when(client.getQueueInfo(any(String.class))).thenReturn(null); int result = cli.run(new String[] { "-status", queueName }); assertEquals(-1, result);; ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); pw.println("Cannot get queue from RM by queueName = " + queueName + ", please check."); pw.close(); String queueInfoStr = baos.toString("UTF-8"); Assert.assertEquals(queueInfoStr, sysOutStream.toString()); } @Test public void testGetApplicationAttemptReportException() throws 
Exception { ApplicationCLI cli = createAndGetAppCLI(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationAttemptId attemptId1 = ApplicationAttemptId.newInstance( applicationId, 1); when(client.getApplicationAttemptReport(attemptId1)).thenThrow( new ApplicationNotFoundException("History file for application" + applicationId + " is not found")); int exitCode = cli.run(new String[] { "applicationattempt", "-status", attemptId1.toString() }); verify(sysOut).println( "Application for AppAttempt with id '" + attemptId1 + "' doesn't exist in RM or Timeline Server."); Assert.assertNotSame("should return non-zero exit code.", 0, exitCode); ApplicationAttemptId attemptId2 = ApplicationAttemptId.newInstance( applicationId, 2); when(client.getApplicationAttemptReport(attemptId2)).thenThrow( new ApplicationAttemptNotFoundException( "History file for application attempt" + attemptId2 + " is not found")); exitCode = cli.run(new String[] { "applicationattempt", "-status", attemptId2.toString() }); verify(sysOut).println( "Application Attempt with id '" + attemptId2 + "' doesn't exist in RM or Timeline Server."); Assert.assertNotSame("should return non-zero exit code.", 0, exitCode); } @Test public void testGetContainerReportException() throws Exception { ApplicationCLI cli = createAndGetAppCLI(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance( applicationId, 1); long cntId = 1; ContainerId containerId1 = ContainerId.newContainerId(attemptId, cntId++); when(client.getContainerReport(containerId1)).thenThrow( new ApplicationNotFoundException("History file for application" + applicationId + " is not found")); int exitCode = cli.run(new String[] { "container", "-status", containerId1.toString() }); verify(sysOut).println( "Application for Container with id '" + containerId1 + "' doesn't exist in RM or Timeline Server."); Assert.assertNotSame("should return non-zero exit 
code.", 0, exitCode); ContainerId containerId2 = ContainerId.newContainerId(attemptId, cntId++); when(client.getContainerReport(containerId2)).thenThrow( new ApplicationAttemptNotFoundException( "History file for application attempt" + attemptId + " is not found")); exitCode = cli.run(new String[] { "container", "-status", containerId2.toString() }); verify(sysOut).println( "Application Attempt for Container with id '" + containerId2 + "' doesn't exist in RM or Timeline Server."); Assert.assertNotSame("should return non-zero exit code.", 0, exitCode); ContainerId containerId3 = ContainerId.newContainerId(attemptId, cntId++); when(client.getContainerReport(containerId3)).thenThrow( new ContainerNotFoundException("History file for container" + containerId3 + " is not found")); exitCode = cli.run(new String[] { "container", "-status", containerId3.toString() }); verify(sysOut).println( "Container with id '" + containerId3 + "' doesn't exist in RM or Timeline Server."); Assert.assertNotSame("should return non-zero exit code.", 0, exitCode); } private void verifyUsageInfo(YarnCLI cli) throws Exception { cli.setSysErrPrintStream(sysErr); cli.run(new String[] { "application" }); verify(sysErr).println("Invalid Command Usage : "); } private List<NodeReport> getNodeReports(int noOfNodes, NodeState state) { return getNodeReports(noOfNodes, state, true); } private List<NodeReport> getNodeReports(int noOfNodes, NodeState state, boolean emptyNodeLabel) { List<NodeReport> nodeReports = new ArrayList<NodeReport>(); for (int i = 0; i < noOfNodes; i++) { Set<String> nodeLabels = null; if (!emptyNodeLabel) { // node labels is not ordered, but when we output it, it should be // ordered nodeLabels = ImmutableSet.of("c", "b", "a", "x", "z", "y"); } NodeReport nodeReport = NodeReport.newInstance(NodeId .newInstance("host" + i, 0), state, "host" + 1 + ":8888", "rack1", Records.newRecord(Resource.class), Records .newRecord(Resource.class), 0, "", 0, nodeLabels); 
nodeReports.add(nodeReport); } return nodeReports; } private ApplicationCLI createAndGetAppCLI() { ApplicationCLI cli = new ApplicationCLI(); cli.setClient(client); cli.setSysOutPrintStream(sysOut); return cli; } private QueueCLI createAndGetQueueCLI() { QueueCLI cli = new QueueCLI(); cli.setClient(client); cli.setSysOutPrintStream(sysOut); cli.setSysErrPrintStream(sysErr); return cli; } private String createApplicationCLIHelpMessage() throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); pw.println("usage: application"); pw.println(" -appStates <States> Works with -list to filter applications"); pw.println(" based on input comma-separated list of"); pw.println(" application states. The valid application"); pw.println(" state can be one of the following:"); pw.println(" ALL,NEW,NEW_SAVING,SUBMITTED,ACCEPTED,RUN"); pw.println(" NING,FINISHED,FAILED,KILLED"); pw.println(" -appTypes <Types> Works with -list to filter applications"); pw.println(" based on input comma-separated list of"); pw.println(" application types."); pw.println(" -help Displays help for all commands."); pw.println(" -kill <Application ID> Kills the application."); pw.println(" -list List applications. 
Supports optional use"); pw.println(" of -appTypes to filter applications based"); pw.println(" on application type, and -appStates to"); pw.println(" filter applications based on application"); pw.println(" state."); pw.println(" -movetoqueue <Application ID> Moves the application to a different"); pw.println(" queue."); pw.println(" -queue <Queue Name> Works with the movetoqueue command to"); pw.println(" specify which queue to move an"); pw.println(" application to."); pw.println(" -status <Application ID> Prints the status of the application."); pw.close(); String appsHelpStr = baos.toString("UTF-8"); return appsHelpStr; } private String createApplicationAttemptCLIHelpMessage() throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); pw.println("usage: applicationattempt"); pw.println(" -help Displays help for all commands."); pw.println(" -list <Application ID> List application attempts for"); pw.println(" aplication."); pw.println(" -status <Application Attempt ID> Prints the status of the application"); pw.println(" attempt."); pw.close(); String appsHelpStr = baos.toString("UTF-8"); return appsHelpStr; } private String createContainerCLIHelpMessage() throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); pw.println("usage: container"); pw.println(" -help Displays help for all commands."); pw.println(" -list <Application Attempt ID> List containers for application attempt."); pw.println(" -status <Container ID> Prints the status of the container."); pw.close(); String appsHelpStr = baos.toString("UTF-8"); return appsHelpStr; } private String createNodeCLIHelpMessage() throws IOException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); pw.println("usage: node"); pw.println(" -all Works with -list to list all nodes."); pw.println(" -help Displays help for all commands."); pw.println(" 
-list List all running nodes. Supports optional use of"); pw.println(" -states to filter nodes based on node state, all -all"); pw.println(" to list all nodes."); pw.println(" -states <States> Works with -list to filter nodes based on input"); pw.println(" comma-separated list of node states."); pw.println(" -status <NodeId> Prints the status report of the node."); pw.close(); String nodesHelpStr = baos.toString("UTF-8"); return nodesHelpStr; } }
70,859
45.283475
96
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestClusterCLI.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.client.cli;

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;

import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
import org.junit.Before;
import org.junit.Test;

import com.google.common.collect.ImmutableSet;

/**
 * Unit tests for the {@code yarn cluster} sub-command ({@link ClusterCLI}):
 * listing cluster node labels (via RM, via the local node-labels store, and
 * when the collection is empty) plus the {@code --help} output.
 *
 * <p>Each test captures the CLI's stdout through a spied
 * {@link PrintStream} backed by a {@link ByteArrayOutputStream} and
 * verifies the exact printed text, so expected strings below must match
 * the CLI output byte-for-byte.
 */
public class TestClusterCLI {

  // Buffer behind the spied stdout stream; tests read printed text from it.
  ByteArrayOutputStream sysOutStream;
  private PrintStream sysOut;
  // Buffer behind the spied stderr stream (wired into the CLI, not System.err).
  ByteArrayOutputStream sysErrStream;
  private PrintStream sysErr;

  /**
   * Creates fresh spied stdout/stderr streams before every test and
   * redirects {@code System.out} to the spied stream.
   */
  // NOTE(review): System.out is replaced here but never restored in an
  // @After method, and System.err is not redirected — presumably acceptable
  // for this test class; confirm if tests are ever run in-process with others.
  @Before
  public void setup() {
    sysOutStream = new ByteArrayOutputStream();
    sysOut = spy(new PrintStream(sysOutStream));
    sysErrStream = new ByteArrayOutputStream();
    sysErr = spy(new PrintStream(sysErrStream));
    System.setOut(sysOut);
  }

  /**
   * -lnl with two labels from the RM: output lists each label with its
   * exclusivity flag, comma-separated on one line.
   */
  @Test
  public void testGetClusterNodeLabels() throws Exception {
    YarnClient client = mock(YarnClient.class);
    when(client.getClusterNodeLabels()).thenReturn(
        Arrays.asList(NodeLabel.newInstance("label1"),
            NodeLabel.newInstance("label2")));
    ClusterCLI cli = new ClusterCLI();
    cli.setClient(client);
    cli.setSysOutPrintStream(sysOut);
    cli.setSysErrPrintStream(sysErr);

    int rc = cli.run(new String[] { ClusterCLI.CMD,
        "-" + ClusterCLI.LIST_LABELS_CMD });
    assertEquals(0, rc);

    // Expected single-line listing of the labels returned by the RM client.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.print("Node Labels: <label1:exclusivity=true>,<label2:exclusivity=true>");
    pw.close();
    verify(sysOut).println(baos.toString("UTF-8"));
  }

  /**
   * -lnl with -dnl (directly access the node-label store): the labels must
   * come from the local {@link CommonNodeLabelsManager}, not from the RM
   * client, so the "local*" labels are expected instead of "remote*".
   */
  @Test
  public void testGetClusterNodeLabelsWithLocalAccess() throws Exception {
    YarnClient client = mock(YarnClient.class);
    when(client.getClusterNodeLabels()).thenReturn(
        Arrays.asList(NodeLabel.newInstance("remote1"),
            NodeLabel.newInstance("remote2")));
    ClusterCLI cli = new ClusterCLI();
    cli.setClient(client);
    cli.setSysOutPrintStream(sysOut);
    cli.setSysErrPrintStream(sysErr);
    // Replace the static local store with a mock serving the "local*" labels.
    ClusterCLI.localNodeLabelsManager = mock(CommonNodeLabelsManager.class);
    when(ClusterCLI.localNodeLabelsManager.getClusterNodeLabels()).thenReturn(
        Arrays.asList(NodeLabel.newInstance("local1"),
            NodeLabel.newInstance("local2")));

    int rc = cli.run(new String[] { ClusterCLI.CMD,
        "-" + ClusterCLI.LIST_LABELS_CMD,
        "-" + ClusterCLI.DIRECTLY_ACCESS_NODE_LABEL_STORE });
    assertEquals(0, rc);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    // it should return local* instead of remote*
    pw.print("Node Labels: <local1:exclusivity=true>,<local2:exclusivity=true>");
    pw.close();
    verify(sysOut).println(baos.toString("UTF-8"));
  }

  /**
   * -lnl when the RM reports no labels: the prefix is still printed,
   * followed by nothing.
   */
  @Test
  public void testGetEmptyClusterNodeLabels() throws Exception {
    YarnClient client = mock(YarnClient.class);
    when(client.getClusterNodeLabels()).thenReturn(new ArrayList<NodeLabel>());
    ClusterCLI cli = new ClusterCLI();
    cli.setClient(client);
    cli.setSysOutPrintStream(sysOut);
    cli.setSysErrPrintStream(sysErr);

    int rc = cli.run(new String[] { ClusterCLI.CMD,
        "-" + ClusterCLI.LIST_LABELS_CMD });
    assertEquals(0, rc);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.print("Node Labels: ");
    pw.close();
    verify(sysOut).println(baos.toString("UTF-8"));
  }

  /**
   * {@code yarn cluster --help} exits 0 and prints the usage text verbatim;
   * the expected block below must match the generated help byte-for-byte.
   */
  @Test
  public void testHelp() throws Exception {
    ClusterCLI cli = new ClusterCLI();
    cli.setSysOutPrintStream(sysOut);
    cli.setSysErrPrintStream(sysErr);

    int rc = cli.run(new String[] { "cluster", "--help" });
    assertEquals(0, rc);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.println("usage: yarn cluster");
    pw.println(" -dnl,--directly-access-node-label-store This is DEPRECATED, will be");
    pw.println(" removed in future releases.");
    pw.println(" Directly access node label");
    pw.println(" store, with this option, all");
    pw.println(" node label related operations");
    pw.println(" will NOT connect RM. Instead,");
    pw.println(" they will access/modify stored");
    pw.println(" node labels directly. By");
    pw.println(" default, it is false (access");
    pw.println(" via RM). AND PLEASE NOTE: if");
    pw.println(" you configured");
    pw.println(" yarn.node-labels.fs-store.root-");
    pw.println(" dir to a local directory");
    pw.println(" (instead of NFS or HDFS), this");
    pw.println(" option will only work when the");
    pw.println(" command run on the machine");
    pw.println(" where RM is running. Also, this");
    pw.println(" option is UNSTABLE, could be");
    pw.println(" removed in future releases.");
    pw.println(" -h,--help Displays help for all commands.");
    pw.println(" -lnl,--list-node-labels List cluster node-label");
    pw.println(" collection");
    pw.close();
    verify(sysOut).println(baos.toString("UTF-8"));
  }
}
7,041
41.678788
93
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestNMClientAsync.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.async.impl; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Collections; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicIntegerArray; import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.ServiceOperations; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import 
org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.client.api.NMClient; import org.apache.hadoop.yarn.client.api.async.NMClientAsync; import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.junit.After; import org.junit.Test; public class TestNMClientAsync { private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); private NMClientAsyncImpl asyncClient; private NodeId nodeId; private Token containerToken; @After public void teardown() { ServiceOperations.stop(asyncClient); } @Test (timeout = 10000) public void testNMClientAsync() throws Exception { Configuration conf = new Configuration(); conf.setInt(YarnConfiguration.NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE, 10); // Threads to run are more than the max size of the thread pool int expectedSuccess = 40; int expectedFailure = 40; asyncClient = new MockNMClientAsync1(expectedSuccess, expectedFailure); asyncClient.init(conf); Assert.assertEquals("The max thread pool size is not correctly set", 10, asyncClient.maxThreadPoolSize); asyncClient.start(); for (int i = 0; i < expectedSuccess + expectedFailure; ++i) { if (i == expectedSuccess) { while (!((TestCallbackHandler1) asyncClient.getCallbackHandler()) .isAllSuccessCallsExecuted()) { Thread.sleep(10); } asyncClient.setClient(mockNMClient(1)); } Container container = mockContainer(i); ContainerLaunchContext clc = recordFactory.newRecordInstance(ContainerLaunchContext.class); asyncClient.startContainerAsync(container, clc); } while (!((TestCallbackHandler1) asyncClient.getCallbackHandler()) .isStartAndQueryFailureCallsExecuted()) { Thread.sleep(10); } asyncClient.setClient(mockNMClient(2)); ((TestCallbackHandler1) 
asyncClient.getCallbackHandler()).path = false; for (int i = 0; i < expectedFailure; ++i) { Container container = mockContainer( expectedSuccess + expectedFailure + i); ContainerLaunchContext clc = recordFactory.newRecordInstance(ContainerLaunchContext.class); asyncClient.startContainerAsync(container, clc); } while (!((TestCallbackHandler1) asyncClient.getCallbackHandler()) .isStopFailureCallsExecuted()) { Thread.sleep(10); } for (String errorMsg : ((TestCallbackHandler1) asyncClient.getCallbackHandler()) .errorMsgs) { System.out.println(errorMsg); } Assert.assertEquals("Error occurs in CallbackHandler", 0, ((TestCallbackHandler1) asyncClient.getCallbackHandler()) .errorMsgs.size()); for (String errorMsg : ((MockNMClientAsync1) asyncClient).errorMsgs) { System.out.println(errorMsg); } Assert.assertEquals("Error occurs in ContainerEventProcessor", 0, ((MockNMClientAsync1) asyncClient).errorMsgs.size()); // When the callback functions are all executed, the event processor threads // may still not terminate and the containers may still not removed. 
while (asyncClient.containers.size() > 0) { Thread.sleep(10); } asyncClient.stop(); Assert.assertFalse( "The thread of Container Management Event Dispatcher is still alive", asyncClient.eventDispatcherThread.isAlive()); Assert.assertTrue("The thread pool is not shut down", asyncClient.threadPool.isShutdown()); } private class MockNMClientAsync1 extends NMClientAsyncImpl { private Set<String> errorMsgs = Collections.synchronizedSet(new HashSet<String>()); protected MockNMClientAsync1(int expectedSuccess, int expectedFailure) throws YarnException, IOException { super(MockNMClientAsync1.class.getName(), mockNMClient(0), new TestCallbackHandler1(expectedSuccess, expectedFailure)); } private class MockContainerEventProcessor extends ContainerEventProcessor { public MockContainerEventProcessor(ContainerEvent event) { super(event); } @Override public void run() { try { super.run(); } catch (RuntimeException e) { // If the unexpected throwable comes from error callback functions, it // will break ContainerEventProcessor.run(). 
Therefore, monitor // the exception here errorMsgs.add("Unexpected throwable from callback functions should" + " be ignored by Container " + event.getContainerId()); } } } @Override protected ContainerEventProcessor getContainerEventProcessor( ContainerEvent event) { return new MockContainerEventProcessor(event); } } private class TestCallbackHandler1 implements NMClientAsync.CallbackHandler { private boolean path = true; private int expectedSuccess; private int expectedFailure; private AtomicInteger actualStartSuccess = new AtomicInteger(0); private AtomicInteger actualStartFailure = new AtomicInteger(0); private AtomicInteger actualQuerySuccess = new AtomicInteger(0); private AtomicInteger actualQueryFailure = new AtomicInteger(0); private AtomicInteger actualStopSuccess = new AtomicInteger(0); private AtomicInteger actualStopFailure = new AtomicInteger(0); private AtomicIntegerArray actualStartSuccessArray; private AtomicIntegerArray actualStartFailureArray; private AtomicIntegerArray actualQuerySuccessArray; private AtomicIntegerArray actualQueryFailureArray; private AtomicIntegerArray actualStopSuccessArray; private AtomicIntegerArray actualStopFailureArray; private Set<String> errorMsgs = Collections.synchronizedSet(new HashSet<String>()); public TestCallbackHandler1(int expectedSuccess, int expectedFailure) { this.expectedSuccess = expectedSuccess; this.expectedFailure = expectedFailure; actualStartSuccessArray = new AtomicIntegerArray(expectedSuccess); actualStartFailureArray = new AtomicIntegerArray(expectedFailure); actualQuerySuccessArray = new AtomicIntegerArray(expectedSuccess); actualQueryFailureArray = new AtomicIntegerArray(expectedFailure); actualStopSuccessArray = new AtomicIntegerArray(expectedSuccess); actualStopFailureArray = new AtomicIntegerArray(expectedFailure); } @SuppressWarnings("deprecation") @Override public void onContainerStarted(ContainerId containerId, Map<String, ByteBuffer> allServiceResponse) { if (path) { if 
(containerId.getId() >= expectedSuccess) { errorMsgs.add("Container " + containerId + " should throw the exception onContainerStarted"); return; } actualStartSuccess.addAndGet(1); actualStartSuccessArray.set(containerId.getId(), 1); // move on to the following success tests asyncClient.getContainerStatusAsync(containerId, nodeId); } else { // move on to the following failure tests asyncClient.stopContainerAsync(containerId, nodeId); } // Shouldn't crash the test thread throw new RuntimeException("Ignorable Exception"); } @SuppressWarnings("deprecation") @Override public void onContainerStatusReceived(ContainerId containerId, ContainerStatus containerStatus) { if (containerId.getId() >= expectedSuccess) { errorMsgs.add("Container " + containerId + " should throw the exception onContainerStatusReceived"); return; } actualQuerySuccess.addAndGet(1); actualQuerySuccessArray.set(containerId.getId(), 1); // move on to the following success tests asyncClient.stopContainerAsync(containerId, nodeId); // Shouldn't crash the test thread throw new RuntimeException("Ignorable Exception"); } @SuppressWarnings("deprecation") @Override public void onContainerStopped(ContainerId containerId) { if (containerId.getId() >= expectedSuccess) { errorMsgs.add("Container " + containerId + " should throw the exception onContainerStopped"); return; } actualStopSuccess.addAndGet(1); actualStopSuccessArray.set(containerId.getId(), 1); // Shouldn't crash the test thread throw new RuntimeException("Ignorable Exception"); } @SuppressWarnings("deprecation") @Override public void onStartContainerError(ContainerId containerId, Throwable t) { // If the unexpected throwable comes from success callback functions, it // will be handled by the error callback functions. 
Therefore, monitor // the exception here if (t instanceof RuntimeException) { errorMsgs.add("Unexpected throwable from callback functions should be" + " ignored by Container " + containerId); } if (containerId.getId() < expectedSuccess) { errorMsgs.add("Container " + containerId + " shouldn't throw the exception onStartContainerError"); return; } actualStartFailure.addAndGet(1); actualStartFailureArray.set(containerId.getId() - expectedSuccess, 1); // move on to the following failure tests asyncClient.getContainerStatusAsync(containerId, nodeId); // Shouldn't crash the test thread throw new RuntimeException("Ignorable Exception"); } @SuppressWarnings("deprecation") @Override public void onStopContainerError(ContainerId containerId, Throwable t) { if (t instanceof RuntimeException) { errorMsgs.add("Unexpected throwable from callback functions should be" + " ignored by Container " + containerId); } if (containerId.getId() < expectedSuccess + expectedFailure) { errorMsgs.add("Container " + containerId + " shouldn't throw the exception onStopContainerError"); return; } actualStopFailure.addAndGet(1); actualStopFailureArray.set( containerId.getId() - expectedSuccess - expectedFailure, 1); // Shouldn't crash the test thread throw new RuntimeException("Ignorable Exception"); } @SuppressWarnings("deprecation") @Override public void onGetContainerStatusError(ContainerId containerId, Throwable t) { if (t instanceof RuntimeException) { errorMsgs.add("Unexpected throwable from callback functions should be" + " ignored by Container " + containerId); } if (containerId.getId() < expectedSuccess) { errorMsgs.add("Container " + containerId + " shouldn't throw the exception onGetContainerStatusError"); return; } actualQueryFailure.addAndGet(1); actualQueryFailureArray.set(containerId.getId() - expectedSuccess, 1); // Shouldn't crash the test thread throw new RuntimeException("Ignorable Exception"); } public boolean isAllSuccessCallsExecuted() { boolean isAllSuccessCallsExecuted = 
actualStartSuccess.get() == expectedSuccess && actualQuerySuccess.get() == expectedSuccess && actualStopSuccess.get() == expectedSuccess; if (isAllSuccessCallsExecuted) { assertAtomicIntegerArray(actualStartSuccessArray); assertAtomicIntegerArray(actualQuerySuccessArray); assertAtomicIntegerArray(actualStopSuccessArray); } return isAllSuccessCallsExecuted; } public boolean isStartAndQueryFailureCallsExecuted() { boolean isStartAndQueryFailureCallsExecuted = actualStartFailure.get() == expectedFailure && actualQueryFailure.get() == expectedFailure; if (isStartAndQueryFailureCallsExecuted) { assertAtomicIntegerArray(actualStartFailureArray); assertAtomicIntegerArray(actualQueryFailureArray); } return isStartAndQueryFailureCallsExecuted; } public boolean isStopFailureCallsExecuted() { boolean isStopFailureCallsExecuted = actualStopFailure.get() == expectedFailure; if (isStopFailureCallsExecuted) { assertAtomicIntegerArray(actualStopFailureArray); } return isStopFailureCallsExecuted; } private void assertAtomicIntegerArray(AtomicIntegerArray array) { for (int i = 0; i < array.length(); ++i) { Assert.assertEquals(1, array.get(i)); } } } private NMClient mockNMClient(int mode) throws YarnException, IOException { NMClient client = mock(NMClient.class); switch (mode) { case 0: when(client.startContainer(any(Container.class), any(ContainerLaunchContext.class))).thenReturn( Collections.<String, ByteBuffer>emptyMap()); when(client.getContainerStatus(any(ContainerId.class), any(NodeId.class))).thenReturn( recordFactory.newRecordInstance(ContainerStatus.class)); doNothing().when(client).stopContainer(any(ContainerId.class), any(NodeId.class)); break; case 1: doThrow(RPCUtil.getRemoteException("Start Exception")).when(client) .startContainer(any(Container.class), any(ContainerLaunchContext.class)); doThrow(RPCUtil.getRemoteException("Query Exception")).when(client) .getContainerStatus(any(ContainerId.class), any(NodeId.class)); doThrow(RPCUtil.getRemoteException("Stop 
Exception")).when(client) .stopContainer(any(ContainerId.class), any(NodeId.class)); break; case 2: when(client.startContainer(any(Container.class), any(ContainerLaunchContext.class))).thenReturn( Collections.<String, ByteBuffer>emptyMap()); when(client.getContainerStatus(any(ContainerId.class), any(NodeId.class))).thenReturn( recordFactory.newRecordInstance(ContainerStatus.class)); doThrow(RPCUtil.getRemoteException("Stop Exception")).when(client) .stopContainer(any(ContainerId.class), any(NodeId.class)); } return client; } @Test (timeout = 10000) public void testOutOfOrder() throws Exception { CyclicBarrier barrierA = new CyclicBarrier(2); CyclicBarrier barrierB = new CyclicBarrier(2); CyclicBarrier barrierC = new CyclicBarrier(2); asyncClient = new MockNMClientAsync2(barrierA, barrierB, barrierC); asyncClient.init(new Configuration()); asyncClient.start(); final Container container = mockContainer(1); final ContainerLaunchContext clc = recordFactory.newRecordInstance(ContainerLaunchContext.class); // start container from another thread Thread t = new Thread() { @Override public void run() { asyncClient.startContainerAsync(container, clc); } }; t.start(); barrierA.await(); asyncClient.stopContainerAsync(container.getId(), container.getNodeId()); barrierC.await(); Assert.assertFalse("Starting and stopping should be out of order", ((TestCallbackHandler2) asyncClient.getCallbackHandler()) .exceptionOccurred.get()); } private class MockNMClientAsync2 extends NMClientAsyncImpl { private CyclicBarrier barrierA; private CyclicBarrier barrierB; protected MockNMClientAsync2(CyclicBarrier barrierA, CyclicBarrier barrierB, CyclicBarrier barrierC) throws YarnException, IOException { super(MockNMClientAsync2.class.getName(), mockNMClient(0), new TestCallbackHandler2(barrierC)); this.barrierA = barrierA; this.barrierB = barrierB; } private class MockContainerEventProcessor extends ContainerEventProcessor { public MockContainerEventProcessor(ContainerEvent event) { 
super(event); } @Override public void run() { try { if (event.getType() == ContainerEventType.START_CONTAINER) { barrierA.await(); barrierB.await(); } super.run(); if (event.getType() == ContainerEventType.STOP_CONTAINER) { barrierB.await(); } } catch (InterruptedException e) { e.printStackTrace(); } catch (BrokenBarrierException e) { e.printStackTrace(); } } } @Override protected ContainerEventProcessor getContainerEventProcessor( ContainerEvent event) { return new MockContainerEventProcessor(event); } } private class TestCallbackHandler2 implements NMClientAsync.CallbackHandler { private CyclicBarrier barrierC; private AtomicBoolean exceptionOccurred = new AtomicBoolean(false); public TestCallbackHandler2(CyclicBarrier barrierC) { this.barrierC = barrierC; } @Override public void onContainerStarted(ContainerId containerId, Map<String, ByteBuffer> allServiceResponse) { } @Override public void onContainerStatusReceived(ContainerId containerId, ContainerStatus containerStatus) { } @Override public void onContainerStopped(ContainerId containerId) { } @Override public void onStartContainerError(ContainerId containerId, Throwable t) { if (!t.getMessage().equals(NMClientAsyncImpl.StatefulContainer .OutOfOrderTransition.STOP_BEFORE_START_ERROR_MSG)) { exceptionOccurred.set(true); return; } try { barrierC.await(); } catch (InterruptedException e) { e.printStackTrace(); } catch (BrokenBarrierException e) { e.printStackTrace(); } } @Override public void onGetContainerStatusError(ContainerId containerId, Throwable t) { } @Override public void onStopContainerError(ContainerId containerId, Throwable t) { } } private Container mockContainer(int i) { ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1); ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1); ContainerId containerId = ContainerId.newContainerId(attemptId, i); nodeId = NodeId.newInstance("localhost", 0); // Create an empty record containerToken = 
recordFactory.newRecordInstance(Token.class); return Container.newInstance(containerId, nodeId, null, null, null, containerToken); } }
20,670
36.044803
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/async/impl/TestAMRMClientAsync.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.async.impl; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyFloat; import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import 
org.apache.hadoop.yarn.api.records.NMToken; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.client.api.AMRMClient; import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl; import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.junit.Assert; import org.junit.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import com.google.common.base.Supplier; public class TestAMRMClientAsync { private static final Log LOG = LogFactory.getLog(TestAMRMClientAsync.class); @SuppressWarnings("unchecked") @Test(timeout=10000) public void testAMRMClientAsync() throws Exception { Configuration conf = new Configuration(); final AtomicBoolean heartbeatBlock = new AtomicBoolean(true); List<ContainerStatus> completed1 = Arrays.asList( ContainerStatus.newInstance(newContainerId(0, 0, 0, 0), ContainerState.COMPLETE, "", 0)); List<Container> allocated1 = Arrays.asList( Container.newInstance(null, null, null, null, null, null)); final AllocateResponse response1 = createAllocateResponse( new ArrayList<ContainerStatus>(), allocated1, null); final AllocateResponse response2 = createAllocateResponse(completed1, new ArrayList<Container>(), null); final AllocateResponse emptyResponse = createAllocateResponse( new ArrayList<ContainerStatus>(), new ArrayList<Container>(), null); TestCallbackHandler callbackHandler = new TestCallbackHandler(); final AMRMClient<ContainerRequest> client = mock(AMRMClientImpl.class); final AtomicInteger secondHeartbeatSync = new AtomicInteger(0); when(client.allocate(anyFloat())).thenReturn(response1).thenAnswer(new Answer<AllocateResponse>() { @Override public 
AllocateResponse answer(InvocationOnMock invocation) throws Throwable { secondHeartbeatSync.incrementAndGet(); while(heartbeatBlock.get()) { synchronized(heartbeatBlock) { heartbeatBlock.wait(); } } secondHeartbeatSync.incrementAndGet(); return response2; } }).thenReturn(emptyResponse); when(client.registerApplicationMaster(anyString(), anyInt(), anyString())) .thenReturn(null); when(client.getAvailableResources()).thenAnswer(new Answer<Resource>() { @Override public Resource answer(InvocationOnMock invocation) throws Throwable { // take client lock to simulate behavior of real impl synchronized (client) { Thread.sleep(10); } return null; } }); AMRMClientAsync<ContainerRequest> asyncClient = AMRMClientAsync.createAMRMClientAsync(client, 20, callbackHandler); asyncClient.init(conf); asyncClient.start(); asyncClient.registerApplicationMaster("localhost", 1234, null); // while the CallbackHandler will still only be processing the first response, // heartbeater thread should still be sending heartbeats. // To test this, wait for the second heartbeat to be received. while (secondHeartbeatSync.get() < 1) { Thread.sleep(10); } // heartbeat will be blocked. make sure we can call client methods at this // time. Checks that heartbeat is not holding onto client lock assert(secondHeartbeatSync.get() < 2); asyncClient.getAvailableResources(); // method returned. 
now unblock heartbeat assert(secondHeartbeatSync.get() < 2); synchronized (heartbeatBlock) { heartbeatBlock.set(false); heartbeatBlock.notifyAll(); } // allocated containers should come before completed containers Assert.assertEquals(null, callbackHandler.takeCompletedContainers()); // wait for the allocated containers from the first heartbeat's response while (callbackHandler.takeAllocatedContainers() == null) { Assert.assertEquals(null, callbackHandler.takeCompletedContainers()); Thread.sleep(10); } // wait for the completed containers from the second heartbeat's response while (callbackHandler.takeCompletedContainers() == null) { Thread.sleep(10); } asyncClient.stop(); Assert.assertEquals(null, callbackHandler.takeAllocatedContainers()); Assert.assertEquals(null, callbackHandler.takeCompletedContainers()); } @Test(timeout=10000) public void testAMRMClientAsyncException() throws Exception { String exStr = "TestException"; YarnException mockException = mock(YarnException.class); when(mockException.getMessage()).thenReturn(exStr); runHeartBeatThrowOutException(mockException); } @Test(timeout=10000) public void testAMRMClientAsyncRunTimeException() throws Exception { String exStr = "TestRunTimeException"; RuntimeException mockRunTimeException = mock(RuntimeException.class); when(mockRunTimeException.getMessage()).thenReturn(exStr); runHeartBeatThrowOutException(mockRunTimeException); } private void runHeartBeatThrowOutException(Exception ex) throws Exception{ Configuration conf = new Configuration(); TestCallbackHandler callbackHandler = new TestCallbackHandler(); @SuppressWarnings("unchecked") AMRMClient<ContainerRequest> client = mock(AMRMClientImpl.class); when(client.allocate(anyFloat())).thenThrow(ex); AMRMClientAsync<ContainerRequest> asyncClient = AMRMClientAsync.createAMRMClientAsync(client, 20, callbackHandler); asyncClient.init(conf); asyncClient.start(); synchronized (callbackHandler.notifier) { asyncClient.registerApplicationMaster("localhost", 1234, 
null); while(callbackHandler.savedException == null) { try { callbackHandler.notifier.wait(); } catch (InterruptedException e) { e.printStackTrace(); } } } Assert.assertTrue(callbackHandler.savedException.getMessage().contains( ex.getMessage())); asyncClient.stop(); // stopping should have joined all threads and completed all callbacks Assert.assertTrue(callbackHandler.callbackCount == 0); } @Test (timeout = 10000) public void testAMRMClientAsyncShutDown() throws Exception { Configuration conf = new Configuration(); TestCallbackHandler callbackHandler = new TestCallbackHandler(); @SuppressWarnings("unchecked") AMRMClient<ContainerRequest> client = mock(AMRMClientImpl.class); createAllocateResponse(new ArrayList<ContainerStatus>(), new ArrayList<Container>(), null); when(client.allocate(anyFloat())).thenThrow( new ApplicationAttemptNotFoundException("app not found, shut down")); AMRMClientAsync<ContainerRequest> asyncClient = AMRMClientAsync.createAMRMClientAsync(client, 10, callbackHandler); asyncClient.init(conf); asyncClient.start(); asyncClient.registerApplicationMaster("localhost", 1234, null); Thread.sleep(50); verify(client, times(1)).allocate(anyFloat()); asyncClient.stop(); } @Test (timeout = 10000) public void testAMRMClientAsyncShutDownWithWaitFor() throws Exception { Configuration conf = new Configuration(); final TestCallbackHandler callbackHandler = new TestCallbackHandler(); @SuppressWarnings("unchecked") AMRMClient<ContainerRequest> client = mock(AMRMClientImpl.class); when(client.allocate(anyFloat())).thenThrow( new ApplicationAttemptNotFoundException("app not found, shut down")); AMRMClientAsync<ContainerRequest> asyncClient = AMRMClientAsync.createAMRMClientAsync(client, 10, callbackHandler); asyncClient.init(conf); asyncClient.start(); Supplier<Boolean> checker = new Supplier<Boolean>() { @Override public Boolean get() { return callbackHandler.reboot; } }; asyncClient.registerApplicationMaster("localhost", 1234, null); 
asyncClient.waitFor(checker); asyncClient.stop(); // stopping should have joined all threads and completed all callbacks Assert.assertTrue(callbackHandler.callbackCount == 0); verify(client, times(1)).allocate(anyFloat()); asyncClient.stop(); } @Test (timeout = 5000) public void testCallAMRMClientAsyncStopFromCallbackHandler() throws YarnException, IOException, InterruptedException { Configuration conf = new Configuration(); TestCallbackHandler2 callbackHandler = new TestCallbackHandler2(); @SuppressWarnings("unchecked") AMRMClient<ContainerRequest> client = mock(AMRMClientImpl.class); List<ContainerStatus> completed = Arrays.asList( ContainerStatus.newInstance(newContainerId(0, 0, 0, 0), ContainerState.COMPLETE, "", 0)); final AllocateResponse response = createAllocateResponse(completed, new ArrayList<Container>(), null); when(client.allocate(anyFloat())).thenReturn(response); AMRMClientAsync<ContainerRequest> asyncClient = AMRMClientAsync.createAMRMClientAsync(client, 20, callbackHandler); callbackHandler.asynClient = asyncClient; asyncClient.init(conf); asyncClient.start(); synchronized (callbackHandler.notifier) { asyncClient.registerApplicationMaster("localhost", 1234, null); while(callbackHandler.notify == false) { try { callbackHandler.notifier.wait(); } catch (InterruptedException e) { e.printStackTrace(); } } } } @Test (timeout = 5000) public void testCallAMRMClientAsyncStopFromCallbackHandlerWithWaitFor() throws YarnException, IOException, InterruptedException { Configuration conf = new Configuration(); final TestCallbackHandler2 callbackHandler = new TestCallbackHandler2(); @SuppressWarnings("unchecked") AMRMClient<ContainerRequest> client = mock(AMRMClientImpl.class); List<ContainerStatus> completed = Arrays.asList( ContainerStatus.newInstance(newContainerId(0, 0, 0, 0), ContainerState.COMPLETE, "", 0)); final AllocateResponse response = createAllocateResponse(completed, new ArrayList<Container>(), null); 
when(client.allocate(anyFloat())).thenReturn(response); AMRMClientAsync<ContainerRequest> asyncClient = AMRMClientAsync.createAMRMClientAsync(client, 20, callbackHandler); callbackHandler.asynClient = asyncClient; asyncClient.init(conf); asyncClient.start(); Supplier<Boolean> checker = new Supplier<Boolean>() { @Override public Boolean get() { return callbackHandler.notify; } }; asyncClient.registerApplicationMaster("localhost", 1234, null); asyncClient.waitFor(checker); Assert.assertTrue(checker.get()); } void runCallBackThrowOutException(TestCallbackHandler2 callbackHandler) throws InterruptedException, YarnException, IOException { Configuration conf = new Configuration(); @SuppressWarnings("unchecked") AMRMClient<ContainerRequest> client = mock(AMRMClientImpl.class); List<ContainerStatus> completed = Arrays.asList( ContainerStatus.newInstance(newContainerId(0, 0, 0, 0), ContainerState.COMPLETE, "", 0)); final AllocateResponse response = createAllocateResponse(completed, new ArrayList<Container>(), null); when(client.allocate(anyFloat())).thenReturn(response); AMRMClientAsync<ContainerRequest> asyncClient = AMRMClientAsync.createAMRMClientAsync(client, 20, callbackHandler); callbackHandler.asynClient = asyncClient; callbackHandler.throwOutException = true; asyncClient.init(conf); asyncClient.start(); // call register and wait for error callback and stop synchronized (callbackHandler.notifier) { asyncClient.registerApplicationMaster("localhost", 1234, null); while(callbackHandler.notify == false) { try { callbackHandler.notifier.wait(); } catch (InterruptedException e) { e.printStackTrace(); } } } // verify error invoked verify(callbackHandler, times(0)).getProgress(); verify(callbackHandler, times(1)).onError(any(Exception.class)); // sleep to wait for a few heartbeat calls that can trigger callbacks Thread.sleep(50); // verify no more invocations after the first one. // ie. 
callback thread has stopped verify(callbackHandler, times(0)).getProgress(); verify(callbackHandler, times(1)).onError(any(Exception.class)); } @Test (timeout = 5000) public void testCallBackThrowOutException() throws YarnException, IOException, InterruptedException { // test exception in callback with app calling stop() on app.onError() TestCallbackHandler2 callbackHandler = spy(new TestCallbackHandler2()); runCallBackThrowOutException(callbackHandler); } @Test (timeout = 5000) public void testCallBackThrowOutExceptionNoStop() throws YarnException, IOException, InterruptedException { // test exception in callback with app not calling stop() on app.onError() TestCallbackHandler2 callbackHandler = spy(new TestCallbackHandler2()); callbackHandler.stop = false; runCallBackThrowOutException(callbackHandler); } private AllocateResponse createAllocateResponse( List<ContainerStatus> completed, List<Container> allocated, List<NMToken> nmTokens) { AllocateResponse response = AllocateResponse.newInstance(0, completed, allocated, new ArrayList<NodeReport>(), null, null, 1, null, nmTokens); return response; } public static ContainerId newContainerId(int appId, int appAttemptId, long timestamp, int containerId) { ApplicationId applicationId = ApplicationId.newInstance(timestamp, appId); ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, appAttemptId); return ContainerId.newContainerId(applicationAttemptId, containerId); } private class TestCallbackHandler implements AMRMClientAsync.CallbackHandler { private volatile List<ContainerStatus> completedContainers; private volatile List<Container> allocatedContainers; Exception savedException = null; volatile boolean reboot = false; Object notifier = new Object(); int callbackCount = 0; public List<ContainerStatus> takeCompletedContainers() { List<ContainerStatus> ret = completedContainers; if (ret == null) { return null; } completedContainers = null; synchronized (ret) { ret.notify(); } return 
ret; } public List<Container> takeAllocatedContainers() { List<Container> ret = allocatedContainers; if (ret == null) { return null; } allocatedContainers = null; synchronized (ret) { ret.notify(); } return ret; } @Override public void onContainersCompleted(List<ContainerStatus> statuses) { completedContainers = statuses; // wait for containers to be taken before returning synchronized (completedContainers) { while (completedContainers != null) { try { completedContainers.wait(); } catch (InterruptedException ex) { LOG.error("Interrupted during wait", ex); } } } } @Override public void onContainersAllocated(List<Container> containers) { allocatedContainers = containers; // wait for containers to be taken before returning synchronized (allocatedContainers) { while (allocatedContainers != null) { try { allocatedContainers.wait(); } catch (InterruptedException ex) { LOG.error("Interrupted during wait", ex); } } } } @Override public void onShutdownRequest() { reboot = true; synchronized (notifier) { notifier.notifyAll(); } } @Override public void onNodesUpdated(List<NodeReport> updatedNodes) {} @Override public float getProgress() { callbackCount++; return 0.5f; } @Override public void onError(Throwable e) { savedException = new Exception(e.getMessage()); synchronized (notifier) { notifier.notifyAll(); } } } private class TestCallbackHandler2 implements AMRMClientAsync.CallbackHandler { Object notifier = new Object(); @SuppressWarnings("rawtypes") AMRMClientAsync asynClient; boolean stop = true; volatile boolean notify = false; boolean throwOutException = false; @Override public void onContainersCompleted(List<ContainerStatus> statuses) { if (throwOutException) { throw new YarnRuntimeException("Exception from callback handler"); } } @Override public void onContainersAllocated(List<Container> containers) {} @Override public void onShutdownRequest() {} @Override public void onNodesUpdated(List<NodeReport> updatedNodes) {} @Override public float getProgress() { 
callStopAndNotify(); return 0; } @Override public void onError(Throwable e) { Assert.assertEquals(e.getMessage(), "Exception from callback handler"); callStopAndNotify(); } void callStopAndNotify() { if(stop) { asynClient.stop(); } notify = true; synchronized (notifier) { notifier.notifyAll(); } } } }
19,756
35.318015
103
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.impl; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; import 
org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.client.api.AHSClient; import org.apache.hadoop.yarn.exceptions.YarnException; import org.junit.Test; public class TestAHSClient { @Test public void testClientStop() { Configuration conf = new Configuration(); AHSClient client = AHSClient.createAHSClient(); client.init(conf); client.start(); client.stop(); } @Test(timeout = 10000) public void testGetApplications() throws YarnException, IOException { Configuration conf = new Configuration(); final AHSClient client = new MockAHSClient(); client.init(conf); client.start(); List<ApplicationReport> expectedReports = ((MockAHSClient) client).getReports(); List<ApplicationReport> reports = client.getApplications(); Assert.assertEquals(reports, expectedReports); reports = client.getApplications(); Assert.assertEquals(reports.size(), 4); client.stop(); } @Test(timeout = 10000) public void testGetApplicationReport() throws YarnException, IOException { Configuration conf = new Configuration(); final AHSClient client = new MockAHSClient(); client.init(conf); client.start(); List<ApplicationReport> expectedReports = ((MockAHSClient) 
client).getReports(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationReport report = client.getApplicationReport(applicationId); Assert.assertEquals(report, expectedReports.get(0)); Assert.assertEquals(report.getApplicationId().toString(), expectedReports .get(0).getApplicationId().toString()); client.stop(); } @Test(timeout = 10000) public void testGetApplicationAttempts() throws YarnException, IOException { Configuration conf = new Configuration(); final AHSClient client = new MockAHSClient(); client.init(conf); client.start(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); List<ApplicationAttemptReport> reports = client.getApplicationAttempts(applicationId); Assert.assertNotNull(reports); Assert.assertEquals(reports.get(0).getApplicationAttemptId(), ApplicationAttemptId.newInstance(applicationId, 1)); Assert.assertEquals(reports.get(1).getApplicationAttemptId(), ApplicationAttemptId.newInstance(applicationId, 2)); client.stop(); } @Test(timeout = 10000) public void testGetApplicationAttempt() throws YarnException, IOException { Configuration conf = new Configuration(); final AHSClient client = new MockAHSClient(); client.init(conf); client.start(); List<ApplicationReport> expectedReports = ((MockAHSClient) client).getReports(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1); ApplicationAttemptReport report = client.getApplicationAttemptReport(appAttemptId); Assert.assertNotNull(report); Assert.assertEquals(report.getApplicationAttemptId().toString(), expectedReports.get(0).getCurrentApplicationAttemptId().toString()); client.stop(); } @Test(timeout = 10000) public void testGetContainers() throws YarnException, IOException { Configuration conf = new Configuration(); final AHSClient client = new MockAHSClient(); client.init(conf); client.start(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); 
ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1); List<ContainerReport> reports = client.getContainers(appAttemptId); Assert.assertNotNull(reports); Assert.assertEquals(reports.get(0).getContainerId(), (ContainerId.newContainerId(appAttemptId, 1))); Assert.assertEquals(reports.get(1).getContainerId(), (ContainerId.newContainerId(appAttemptId, 2))); client.stop(); } @Test(timeout = 10000) public void testGetContainerReport() throws YarnException, IOException { Configuration conf = new Configuration(); final AHSClient client = new MockAHSClient(); client.init(conf); client.start(); List<ApplicationReport> expectedReports = ((MockAHSClient) client).getReports(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(applicationId, 1); ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1); ContainerReport report = client.getContainerReport(containerId); Assert.assertNotNull(report); Assert.assertEquals(report.getContainerId().toString(), (ContainerId .newContainerId(expectedReports.get(0).getCurrentApplicationAttemptId(), 1)) .toString()); client.stop(); } private static class MockAHSClient extends AHSClientImpl { // private ApplicationReport mockReport; private List<ApplicationReport> reports = new ArrayList<ApplicationReport>(); private HashMap<ApplicationId, List<ApplicationAttemptReport>> attempts = new HashMap<ApplicationId, List<ApplicationAttemptReport>>(); private HashMap<ApplicationAttemptId, List<ContainerReport>> containers = new HashMap<ApplicationAttemptId, List<ContainerReport>>(); GetApplicationsResponse mockAppResponse = mock(GetApplicationsResponse.class); GetApplicationReportResponse mockResponse = mock(GetApplicationReportResponse.class); GetApplicationAttemptsResponse mockAppAttemptsResponse = mock(GetApplicationAttemptsResponse.class); GetApplicationAttemptReportResponse mockAttemptResponse = 
mock(GetApplicationAttemptReportResponse.class); GetContainersResponse mockContainersResponse = mock(GetContainersResponse.class); GetContainerReportResponse mockContainerResponse = mock(GetContainerReportResponse.class); public MockAHSClient() { super(); createAppReports(); } @Override public void start() { ahsClient = mock(ApplicationHistoryProtocol.class); try { when( ahsClient .getApplicationReport(any(GetApplicationReportRequest.class))) .thenReturn(mockResponse); when(ahsClient.getApplications(any(GetApplicationsRequest.class))) .thenReturn(mockAppResponse); when( ahsClient .getApplicationAttemptReport(any(GetApplicationAttemptReportRequest.class))) .thenReturn(mockAttemptResponse); when( ahsClient .getApplicationAttempts(any(GetApplicationAttemptsRequest.class))) .thenReturn(mockAppAttemptsResponse); when(ahsClient.getContainers(any(GetContainersRequest.class))) .thenReturn(mockContainersResponse); when(ahsClient.getContainerReport(any(GetContainerReportRequest.class))) .thenReturn(mockContainerResponse); } catch (YarnException e) { Assert.fail("Exception is not expected."); } catch (IOException e) { Assert.fail("Exception is not expected."); } } @Override public List<ApplicationReport> getApplications() throws YarnException, IOException { when(mockAppResponse.getApplicationList()).thenReturn(reports); return super.getApplications(); } @Override public ApplicationReport getApplicationReport(ApplicationId appId) throws YarnException, IOException { when(mockResponse.getApplicationReport()).thenReturn(getReport(appId)); return super.getApplicationReport(appId); } @Override public List<ApplicationAttemptReport> getApplicationAttempts( ApplicationId appId) throws YarnException, IOException { when(mockAppAttemptsResponse.getApplicationAttemptList()).thenReturn( getAttempts(appId)); return super.getApplicationAttempts(appId); } @Override public ApplicationAttemptReport getApplicationAttemptReport( ApplicationAttemptId appAttemptId) throws YarnException, IOException 
{ when(mockAttemptResponse.getApplicationAttemptReport()).thenReturn( getAttempt(appAttemptId)); return super.getApplicationAttemptReport(appAttemptId); } @Override public List<ContainerReport> getContainers(ApplicationAttemptId appAttemptId) throws YarnException, IOException { when(mockContainersResponse.getContainerList()).thenReturn( getContainersReport(appAttemptId)); return super.getContainers(appAttemptId); } @Override public ContainerReport getContainerReport(ContainerId containerId) throws YarnException, IOException { when(mockContainerResponse.getContainerReport()).thenReturn( getContainer(containerId)); return super.getContainerReport(containerId); } @Override public void stop() { } public ApplicationReport getReport(ApplicationId appId) { for (int i = 0; i < reports.size(); ++i) { if (appId.toString().equalsIgnoreCase( reports.get(i).getApplicationId().toString())) { return reports.get(i); } } return null; } public List<ApplicationAttemptReport> getAttempts(ApplicationId appId) { return attempts.get(appId); } public ApplicationAttemptReport getAttempt(ApplicationAttemptId appAttemptId) { return attempts.get(appAttemptId.getApplicationId()).get(0); } public List<ContainerReport> getContainersReport( ApplicationAttemptId appAttemptId) { return containers.get(appAttemptId); } public ContainerReport getContainer(ContainerId containerId) { return containers.get(containerId.getApplicationAttemptId()).get(0); } public List<ApplicationReport> getReports() { return this.reports; } private void createAppReports() { ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationReport newApplicationReport = ApplicationReport.newInstance(applicationId, ApplicationAttemptId.newInstance(applicationId, 1), "user", "queue", "appname", "host", 124, null, YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null); List<ApplicationReport> applicationReports = new 
ArrayList<ApplicationReport>(); applicationReports.add(newApplicationReport); List<ApplicationAttemptReport> appAttempts = new ArrayList<ApplicationAttemptReport>(); ApplicationAttemptReport attempt = ApplicationAttemptReport.newInstance( ApplicationAttemptId.newInstance(applicationId, 1), "host", 124, "url", "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, ContainerId.newContainerId( newApplicationReport.getCurrentApplicationAttemptId(), 1)); appAttempts.add(attempt); ApplicationAttemptReport attempt1 = ApplicationAttemptReport.newInstance( ApplicationAttemptId.newInstance(applicationId, 2), "host", 124, "url", "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, ContainerId.newContainerId( newApplicationReport.getCurrentApplicationAttemptId(), 2)); appAttempts.add(attempt1); attempts.put(applicationId, appAttempts); List<ContainerReport> containerReports = new ArrayList<ContainerReport>(); ContainerReport container = ContainerReport.newInstance( ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE, "http://" + NodeId.newInstance("host", 2345).toString()); containerReports.add(container); ContainerReport container1 = ContainerReport.newInstance( ContainerId.newContainerId(attempt.getApplicationAttemptId(), 2), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, "diagnosticInfo", "logURL", 0, ContainerState.COMPLETE, "http://" + NodeId.newInstance("host", 2345).toString()); containerReports.add(container1); containers.put(attempt.getApplicationAttemptId(), containerReports); ApplicationId applicationId2 = ApplicationId.newInstance(1234, 6); ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(applicationId2, ApplicationAttemptId.newInstance(applicationId2, 2), "user2", "queue2", "appname2", "host2", 125, null, YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2, 
FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f, "NON-YARN", null); applicationReports.add(newApplicationReport2); ApplicationId applicationId3 = ApplicationId.newInstance(1234, 7); ApplicationReport newApplicationReport3 = ApplicationReport.newInstance(applicationId3, ApplicationAttemptId.newInstance(applicationId3, 3), "user3", "queue3", "appname3", "host3", 126, null, YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE", null); applicationReports.add(newApplicationReport3); ApplicationId applicationId4 = ApplicationId.newInstance(1234, 8); ApplicationReport newApplicationReport4 = ApplicationReport.newInstance(applicationId4, ApplicationAttemptId.newInstance(applicationId4, 4), "user4", "queue4", "appname4", "host4", 127, null, YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f, "NON-MAPREDUCE", null); applicationReports.add(newApplicationReport4); reports = applicationReports; } } }
17,061
39.62381
88
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.impl; import static org.junit.Assert.assertEquals; import java.util.Arrays; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.net.DNSToSwitchMapping; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.client.api.AMRMClient; import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; import org.apache.hadoop.yarn.client.api.InvalidContainerRequestException; import org.junit.Test; public class TestAMRMClientContainerRequest { @Test public void testFillInRacks() { AMRMClientImpl<ContainerRequest> client = new AMRMClientImpl<ContainerRequest>(); Configuration conf = new Configuration(); conf.setClass( CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class); client.init(conf); Resource capability = Resource.newInstance(1024, 1); ContainerRequest request = new ContainerRequest(capability, new String[] {"host1", "host2"}, new String[] {"/rack2"}, 
Priority.newInstance(1)); client.addContainerRequest(request); verifyResourceRequest(client, request, "host1", true); verifyResourceRequest(client, request, "host2", true); verifyResourceRequest(client, request, "/rack1", true); verifyResourceRequest(client, request, "/rack2", true); verifyResourceRequest(client, request, ResourceRequest.ANY, true); } @Test public void testDisableLocalityRelaxation() { AMRMClientImpl<ContainerRequest> client = new AMRMClientImpl<ContainerRequest>(); Configuration conf = new Configuration(); conf.setClass( CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class); client.init(conf); Resource capability = Resource.newInstance(1024, 1); ContainerRequest nodeLevelRequest = new ContainerRequest(capability, new String[] {"host1", "host2"}, null, Priority.newInstance(1), false); client.addContainerRequest(nodeLevelRequest); verifyResourceRequest(client, nodeLevelRequest, ResourceRequest.ANY, false); verifyResourceRequest(client, nodeLevelRequest, "/rack1", false); verifyResourceRequest(client, nodeLevelRequest, "host1", true); verifyResourceRequest(client, nodeLevelRequest, "host2", true); // Make sure we don't get any errors with two node-level requests at the // same priority ContainerRequest nodeLevelRequest2 = new ContainerRequest(capability, new String[] {"host2", "host3"}, null, Priority.newInstance(1), false); client.addContainerRequest(nodeLevelRequest2); AMRMClient.ContainerRequest rackLevelRequest = new AMRMClient.ContainerRequest(capability, null, new String[] {"/rack3", "/rack4"}, Priority.newInstance(2), false); client.addContainerRequest(rackLevelRequest); verifyResourceRequest(client, rackLevelRequest, ResourceRequest.ANY, false); verifyResourceRequest(client, rackLevelRequest, "/rack3", true); verifyResourceRequest(client, rackLevelRequest, "/rack4", true); // Make sure we don't get any errors with two rack-level requests at the // same priority 
AMRMClient.ContainerRequest rackLevelRequest2 = new AMRMClient.ContainerRequest(capability, null, new String[] {"/rack4", "/rack5"}, Priority.newInstance(2), false); client.addContainerRequest(rackLevelRequest2); ContainerRequest bothLevelRequest = new ContainerRequest(capability, new String[] {"host3", "host4"}, new String[] {"rack1", "/otherrack"}, Priority.newInstance(3), false); client.addContainerRequest(bothLevelRequest); verifyResourceRequest(client, bothLevelRequest, ResourceRequest.ANY, false); verifyResourceRequest(client, bothLevelRequest, "rack1", true); verifyResourceRequest(client, bothLevelRequest, "/otherrack", true); verifyResourceRequest(client, bothLevelRequest, "host3", true); verifyResourceRequest(client, bothLevelRequest, "host4", true); // Make sure we don't get any errors with two both-level requests at the // same priority ContainerRequest bothLevelRequest2 = new ContainerRequest(capability, new String[] {"host4", "host5"}, new String[] {"rack1", "/otherrack2"}, Priority.newInstance(3), false); client.addContainerRequest(bothLevelRequest2); } @Test (expected = InvalidContainerRequestException.class) public void testDifferentLocalityRelaxationSamePriority() { AMRMClientImpl<ContainerRequest> client = new AMRMClientImpl<ContainerRequest>(); Configuration conf = new Configuration(); conf.setClass( CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class); client.init(conf); Resource capability = Resource.newInstance(1024, 1); ContainerRequest request1 = new ContainerRequest(capability, new String[] {"host1", "host2"}, null, Priority.newInstance(1), false); client.addContainerRequest(request1); ContainerRequest request2 = new ContainerRequest(capability, new String[] {"host3"}, null, Priority.newInstance(1), true); client.addContainerRequest(request2); } @Test public void testInvalidValidWhenOldRemoved() { AMRMClientImpl<ContainerRequest> client = new AMRMClientImpl<ContainerRequest>(); 
Configuration conf = new Configuration(); conf.setClass( CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class); client.init(conf); Resource capability = Resource.newInstance(1024, 1); ContainerRequest request1 = new ContainerRequest(capability, new String[] {"host1", "host2"}, null, Priority.newInstance(1), false); client.addContainerRequest(request1); client.removeContainerRequest(request1); ContainerRequest request2 = new ContainerRequest(capability, new String[] {"host3"}, null, Priority.newInstance(1), true); client.addContainerRequest(request2); client.removeContainerRequest(request2); ContainerRequest request3 = new ContainerRequest(capability, new String[] {"host1", "host2"}, null, Priority.newInstance(1), false); client.addContainerRequest(request3); client.removeContainerRequest(request3); ContainerRequest request4 = new ContainerRequest(capability, null, new String[] {"rack1"}, Priority.newInstance(1), true); client.addContainerRequest(request4); } @Test (expected = InvalidContainerRequestException.class) public void testLocalityRelaxationDifferentLevels() { AMRMClientImpl<ContainerRequest> client = new AMRMClientImpl<ContainerRequest>(); Configuration conf = new Configuration(); conf.setClass( CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class); client.init(conf); Resource capability = Resource.newInstance(1024, 1); ContainerRequest request1 = new ContainerRequest(capability, new String[] {"host1", "host2"}, null, Priority.newInstance(1), false); client.addContainerRequest(request1); ContainerRequest request2 = new ContainerRequest(capability, null, new String[] {"rack1"}, Priority.newInstance(1), true); client.addContainerRequest(request2); } private static class MyResolver implements DNSToSwitchMapping { @Override public List<String> resolve(List<String> names) { return Arrays.asList("/rack1"); } @Override public void 
reloadCachedMappings() {} @Override public void reloadCachedMappings(List<String> names) { } } private void verifyResourceRequest( AMRMClientImpl<ContainerRequest> client, ContainerRequest request, String location, boolean expectedRelaxLocality) { ResourceRequest ask = client.remoteRequestsTable.get(request.getPriority()) .get(location).get(request.getCapability()).remoteRequest; assertEquals(location, ask.getResourceName()); assertEquals(1, ask.getNumContainers()); assertEquals(expectedRelaxLocality, ask.getRelaxLocality()); } }
9,532
39.739316
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.impl; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.IOException; import java.nio.ByteBuffer; import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Set; import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import 
org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.NMToken; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.client.api.AMRMClient; import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; import org.apache.hadoop.yarn.client.api.InvalidContainerRequestException; import org.apache.hadoop.yarn.client.api.NMTokenCache; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Records; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import org.mortbay.log.Log; import com.google.common.base.Supplier; public class TestAMRMClient { static Configuration conf = null; static MiniYARNCluster yarnCluster = null; static YarnClient yarnClient = null; static List<NodeReport> nodeReports = null; static ApplicationAttemptId attemptId = null; static int nodeCount = 3; static final int rolling_interval_sec = 13; static final long am_expire_ms = 4000; static Resource capability; static Priority priority; static Priority priority2; static String node; static String rack; static String[] nodes; static String[] racks; private final static int DEFAULT_ITERATION = 3; @BeforeClass public static void setup() throws Exception { // start minicluster conf = new YarnConfiguration(); conf.setLong( YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS, rolling_interval_sec); conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms); conf.setInt(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 100); conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1); yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1); yarnCluster.init(conf); yarnCluster.start(); // start rm client yarnClient = YarnClient.createYarnClient(); yarnClient.init(conf); yarnClient.start(); // get node info nodeReports = yarnClient.getNodeReports(NodeState.RUNNING); priority = Priority.newInstance(1); priority2 = Priority.newInstance(2); capability = Resource.newInstance(1024, 1); node = 
nodeReports.get(0).getNodeId().getHost(); rack = nodeReports.get(0).getRackName(); nodes = new String[]{ node }; racks = new String[]{ rack }; } @Before public void startApp() throws Exception { // submit new app ApplicationSubmissionContext appContext = yarnClient.createApplication().getApplicationSubmissionContext(); ApplicationId appId = appContext.getApplicationId(); // set the application name appContext.setApplicationName("Test"); // Set the priority for the application master Priority pri = Records.newRecord(Priority.class); pri.setPriority(0); appContext.setPriority(pri); // Set the queue to which this application is to be submitted in the RM appContext.setQueue("default"); // Set up the container launch context for the application master ContainerLaunchContext amContainer = BuilderUtils.newContainerLaunchContext( Collections.<String, LocalResource> emptyMap(), new HashMap<String, String>(), Arrays.asList("sleep", "100"), new HashMap<String, ByteBuffer>(), null, new HashMap<ApplicationAccessType, String>()); appContext.setAMContainerSpec(amContainer); appContext.setResource(Resource.newInstance(1024, 1)); // Create the request to send to the applications manager SubmitApplicationRequest appRequest = Records .newRecord(SubmitApplicationRequest.class); appRequest.setApplicationSubmissionContext(appContext); // Submit the application to the applications manager yarnClient.submitApplication(appContext); // wait for app to start RMAppAttempt appAttempt = null; while (true) { ApplicationReport appReport = yarnClient.getApplicationReport(appId); if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) { attemptId = appReport.getCurrentApplicationAttemptId(); appAttempt = yarnCluster.getResourceManager().getRMContext().getRMApps() .get(attemptId.getApplicationId()).getCurrentAppAttempt(); while (true) { if (appAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) { break; } } break; } } // Just dig into the ResourceManager and get the 
AMRMToken just for the sake // of testing. UserGroupInformation.setLoginUser(UserGroupInformation .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName())); // emulate RM setup of AMRM token in credentials by adding the token // *before* setting the token service UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken()); appAttempt.getAMRMToken().setService(ClientRMProxy.getAMRMTokenService(conf)); } @After public void cancelApp() throws YarnException, IOException { yarnClient.killApplication(attemptId.getApplicationId()); attemptId = null; } @AfterClass public static void tearDown() { if (yarnClient != null && yarnClient.getServiceState() == STATE.STARTED) { yarnClient.stop(); } if (yarnCluster != null && yarnCluster.getServiceState() == STATE.STARTED) { yarnCluster.stop(); } } @Test (timeout=60000) public void testAMRMClientMatchingFit() throws YarnException, IOException { AMRMClient<ContainerRequest> amClient = null; try { // start am rm client amClient = AMRMClient.<ContainerRequest>createAMRMClient(); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host", 10000, ""); Resource capability1 = Resource.newInstance(1024, 2); Resource capability2 = Resource.newInstance(1024, 1); Resource capability3 = Resource.newInstance(1000, 2); Resource capability4 = Resource.newInstance(2000, 1); Resource capability5 = Resource.newInstance(1000, 3); Resource capability6 = Resource.newInstance(2000, 1); Resource capability7 = Resource.newInstance(2000, 1); ContainerRequest storedContainer1 = new ContainerRequest(capability1, nodes, racks, priority); ContainerRequest storedContainer2 = new ContainerRequest(capability2, nodes, racks, priority); ContainerRequest storedContainer3 = new ContainerRequest(capability3, nodes, racks, priority); ContainerRequest storedContainer4 = new ContainerRequest(capability4, nodes, racks, priority); ContainerRequest storedContainer5 = new ContainerRequest(capability5, nodes, racks, priority); 
ContainerRequest storedContainer6 = new ContainerRequest(capability6, nodes, racks, priority); ContainerRequest storedContainer7 = new ContainerRequest(capability7, nodes, racks, priority2, false); amClient.addContainerRequest(storedContainer1); amClient.addContainerRequest(storedContainer2); amClient.addContainerRequest(storedContainer3); amClient.addContainerRequest(storedContainer4); amClient.addContainerRequest(storedContainer5); amClient.addContainerRequest(storedContainer6); amClient.addContainerRequest(storedContainer7); // test matching of containers List<? extends Collection<ContainerRequest>> matches; ContainerRequest storedRequest; // exact match Resource testCapability1 = Resource.newInstance(1024, 2); matches = amClient.getMatchingRequests(priority, node, testCapability1); verifyMatches(matches, 1); storedRequest = matches.get(0).iterator().next(); assertEquals(storedContainer1, storedRequest); amClient.removeContainerRequest(storedContainer1); // exact matching with order maintained Resource testCapability2 = Resource.newInstance(2000, 1); matches = amClient.getMatchingRequests(priority, node, testCapability2); verifyMatches(matches, 2); // must be returned in the order they were made int i = 0; for(ContainerRequest storedRequest1 : matches.get(0)) { if(i++ == 0) { assertEquals(storedContainer4, storedRequest1); } else { assertEquals(storedContainer6, storedRequest1); } } amClient.removeContainerRequest(storedContainer6); // matching with larger container. 
all requests returned Resource testCapability3 = Resource.newInstance(4000, 4); matches = amClient.getMatchingRequests(priority, node, testCapability3); assert(matches.size() == 4); Resource testCapability4 = Resource.newInstance(1024, 2); matches = amClient.getMatchingRequests(priority, node, testCapability4); assert(matches.size() == 2); // verify non-fitting containers are not returned and fitting ones are for(Collection<ContainerRequest> testSet : matches) { assertEquals(1, testSet.size()); ContainerRequest testRequest = testSet.iterator().next(); assertTrue(testRequest != storedContainer4); assertTrue(testRequest != storedContainer5); assert(testRequest == storedContainer2 || testRequest == storedContainer3); } Resource testCapability5 = Resource.newInstance(512, 4); matches = amClient.getMatchingRequests(priority, node, testCapability5); assert(matches.size() == 0); // verify requests without relaxed locality are only returned at specific // locations Resource testCapability7 = Resource.newInstance(2000, 1); matches = amClient.getMatchingRequests(priority2, ResourceRequest.ANY, testCapability7); assert(matches.size() == 0); matches = amClient.getMatchingRequests(priority2, node, testCapability7); assert(matches.size() == 1); amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } } private void verifyMatches( List<? 
extends Collection<ContainerRequest>> matches, int matchSize) { assertEquals(1, matches.size()); assertEquals(matches.get(0).size(), matchSize); } @Test (timeout=60000) public void testAMRMClientMatchingFitInferredRack() throws YarnException, IOException { AMRMClientImpl<ContainerRequest> amClient = null; try { // start am rm client amClient = new AMRMClientImpl<ContainerRequest>(); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host", 10000, ""); Resource capability = Resource.newInstance(1024, 2); ContainerRequest storedContainer1 = new ContainerRequest(capability, nodes, null, priority); amClient.addContainerRequest(storedContainer1); // verify matching with original node and inferred rack List<? extends Collection<ContainerRequest>> matches; ContainerRequest storedRequest; // exact match node matches = amClient.getMatchingRequests(priority, node, capability); verifyMatches(matches, 1); storedRequest = matches.get(0).iterator().next(); assertEquals(storedContainer1, storedRequest); // inferred match rack matches = amClient.getMatchingRequests(priority, rack, capability); verifyMatches(matches, 1); storedRequest = matches.get(0).iterator().next(); assertEquals(storedContainer1, storedRequest); // inferred rack match no longer valid after request is removed amClient.removeContainerRequest(storedContainer1); matches = amClient.getMatchingRequests(priority, rack, capability); assertTrue(matches.isEmpty()); amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } } @Test //(timeout=60000) public void testAMRMClientMatchStorage() throws YarnException, IOException { AMRMClientImpl<ContainerRequest> amClient = null; try { // start am rm client amClient = (AMRMClientImpl<ContainerRequest>) AMRMClient .<ContainerRequest> createAMRMClient(); amClient.init(conf); amClient.start(); 
amClient.registerApplicationMaster("Host", 10000, ""); Priority priority1 = Records.newRecord(Priority.class); priority1.setPriority(2); ContainerRequest storedContainer1 = new ContainerRequest(capability, nodes, racks, priority); ContainerRequest storedContainer2 = new ContainerRequest(capability, nodes, racks, priority); ContainerRequest storedContainer3 = new ContainerRequest(capability, null, null, priority1); amClient.addContainerRequest(storedContainer1); amClient.addContainerRequest(storedContainer2); amClient.addContainerRequest(storedContainer3); // test addition and storage int containersRequestedAny = amClient.remoteRequestsTable.get(priority) .get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers(); assertEquals(2, containersRequestedAny); containersRequestedAny = amClient.remoteRequestsTable.get(priority1) .get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers(); assertEquals(1, containersRequestedAny); List<? extends Collection<ContainerRequest>> matches = amClient.getMatchingRequests(priority, node, capability); verifyMatches(matches, 2); matches = amClient.getMatchingRequests(priority, rack, capability); verifyMatches(matches, 2); matches = amClient.getMatchingRequests(priority, ResourceRequest.ANY, capability); verifyMatches(matches, 2); matches = amClient.getMatchingRequests(priority1, rack, capability); assertTrue(matches.isEmpty()); matches = amClient.getMatchingRequests(priority1, ResourceRequest.ANY, capability); verifyMatches(matches, 1); // test removal amClient.removeContainerRequest(storedContainer3); matches = amClient.getMatchingRequests(priority, node, capability); verifyMatches(matches, 2); amClient.removeContainerRequest(storedContainer2); matches = amClient.getMatchingRequests(priority, node, capability); verifyMatches(matches, 1); matches = amClient.getMatchingRequests(priority, rack, capability); verifyMatches(matches, 1); // test matching of containers ContainerRequest storedRequest = 
matches.get(0).iterator().next(); assertEquals(storedContainer1, storedRequest); amClient.removeContainerRequest(storedContainer1); matches = amClient.getMatchingRequests(priority, ResourceRequest.ANY, capability); assertTrue(matches.isEmpty()); matches = amClient.getMatchingRequests(priority1, ResourceRequest.ANY, capability); assertTrue(matches.isEmpty()); // 0 requests left. everything got cleaned up assertTrue(amClient.remoteRequestsTable.isEmpty()); // go through an exemplary allocation, matching and release cycle amClient.addContainerRequest(storedContainer1); amClient.addContainerRequest(storedContainer3); // RM should allocate container within 2 calls to allocate() int allocatedContainerCount = 0; int iterationsLeft = 3; while (allocatedContainerCount < 2 && iterationsLeft-- > 0) { Log.info(" == alloc " + allocatedContainerCount + " it left " + iterationsLeft); AllocateResponse allocResponse = amClient.allocate(0.1f); assertEquals(0, amClient.ask.size()); assertEquals(0, amClient.release.size()); assertEquals(nodeCount, amClient.getClusterNodeCount()); allocatedContainerCount += allocResponse.getAllocatedContainers().size(); for(Container container : allocResponse.getAllocatedContainers()) { ContainerRequest expectedRequest = container.getPriority().equals(storedContainer1.getPriority()) ? 
storedContainer1 : storedContainer3; matches = amClient.getMatchingRequests(container.getPriority(), ResourceRequest.ANY, container.getResource()); // test correct matched container is returned verifyMatches(matches, 1); ContainerRequest matchedRequest = matches.get(0).iterator().next(); assertEquals(matchedRequest, expectedRequest); amClient.removeContainerRequest(matchedRequest); // assign this container, use it and release it amClient.releaseAssignedContainer(container.getId()); } if(allocatedContainerCount < containersRequestedAny) { // sleep to let NM's heartbeat to RM and trigger allocations sleep(100); } } assertEquals(2, allocatedContainerCount); AllocateResponse allocResponse = amClient.allocate(0.1f); assertEquals(0, amClient.release.size()); assertEquals(0, amClient.ask.size()); assertEquals(0, allocResponse.getAllocatedContainers().size()); // 0 requests left. everything got cleaned up assertTrue(amClient.remoteRequestsTable.isEmpty()); amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } } @Test (timeout=60000) public void testAllocationWithBlacklist() throws YarnException, IOException { AMRMClientImpl<ContainerRequest> amClient = null; try { // start am rm client amClient = (AMRMClientImpl<ContainerRequest>) AMRMClient .<ContainerRequest> createAMRMClient(); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host", 10000, ""); assertEquals(0, amClient.ask.size()); assertEquals(0, amClient.release.size()); ContainerRequest storedContainer1 = new ContainerRequest(capability, nodes, racks, priority); amClient.addContainerRequest(storedContainer1); assertEquals(3, amClient.ask.size()); assertEquals(0, amClient.release.size()); List<String> localNodeBlacklist = new ArrayList<String>(); localNodeBlacklist.add(node); // put node in black list, so no container assignment 
amClient.updateBlacklist(localNodeBlacklist, null); int allocatedContainerCount = getAllocatedContainersNumber(amClient, DEFAULT_ITERATION); // the only node is in blacklist, so no allocation assertEquals(0, allocatedContainerCount); // Remove node from blacklist, so get assigned with 2 amClient.updateBlacklist(null, localNodeBlacklist); ContainerRequest storedContainer2 = new ContainerRequest(capability, nodes, racks, priority); amClient.addContainerRequest(storedContainer2); allocatedContainerCount = getAllocatedContainersNumber(amClient, DEFAULT_ITERATION); assertEquals(2, allocatedContainerCount); // Test in case exception in allocate(), blacklist is kept assertTrue(amClient.blacklistAdditions.isEmpty()); assertTrue(amClient.blacklistRemovals.isEmpty()); // create a invalid ContainerRequest - memory value is minus ContainerRequest invalidContainerRequest = new ContainerRequest(Resource.newInstance(-1024, 1), nodes, racks, priority); amClient.addContainerRequest(invalidContainerRequest); amClient.updateBlacklist(localNodeBlacklist, null); try { // allocate() should complain as ContainerRequest is invalid. 
amClient.allocate(0.1f); fail("there should be an exception here."); } catch (Exception e) { assertEquals(1, amClient.blacklistAdditions.size()); } } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } } @Test (timeout=60000) public void testAMRMClientWithBlacklist() throws YarnException, IOException { AMRMClientImpl<ContainerRequest> amClient = null; try { // start am rm client amClient = (AMRMClientImpl<ContainerRequest>) AMRMClient .<ContainerRequest> createAMRMClient(); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host", 10000, ""); String[] nodes = {"node1", "node2", "node3"}; // Add nodes[0] and nodes[1] List<String> nodeList01 = new ArrayList<String>(); nodeList01.add(nodes[0]); nodeList01.add(nodes[1]); amClient.updateBlacklist(nodeList01, null); assertEquals(2, amClient.blacklistAdditions.size()); assertEquals(0, amClient.blacklistRemovals.size()); // Add nodes[0] again, verify it is not added duplicated. List<String> nodeList02 = new ArrayList<String>(); nodeList02.add(nodes[0]); nodeList02.add(nodes[2]); amClient.updateBlacklist(nodeList02, null); assertEquals(3, amClient.blacklistAdditions.size()); assertEquals(0, amClient.blacklistRemovals.size()); // Add nodes[1] and nodes[2] to removal list, // Verify addition list remove these two nodes. List<String> nodeList12 = new ArrayList<String>(); nodeList12.add(nodes[1]); nodeList12.add(nodes[2]); amClient.updateBlacklist(null, nodeList12); assertEquals(1, amClient.blacklistAdditions.size()); assertEquals(2, amClient.blacklistRemovals.size()); // Add nodes[1] again to addition list, // Verify removal list will remove this node. 
List<String> nodeList1 = new ArrayList<String>(); nodeList1.add(nodes[1]); amClient.updateBlacklist(nodeList1, null); assertEquals(2, amClient.blacklistAdditions.size()); assertEquals(1, amClient.blacklistRemovals.size()); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } } private int getAllocatedContainersNumber( AMRMClientImpl<ContainerRequest> amClient, int iterationsLeft) throws YarnException, IOException { int allocatedContainerCount = 0; while (iterationsLeft-- > 0) { Log.info(" == alloc " + allocatedContainerCount + " it left " + iterationsLeft); AllocateResponse allocResponse = amClient.allocate(0.1f); assertEquals(0, amClient.ask.size()); assertEquals(0, amClient.release.size()); assertEquals(nodeCount, amClient.getClusterNodeCount()); allocatedContainerCount += allocResponse.getAllocatedContainers().size(); if(allocatedContainerCount == 0) { // sleep to let NM's heartbeat to RM and trigger allocations sleep(100); } } return allocatedContainerCount; } @Test (timeout=60000) public void testAMRMClient() throws YarnException, IOException { AMRMClient<ContainerRequest> amClient = null; try { // start am rm client amClient = AMRMClient.<ContainerRequest>createAMRMClient(); //setting an instance NMTokenCache amClient.setNMTokenCache(new NMTokenCache()); //asserting we are not using the singleton instance cache Assert.assertNotSame(NMTokenCache.getSingleton(), amClient.getNMTokenCache()); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host", 10000, ""); testAllocation((AMRMClientImpl<ContainerRequest>)amClient); amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } } @Test(timeout=30000) public void testAskWithNodeLabels() { AMRMClientImpl<ContainerRequest> client = new AMRMClientImpl<ContainerRequest>(); // add exp=x to ANY 
client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, 1), null, null, Priority.UNDEFINED, true, "x")); Assert.assertEquals(1, client.ask.size()); Assert.assertEquals("x", client.ask.iterator().next() .getNodeLabelExpression()); // add exp=x then add exp=a to ANY in same priority, only exp=a should kept client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, 1), null, null, Priority.UNDEFINED, true, "x")); client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, 1), null, null, Priority.UNDEFINED, true, "a")); Assert.assertEquals(1, client.ask.size()); Assert.assertEquals("a", client.ask.iterator().next() .getNodeLabelExpression()); // add exp=x to ANY, rack and node, only resource request has ANY resource // name will be assigned the label expression // add exp=x then add exp=a to ANY in same priority, only exp=a should kept client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024, 1), null, null, Priority.UNDEFINED, true, "y")); Assert.assertEquals(1, client.ask.size()); for (ResourceRequest req : client.ask) { if (ResourceRequest.ANY.equals(req.getResourceName())) { Assert.assertEquals("y", req.getNodeLabelExpression()); } else { Assert.assertNull(req.getNodeLabelExpression()); } } } private void verifyAddRequestFailed(AMRMClient<ContainerRequest> client, ContainerRequest request) { try { client.addContainerRequest(request); } catch (InvalidContainerRequestException e) { return; } Assert.fail(); } @Test(timeout=30000) public void testAskWithInvalidNodeLabels() { AMRMClientImpl<ContainerRequest> client = new AMRMClientImpl<ContainerRequest>(); // specified exp with more than one node labels verifyAddRequestFailed(client, new ContainerRequest(Resource.newInstance(1024, 1), null, null, Priority.UNDEFINED, true, "x && y")); } private void testAllocation(final AMRMClientImpl<ContainerRequest> amClient) throws YarnException, IOException { // setup container request assertEquals(0, 
amClient.ask.size()); assertEquals(0, amClient.release.size()); amClient.addContainerRequest( new ContainerRequest(capability, nodes, racks, priority)); amClient.addContainerRequest( new ContainerRequest(capability, nodes, racks, priority)); amClient.addContainerRequest( new ContainerRequest(capability, nodes, racks, priority)); amClient.addContainerRequest( new ContainerRequest(capability, nodes, racks, priority)); amClient.removeContainerRequest( new ContainerRequest(capability, nodes, racks, priority)); amClient.removeContainerRequest( new ContainerRequest(capability, nodes, racks, priority)); int containersRequestedNode = amClient.remoteRequestsTable.get(priority) .get(node).get(capability).remoteRequest.getNumContainers(); int containersRequestedRack = amClient.remoteRequestsTable.get(priority) .get(rack).get(capability).remoteRequest.getNumContainers(); int containersRequestedAny = amClient.remoteRequestsTable.get(priority) .get(ResourceRequest.ANY).get(capability).remoteRequest.getNumContainers(); assertEquals(2, containersRequestedNode); assertEquals(2, containersRequestedRack); assertEquals(2, containersRequestedAny); assertEquals(3, amClient.ask.size()); assertEquals(0, amClient.release.size()); // RM should allocate container within 2 calls to allocate() int allocatedContainerCount = 0; int iterationsLeft = 3; Set<ContainerId> releases = new TreeSet<ContainerId>(); amClient.getNMTokenCache().clearCache(); Assert.assertEquals(0, amClient.getNMTokenCache().numberOfTokensInCache()); HashMap<String, Token> receivedNMTokens = new HashMap<String, Token>(); while (allocatedContainerCount < containersRequestedAny && iterationsLeft-- > 0) { AllocateResponse allocResponse = amClient.allocate(0.1f); assertEquals(0, amClient.ask.size()); assertEquals(0, amClient.release.size()); assertEquals(nodeCount, amClient.getClusterNodeCount()); allocatedContainerCount += allocResponse.getAllocatedContainers().size(); for(Container container : 
allocResponse.getAllocatedContainers()) { ContainerId rejectContainerId = container.getId(); releases.add(rejectContainerId); amClient.releaseAssignedContainer(rejectContainerId); } for (NMToken token : allocResponse.getNMTokens()) { String nodeID = token.getNodeId().toString(); if (receivedNMTokens.containsKey(nodeID)) { Assert.fail("Received token again for : " + nodeID); } receivedNMTokens.put(nodeID, token.getToken()); } if(allocatedContainerCount < containersRequestedAny) { // sleep to let NM's heartbeat to RM and trigger allocations sleep(100); } } // Should receive atleast 1 token Assert.assertTrue(receivedNMTokens.size() > 0 && receivedNMTokens.size() <= nodeCount); assertEquals(allocatedContainerCount, containersRequestedAny); assertEquals(2, amClient.release.size()); assertEquals(0, amClient.ask.size()); // need to tell the AMRMClient that we dont need these resources anymore amClient.removeContainerRequest( new ContainerRequest(capability, nodes, racks, priority)); amClient.removeContainerRequest( new ContainerRequest(capability, nodes, racks, priority)); assertEquals(3, amClient.ask.size()); // send 0 container count request for resources that are no longer needed ResourceRequest snoopRequest = amClient.ask.iterator().next(); assertEquals(0, snoopRequest.getNumContainers()); // test RPC exception handling amClient.addContainerRequest(new ContainerRequest(capability, nodes, racks, priority)); amClient.addContainerRequest(new ContainerRequest(capability, nodes, racks, priority)); snoopRequest = amClient.ask.iterator().next(); assertEquals(2, snoopRequest.getNumContainers()); ApplicationMasterProtocol realRM = amClient.rmClient; try { ApplicationMasterProtocol mockRM = mock(ApplicationMasterProtocol.class); when(mockRM.allocate(any(AllocateRequest.class))).thenAnswer( new Answer<AllocateResponse>() { public AllocateResponse answer(InvocationOnMock invocation) throws Exception { amClient.removeContainerRequest( new ContainerRequest(capability, nodes, racks, 
priority)); amClient.removeContainerRequest( new ContainerRequest(capability, nodes, racks, priority)); throw new Exception(); } }); amClient.rmClient = mockRM; amClient.allocate(0.1f); }catch (Exception ioe) {} finally { amClient.rmClient = realRM; } assertEquals(2, amClient.release.size()); assertEquals(3, amClient.ask.size()); snoopRequest = amClient.ask.iterator().next(); // verify that the remove request made in between makeRequest and allocate // has not been lost assertEquals(0, snoopRequest.getNumContainers()); iterationsLeft = 3; // do a few iterations to ensure RM is not going send new containers while(!releases.isEmpty() || iterationsLeft-- > 0) { // inform RM of rejection AllocateResponse allocResponse = amClient.allocate(0.1f); // RM did not send new containers because AM does not need any assertEquals(0, allocResponse.getAllocatedContainers().size()); if(allocResponse.getCompletedContainersStatuses().size() > 0) { for(ContainerStatus cStatus :allocResponse .getCompletedContainersStatuses()) { if(releases.contains(cStatus.getContainerId())) { assertEquals(cStatus.getState(), ContainerState.COMPLETE); assertEquals(-100, cStatus.getExitStatus()); releases.remove(cStatus.getContainerId()); } } } if(iterationsLeft > 0) { // sleep to make sure NM's heartbeat sleep(100); } } assertEquals(0, amClient.ask.size()); assertEquals(0, amClient.release.size()); } class CountDownSupplier implements Supplier<Boolean> { int counter = 0; @Override public Boolean get() { counter++; if (counter >= 3) { return true; } else { return false; } } }; @Test public void testWaitFor() throws InterruptedException { AMRMClientImpl<ContainerRequest> amClient = null; CountDownSupplier countDownChecker = new CountDownSupplier(); try { // start am rm client amClient = (AMRMClientImpl<ContainerRequest>) AMRMClient .<ContainerRequest> createAMRMClient(); amClient.init(new YarnConfiguration()); amClient.start(); amClient.waitFor(countDownChecker, 1000); assertEquals(3, 
countDownChecker.counter); } finally { if (amClient != null) { amClient.stop(); } } } private void sleep(int sleepTime) { try { Thread.sleep(sleepTime); } catch (InterruptedException e) { e.printStackTrace(); } } @Test(timeout = 60000) public void testAMRMClientOnAMRMTokenRollOver() throws YarnException, IOException { AMRMClient<ContainerRequest> amClient = null; try { AMRMTokenSecretManager amrmTokenSecretManager = yarnCluster.getResourceManager().getRMContext() .getAMRMTokenSecretManager(); // start am rm client amClient = AMRMClient.<ContainerRequest> createAMRMClient(); amClient.init(conf); amClient.start(); Long startTime = System.currentTimeMillis(); amClient.registerApplicationMaster("Host", 10000, ""); org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken_1 = getAMRMToken(); Assert.assertNotNull(amrmToken_1); Assert.assertEquals(amrmToken_1.decodeIdentifier().getKeyId(), amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId()); // Wait for enough time and make sure the roll_over happens // At mean time, the old AMRMToken should continue to work while (System.currentTimeMillis() - startTime < rolling_interval_sec * 1000) { amClient.allocate(0.1f); try { Thread.sleep(1000); } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } } amClient.allocate(0.1f); org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken_2 = getAMRMToken(); Assert.assertNotNull(amrmToken_2); Assert.assertEquals(amrmToken_2.decodeIdentifier().getKeyId(), amrmTokenSecretManager.getMasterKey().getMasterKey().getKeyId()); Assert.assertNotEquals(amrmToken_1, amrmToken_2); // can do the allocate call with latest AMRMToken AllocateResponse response = amClient.allocate(0.1f); // Verify latest AMRMToken can be used to send allocation request. 
UserGroupInformation testUser1 = UserGroupInformation.createRemoteUser("testUser1"); AMRMTokenIdentifierForTest newVersionTokenIdentifier = new AMRMTokenIdentifierForTest(amrmToken_2.decodeIdentifier(), "message"); Assert.assertEquals("Message is changed after set to newVersionTokenIdentifier", "message", newVersionTokenIdentifier.getMessage()); org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> newVersionToken = new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> ( newVersionTokenIdentifier.getBytes(), amrmTokenSecretManager.retrievePassword(newVersionTokenIdentifier), newVersionTokenIdentifier.getKind(), new Text()); SecurityUtil.setTokenService(newVersionToken, yarnCluster .getResourceManager().getApplicationMasterService().getBindAddress()); testUser1.addToken(newVersionToken); AllocateRequest request = Records.newRecord(AllocateRequest.class); request.setResponseId(response.getResponseId()); testUser1.doAs(new PrivilegedAction<ApplicationMasterProtocol>() { @Override public ApplicationMasterProtocol run() { return (ApplicationMasterProtocol) YarnRPC.create(conf).getProxy( ApplicationMasterProtocol.class, yarnCluster.getResourceManager().getApplicationMasterService() .getBindAddress(), conf); } }).allocate(request); // Make sure previous token has been rolled-over // and can not use this rolled-over token to make a allocate all. 
while (true) { if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager .getCurrnetMasterKeyData().getMasterKey().getKeyId()) { if (amrmTokenSecretManager.getNextMasterKeyData() == null) { break; } else if (amrmToken_2.decodeIdentifier().getKeyId() != amrmTokenSecretManager.getNextMasterKeyData().getMasterKey() .getKeyId()) { break; } } amClient.allocate(0.1f); try { Thread.sleep(1000); } catch (InterruptedException e) { // DO NOTHING } } try { UserGroupInformation testUser2 = UserGroupInformation.createRemoteUser("testUser2"); SecurityUtil.setTokenService(amrmToken_2, yarnCluster .getResourceManager().getApplicationMasterService().getBindAddress()); testUser2.addToken(amrmToken_2); testUser2.doAs(new PrivilegedAction<ApplicationMasterProtocol>() { @Override public ApplicationMasterProtocol run() { return (ApplicationMasterProtocol) YarnRPC.create(conf).getProxy( ApplicationMasterProtocol.class, yarnCluster.getResourceManager().getApplicationMasterService() .getBindAddress(), conf); } }).allocate(Records.newRecord(AllocateRequest.class)); Assert.fail("The old Token should not work"); } catch (Exception ex) { Assert.assertTrue(ex instanceof InvalidToken); Assert.assertTrue(ex.getMessage().contains( "Invalid AMRMToken from " + amrmToken_2.decodeIdentifier().getApplicationAttemptId())); } amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED, null, null); } finally { if (amClient != null && amClient.getServiceState() == STATE.STARTED) { amClient.stop(); } } } @SuppressWarnings("unchecked") private org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> getAMRMToken() throws IOException { Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials(); Iterator<org.apache.hadoop.security.token.Token<?>> iter = credentials.getAllTokens().iterator(); org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> result = null; while (iter.hasNext()) { org.apache.hadoop.security.token.Token<?> token = iter.next(); if 
(token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) { if (result != null) { Assert.fail("credentials has more than one AMRM token." + " token1: " + result + " token2: " + token); } result = (org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>) token; } } return result; } }
44,368
39.968606
89
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestNMClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.impl; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.List; import java.util.Set; import java.util.TreeSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import 
org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.NMToken;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.client.api.NMClient;
import org.apache.hadoop.yarn.client.api.NMTokenCache;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.util.Records;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/**
 * End-to-end tests for {@code NMClientImpl} against a {@link MiniYARNCluster}.
 * {@link #setup()} starts a 3-node mini cluster, submits an unmanaged AM,
 * grabs its AMRMToken directly from the RM, and wires an AM-RM client and an
 * NM client together through a shared {@link NMTokenCache}. The tests then
 * allocate containers and exercise the
 * startContainer/getContainerStatus/stopContainer life-cycle, including the
 * cleanup-on-stop behavior of the NM client.
 */
public class TestNMClient {
  Configuration conf = null;
  MiniYARNCluster yarnCluster = null;
  YarnClientImpl yarnClient = null;
  AMRMClientImpl<ContainerRequest> rmClient = null;
  NMClientImpl nmClient = null;
  List<NodeReport> nodeReports = null;
  ApplicationAttemptId attemptId = null;
  int nodeCount = 3;
  NMTokenCache nmTokenCache = null;

  /**
   * Brings up the mini cluster, submits an unmanaged AM application, waits
   * for the attempt to reach LAUNCHED, and starts the AM-RM and AM-NM
   * clients used by every test.
   */
  @Before
  public void setup() throws YarnException, IOException {
    // start minicluster
    conf = new YarnConfiguration();
    yarnCluster =
        new MiniYARNCluster(TestAMRMClient.class.getName(), nodeCount, 1, 1);
    yarnCluster.init(conf);
    yarnCluster.start();
    assertNotNull(yarnCluster);
    assertEquals(STATE.STARTED, yarnCluster.getServiceState());

    // start rm client
    yarnClient = (YarnClientImpl) YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    assertNotNull(yarnClient);
    assertEquals(STATE.STARTED, yarnClient.getServiceState());

    // get node info
    nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);

    // submit new app
    ApplicationSubmissionContext appContext =
        yarnClient.createApplication().getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    // set the application name
    appContext.setApplicationName("Test");
    // Set the priority for the application master
    Priority pri = Priority.newInstance(0);
    appContext.setPriority(pri);
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue("default");
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer =
        Records.newRecord(ContainerLaunchContext.class);
    appContext.setAMContainerSpec(amContainer);
    // unmanaged AM
    appContext.setUnmanagedAM(true);
    // Create the request to send to the applications manager
    SubmitApplicationRequest appRequest =
        Records.newRecord(SubmitApplicationRequest.class);
    appRequest.setApplicationSubmissionContext(appContext);
    // Submit the application to the applications manager
    yarnClient.submitApplication(appContext);

    // wait for app to start
    int iterationsLeft = 30;
    RMAppAttempt appAttempt = null;
    while (iterationsLeft > 0) {
      ApplicationReport appReport = yarnClient.getApplicationReport(appId);
      if (appReport.getYarnApplicationState() ==
          YarnApplicationState.ACCEPTED) {
        attemptId = appReport.getCurrentApplicationAttemptId();
        appAttempt =
            yarnCluster.getResourceManager().getRMContext().getRMApps()
                .get(attemptId.getApplicationId()).getCurrentAppAttempt();
        while (true) {
          if (appAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) {
            break;
          }
          // back off instead of spinning a core while the RM launches the
          // attempt
          sleep(100);
        }
        break;
      }
      sleep(1000);
      --iterationsLeft;
    }
    if (iterationsLeft == 0) {
      fail("Application hasn't been started");
    }

    // Just dig into the ResourceManager and get the AMRMToken just for the
    // sake of testing.
    UserGroupInformation.setLoginUser(UserGroupInformation
        .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
    UserGroupInformation.getCurrentUser().addToken(appAttempt.getAMRMToken());

    // creating an NMTokenCache instance shared by both clients below
    nmTokenCache = new NMTokenCache();
    // start am rm client
    rmClient =
        (AMRMClientImpl<ContainerRequest>) AMRMClient
            .<ContainerRequest> createAMRMClient();
    // setting the NMTokenCache instance
    rmClient.setNMTokenCache(nmTokenCache);
    rmClient.init(conf);
    rmClient.start();
    assertNotNull(rmClient);
    assertEquals(STATE.STARTED, rmClient.getServiceState());

    // start am nm client
    nmClient = (NMClientImpl) NMClient.createNMClient();

    // propagating the AMRMClient NMTokenCache instance
    nmClient.setNMTokenCache(rmClient.getNMTokenCache());
    nmClient.init(conf);
    nmClient.start();
    assertNotNull(nmClient);
    assertEquals(STATE.STARTED, nmClient.getServiceState());
  }

  /**
   * Stops whatever {@link #setup()} managed to start. Null-guarded so a
   * partial setup failure does not turn into a misleading NPE here.
   */
  @After
  public void tearDown() {
    if (rmClient != null) {
      rmClient.stop();
    }
    if (yarnClient != null) {
      yarnClient.stop();
    }
    if (yarnCluster != null) {
      yarnCluster.stop();
    }
  }

  /**
   * Stops the NM client after asserting exactly one container was left
   * running, configuring whether running containers are cleaned up on stop.
   */
  private void stopNmClient(boolean stopContainers) {
    assertNotNull("Null nmClient", nmClient);
    // leave one unclosed
    assertEquals(1, nmClient.startedContainers.size());
    // default true
    assertTrue(nmClient.getCleanupRunningContainers().get());
    nmClient.cleanupRunningContainersOnStop(stopContainers);
    assertEquals(stopContainers, nmClient.getCleanupRunningContainers().get());
    nmClient.stop();
  }

  @Test (timeout = 180000)
  public void testNMClientNoCleanupOnStop()
      throws YarnException, IOException {
    rmClient.registerApplicationMaster("Host", 10000, "");
    testContainerManagement(nmClient, allocateContainers(rmClient, 5));
    rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,
        null, null);
    // don't stop the running containers
    stopNmClient(false);
    assertFalse(nmClient.startedContainers.isEmpty());
    // now cleanup
    nmClient.cleanupRunningContainers();
    assertEquals(0, nmClient.startedContainers.size());
  }

  @Test (timeout = 200000)
  public void testNMClient()
      throws YarnException, IOException {
    rmClient.registerApplicationMaster("Host", 10000, "");
    testContainerManagement(nmClient, allocateContainers(rmClient, 5));
    rmClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,
        null, null);
    // stop the running containers on close
    assertFalse(nmClient.startedContainers.isEmpty());
    nmClient.cleanupRunningContainersOnStop(true);
    assertTrue(nmClient.getCleanupRunningContainers().get());
    nmClient.stop();
  }

  /**
   * Asks the RM for {@code num} identical 1GB containers on the first
   * reported node and waits (up to two allocate rounds) for them to arrive.
   * NM tokens returned with the allocation are stashed in the shared cache.
   *
   * @return the containers the RM allocated (possibly fewer than requested)
   */
  private Set<Container> allocateContainers(
      AMRMClientImpl<ContainerRequest> rmClient, int num)
      throws YarnException, IOException {
    // setup container request
    Resource capability = Resource.newInstance(1024, 0);
    Priority priority = Priority.newInstance(0);
    String node = nodeReports.get(0).getNodeId().getHost();
    String rack = nodeReports.get(0).getRackName();
    String[] nodes = new String[] {node};
    String[] racks = new String[] {rack};

    for (int i = 0; i < num; ++i) {
      rmClient.addContainerRequest(new ContainerRequest(capability, nodes,
          racks, priority));
    }

    int containersRequestedAny = rmClient.remoteRequestsTable.get(priority)
        .get(ResourceRequest.ANY).get(capability).remoteRequest
        .getNumContainers();

    // RM should allocate container within 2 calls to allocate()
    int allocatedContainerCount = 0;
    int iterationsLeft = 2;
    Set<Container> containers = new TreeSet<Container>();
    while (allocatedContainerCount < containersRequestedAny
        && iterationsLeft > 0) {
      AllocateResponse allocResponse = rmClient.allocate(0.1f);

      allocatedContainerCount += allocResponse.getAllocatedContainers().size();
      for (Container container : allocResponse.getAllocatedContainers()) {
        containers.add(container);
      }
      if (!allocResponse.getNMTokens().isEmpty()) {
        for (NMToken token : allocResponse.getNMTokens()) {
          rmClient.getNMTokenCache().setToken(token.getNodeId().toString(),
              token.getToken());
        }
      }
      if (allocatedContainerCount < containersRequestedAny) {
        // sleep to let NM's heartbeat to RM and trigger allocations
        sleep(1000);
      }

      --iterationsLeft;
    }
    return containers;
  }

  /**
   * Drives each container through the expected state machine: status/stop
   * before start must fail, start must succeed, and all but the last
   * container are stopped and verified COMPLETE. The last container is left
   * running on purpose so the stop-behavior tests can observe it.
   */
  private void testContainerManagement(NMClientImpl nmClient,
      Set<Container> containers) throws YarnException, IOException {
    int size = containers.size();
    int i = 0;
    for (Container container : containers) {
      // getContainerStatus shouldn't be called before startContainer,
      // otherwise, NodeManager cannot find the container
      try {
        nmClient.getContainerStatus(container.getId(), container.getNodeId());
        fail("Exception is expected");
      } catch (YarnException e) {
        assertTrue("The thrown exception is not expected",
            e.getMessage().contains("is not handled by this NodeManager"));
      }

      // stopContainer shouldn't be called before startContainer,
      // otherwise, an exception will be thrown
      try {
        nmClient.stopContainer(container.getId(), container.getNodeId());
        fail("Exception is expected");
      } catch (YarnException e) {
        if (!e.getMessage()
            .contains("is not handled by this NodeManager")) {
          throw (AssertionError)
              (new AssertionError("Exception is not expected: " + e).initCause(
                  e));
        }
      }

      Credentials ts = new Credentials();
      DataOutputBuffer dob = new DataOutputBuffer();
      ts.writeTokenStorageToStream(dob);
      ByteBuffer securityTokens =
          ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
      ContainerLaunchContext clc =
          Records.newRecord(ContainerLaunchContext.class);
      clc.setTokens(securityTokens);
      try {
        nmClient.startContainer(container, clc);
      } catch (YarnException e) {
        throw (AssertionError)
            (new AssertionError("Exception is not expected: " + e)
                .initCause(e));
      }

      // leave one container unclosed
      if (++i < size) {
        // NodeManager may still need some time to make the container started
        testGetContainerStatus(container, i, ContainerState.RUNNING, "",
            Arrays.asList(new Integer[] {-1000}));

        try {
          nmClient.stopContainer(container.getId(), container.getNodeId());
        } catch (YarnException e) {
          throw (AssertionError)
              (new AssertionError("Exception is not expected: " + e)
                  .initCause(e));
        }

        // getContainerStatus can be called after stopContainer
        try {
          // 0 is possible if CLEANUP_CONTAINER is executed too late
          // -105 is possible if the container is not terminated but killed
          testGetContainerStatus(container, i, ContainerState.COMPLETE,
              "Container killed by the ApplicationMaster.", Arrays.asList(
                  new Integer[] {ContainerExitStatus.KILLED_BY_APPMASTER,
                      ContainerExitStatus.SUCCESS}));
        } catch (YarnException e) {
          // The exception is possible because, after the container is
          // stopped, it may be removed from NM's context.
          if (!e.getMessage()
              .contains("was recently stopped on node manager")) {
            throw (AssertionError)
                (new AssertionError("Exception is not expected: " + e)
                    .initCause(e));
          }
        }
      }
    }
  }

  /** Sleeps quietly, restoring the interrupt status if interrupted. */
  private void sleep(int sleepTime) {
    try {
      Thread.sleep(sleepTime);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  /**
   * Polls the NM until the container reaches the given state, then asserts
   * the diagnostics and that the exit status is one of the accepted values.
   * NOTE(review): the poll loop has no iteration cap; it relies on the
   * enclosing @Test timeout to bound it.
   */
  private void testGetContainerStatus(Container container, int index,
      ContainerState state, String diagnostics, List<Integer> exitStatuses)
      throws YarnException, IOException {
    while (true) {
      try {
        ContainerStatus status = nmClient.getContainerStatus(
            container.getId(), container.getNodeId());
        // NodeManager may still need some time to get the stable
        // container status
        if (status.getState() == state) {
          assertEquals(container.getId(), status.getContainerId());
          assertTrue("" + index + ": " + status.getDiagnostics(),
              status.getDiagnostics().contains(diagnostics));
          assertTrue("Exit Statuses are supposed to be in: " + exitStatuses +
              ", but the actual exit status code is: " +
              status.getExitStatus(),
              exitStatuses.contains(status.getExitStatus()));
          break;
        }
        Thread.sleep(100);
      } catch (InterruptedException e) {
        e.printStackTrace();
      }
    }
  }
}
15,400
37.406484
85
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestSharedCacheClientImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.impl; import static org.junit.Assert.assertEquals; import static org.mockito.Matchers.isA; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.DataOutputStream; import java.io.FileNotFoundException; import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.yarn.api.ClientSCMProtocol; import org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest; import org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceRequest; import org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceResponse; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.UseSharedCacheResourceResponsePBImpl; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.exceptions.YarnException; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; public class TestSharedCacheClientImpl { private static 
final Log LOG = LogFactory .getLog(TestSharedCacheClientImpl.class); public static SharedCacheClientImpl client; public static ClientSCMProtocol cProtocol; private static Path TEST_ROOT_DIR; private static FileSystem localFs; private static String input = "This is a test file."; private static String inputChecksumSHA256 = "f29bc64a9d3732b4b9035125fdb3285f5b6455778edca72414671e0ca3b2e0de"; @BeforeClass public static void beforeClass() throws IOException { localFs = FileSystem.getLocal(new Configuration()); TEST_ROOT_DIR = new Path("target", TestSharedCacheClientImpl.class.getName() + "-tmpDir").makeQualified(localFs.getUri(), localFs.getWorkingDirectory()); } @AfterClass public static void afterClass() { try { if (localFs != null) { localFs.close(); } } catch (IOException ioe) { LOG.info("IO exception in closing file system)"); ioe.printStackTrace(); } } @Before public void setup() { cProtocol = mock(ClientSCMProtocol.class); client = new SharedCacheClientImpl() { @Override protected ClientSCMProtocol createClientProxy() { return cProtocol; } @Override protected void stopClientProxy() { // do nothing because it is mocked } }; client.init(new Configuration()); client.start(); } @After public void cleanup() { if (client != null) { client.stop(); client = null; } } @Test public void testUse() throws Exception { Path file = new Path("viewfs://test/path"); UseSharedCacheResourceResponse response = new UseSharedCacheResourceResponsePBImpl(); response.setPath(file.toString()); when(cProtocol.use(isA(UseSharedCacheResourceRequest.class))).thenReturn( response); Path newPath = client.use(mock(ApplicationId.class), "key"); assertEquals(file, newPath); } @Test(expected = YarnException.class) public void testUseError() throws Exception { String message = "Mock IOExcepiton!"; when(cProtocol.use(isA(UseSharedCacheResourceRequest.class))).thenThrow( new IOException(message)); client.use(mock(ApplicationId.class), "key"); } @Test public void testRelease() throws Exception { // 
Release does not care about the return value because it is empty when(cProtocol.release(isA(ReleaseSharedCacheResourceRequest.class))) .thenReturn(null); client.release(mock(ApplicationId.class), "key"); } @Test(expected = YarnException.class) public void testReleaseError() throws Exception { String message = "Mock IOExcepiton!"; when(cProtocol.release(isA(ReleaseSharedCacheResourceRequest.class))) .thenThrow(new IOException(message)); client.release(mock(ApplicationId.class), "key"); } @Test public void testChecksum() throws Exception { String filename = "test1.txt"; Path file = makeFile(filename); assertEquals(inputChecksumSHA256, client.getFileChecksum(file)); } @Test(expected = FileNotFoundException.class) public void testNonexistantFileChecksum() throws Exception { Path file = new Path(TEST_ROOT_DIR, "non-existant-file"); client.getFileChecksum(file); } private Path makeFile(String filename) throws Exception { Path file = new Path(TEST_ROOT_DIR, filename); DataOutputStream out = null; try { out = localFs.create(file); out.write(input.getBytes("UTF-8")); } finally { if(out != null) { out.close(); } } return file; } }
5,615
31.842105
95
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/AMRMTokenIdentifierForTest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.impl; import java.io.DataInput; import java.io.DataInputStream; import java.io.DataOutput; import java.io.IOException; import org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl; import org.apache.hadoop.yarn.proto.YarnSecurityTestAMRMTokenProtos.AMRMTokenIdentifierForTestProto; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import com.google.protobuf.TextFormat; public class AMRMTokenIdentifierForTest extends AMRMTokenIdentifier { private static Log LOG = LogFactory.getLog(AMRMTokenIdentifierForTest.class); public static final Text KIND = new Text("YARN_AM_RM_TOKEN"); private AMRMTokenIdentifierForTestProto proto; private AMRMTokenIdentifierForTestProto.Builder builder; public AMRMTokenIdentifierForTest(){ builder = 
AMRMTokenIdentifierForTestProto.newBuilder(); } public AMRMTokenIdentifierForTest(AMRMTokenIdentifierForTestProto proto) { this.proto = proto; } public AMRMTokenIdentifierForTest(AMRMTokenIdentifier tokenIdentifier, String message) { builder = AMRMTokenIdentifierForTestProto.newBuilder(); builder.setAppAttemptId(tokenIdentifier.getProto().getAppAttemptId()); builder.setKeyId(tokenIdentifier.getKeyId()); builder.setMessage(message); proto = builder.build(); builder = null; } @Override public void write(DataOutput out) throws IOException { out.write(proto.toByteArray()); } @Override public void readFields(DataInput in) throws IOException { DataInputStream dis = (DataInputStream)in; byte[] buffer = IOUtils.toByteArray(dis); proto = AMRMTokenIdentifierForTestProto.parseFrom(buffer); } @Override public Text getKind() { return KIND; } public String getMessage() { return proto.getMessage(); } public void setMessage(String message) { builder.setMessage(message); } public void build() { proto = builder.build(); builder = null; } public ApplicationAttemptId getApplicationAttemptId() { return new ApplicationAttemptIdPBImpl(proto.getAppAttemptId()); } public int getKeyId() { return proto.getKeyId(); } public AMRMTokenIdentifierForTestProto getNewProto(){ return this.proto; } @Override public int hashCode() { return this.proto.hashCode(); } @Override public boolean equals(Object other) { if (other == null) return false; if (other.getClass().isAssignableFrom(this.getClass())) { return this.getNewProto().equals(this.getClass().cast(other).getNewProto()); } return false; } @Override public String toString() { return TextFormat.shortDebugString(this.proto); } }
3,911
28.862595
100
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientOnRMRestart.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.impl; import java.io.IOException; import java.security.PrivilegedAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.client.api.AMRMClient; import 
org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus; import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse; import org.apache.hadoop.yarn.server.api.records.NodeAction; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager; import org.apache.hadoop.yarn.util.Records; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; public class TestAMRMClientOnRMRestart { static Configuration conf = null; static final int rolling_interval_sec = 13; static final long am_expire_ms = 4000; @BeforeClass public static void setup() throws Exception { conf = new 
Configuration(); conf.set(YarnConfiguration.RECOVERY_ENABLED, "true"); conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName()); conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS); conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true); conf.setLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0); } // Test does major 6 steps verification. // Step-1 : AMRMClient send allocate request for 2 container requests // Step-2 : 2 containers are allocated by RM. // Step-3 : AM Send 1 containerRequest(cRequest3) and 1 releaseRequests to // RM // Step-4 : On RM restart, AM(does not know RM is restarted) sends additional // containerRequest(cRequest4) and blacklisted nodes. // Intern RM send resync command // Step-5 : Allocater after resync command & new containerRequest(cRequest5) // Step-6 : RM allocates containers i.e cRequest3,cRequest4 and cRequest5 @Test(timeout = 60000) public void testAMRMClientResendsRequestsOnRMRestart() throws Exception { UserGroupInformation.setLoginUser(null); MemoryRMStateStore memStore = new MemoryRMStateStore(); memStore.init(conf); // Phase-1 Start 1st RM MyResourceManager rm1 = new MyResourceManager(conf, memStore); rm1.start(); DrainDispatcher dispatcher = (DrainDispatcher) rm1.getRMContext().getDispatcher(); // Submit the application RMApp app = rm1.submitApp(1024); dispatcher.await(); MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService()); nm1.registerNode(); nm1.nodeHeartbeat(true); // Node heartbeat dispatcher.await(); ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt().getAppAttemptId(); rm1.sendAMLaunched(appAttemptId); dispatcher.await(); org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> token = rm1.getRMContext().getRMApps().get(appAttemptId.getApplicationId()) .getRMAppAttempt(appAttemptId).getAMRMToken(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); 
ugi.addTokenIdentifier(token.decodeIdentifier()); // Step-1 : AMRMClient send allocate request for 2 ContainerRequest // cRequest1 = h1 and cRequest2 = h1,h2 // blacklisted nodes = h2 AMRMClient<ContainerRequest> amClient = new MyAMRMClientImpl(rm1); amClient.init(conf); amClient.start(); amClient.registerApplicationMaster("Host", 10000, ""); ContainerRequest cRequest1 = createReq(1, 1024, new String[] { "h1" }); amClient.addContainerRequest(cRequest1); ContainerRequest cRequest2 = createReq(1, 1024, new String[] { "h1", "h2" }); amClient.addContainerRequest(cRequest2); List<String> blacklistAdditions = new ArrayList<String>(); List<String> blacklistRemoval = new ArrayList<String>(); blacklistAdditions.add("h2"); blacklistRemoval.add("h10"); amClient.updateBlacklist(blacklistAdditions, blacklistRemoval); blacklistAdditions.remove("h2");// remove from local list AllocateResponse allocateResponse = amClient.allocate(0.1f); dispatcher.await(); Assert.assertEquals("No of assignments must be 0", 0, allocateResponse .getAllocatedContainers().size()); // Why 4 ask, why not 3 ask even h2 is blacklisted? // On blacklisting host,applicationmaster has to remove ask request from // remoterequest table.Here,test does not remove explicitely assertAsksAndReleases(4, 0, rm1); assertBlacklistAdditionsAndRemovals(1, 1, rm1); // Step-2 : NM heart beat is sent. // On 2nd AM allocate request, RM allocates 2 containers to AM nm1.nodeHeartbeat(true); // Node heartbeat dispatcher.await(); allocateResponse = amClient.allocate(0.2f); dispatcher.await(); // 2 containers are allocated i.e for cRequest1 and cRequest2. 
Assert.assertEquals("No of assignments must be 0", 2, allocateResponse .getAllocatedContainers().size()); assertAsksAndReleases(0, 0, rm1); assertBlacklistAdditionsAndRemovals(0, 0, rm1); List<Container> allocatedContainers = allocateResponse.getAllocatedContainers(); // removed allocated container requests amClient.removeContainerRequest(cRequest1); amClient.removeContainerRequest(cRequest2); allocateResponse = amClient.allocate(0.2f); dispatcher.await(); Assert.assertEquals("No of assignments must be 0", 0, allocateResponse .getAllocatedContainers().size()); assertAsksAndReleases(4, 0, rm1); assertBlacklistAdditionsAndRemovals(0, 0, rm1); // Step-3 : Send 1 containerRequest and 1 releaseRequests to RM ContainerRequest cRequest3 = createReq(1, 1024, new String[] { "h1" }); amClient.addContainerRequest(cRequest3); int pendingRelease = 0; Iterator<Container> it = allocatedContainers.iterator(); while (it.hasNext()) { amClient.releaseAssignedContainer(it.next().getId()); pendingRelease++; it.remove(); break;// remove one container } allocateResponse = amClient.allocate(0.3f); dispatcher.await(); Assert.assertEquals("No of assignments must be 0", 0, allocateResponse .getAllocatedContainers().size()); assertAsksAndReleases(3, pendingRelease, rm1); assertBlacklistAdditionsAndRemovals(0, 0, rm1); int completedContainer = allocateResponse.getCompletedContainersStatuses().size(); pendingRelease -= completedContainer; // Phase-2 start 2nd RM is up MyResourceManager rm2 = new MyResourceManager(conf, memStore); rm2.start(); nm1.setResourceTrackerService(rm2.getResourceTrackerService()); ((MyAMRMClientImpl) amClient).updateRMProxy(rm2); dispatcher = (DrainDispatcher) rm2.getRMContext().getDispatcher(); // NM should be rebooted on heartbeat, even first heartbeat for nm2 NodeHeartbeatResponse hbResponse = nm1.nodeHeartbeat(true); Assert.assertEquals(NodeAction.RESYNC, hbResponse.getNodeAction()); // new NM to represent NM re-register nm1 = new MockNM("h1:1234", 10240, 
rm2.getResourceTrackerService()); nm1.registerNode(); nm1.nodeHeartbeat(true); dispatcher.await(); blacklistAdditions.add("h3"); amClient.updateBlacklist(blacklistAdditions, null); blacklistAdditions.remove("h3"); it = allocatedContainers.iterator(); while (it.hasNext()) { amClient.releaseAssignedContainer(it.next().getId()); pendingRelease++; it.remove(); } ContainerRequest cRequest4 = createReq(1, 1024, new String[] { "h1", "h2" }); amClient.addContainerRequest(cRequest4); // Step-4 : On RM restart, AM(does not know RM is restarted) sends // additional // containerRequest and blacklisted nodes. // Intern RM send resync command,AMRMClient resend allocate request allocateResponse = amClient.allocate(0.3f); dispatcher.await(); completedContainer = allocateResponse.getCompletedContainersStatuses().size(); pendingRelease -= completedContainer; assertAsksAndReleases(4, pendingRelease, rm2); assertBlacklistAdditionsAndRemovals(2, 0, rm2); ContainerRequest cRequest5 = createReq(1, 1024, new String[] { "h1", "h2", "h3" }); amClient.addContainerRequest(cRequest5); // Step-5 : Allocater after resync command allocateResponse = amClient.allocate(0.5f); dispatcher.await(); Assert.assertEquals("No of assignments must be 0", 0, allocateResponse .getAllocatedContainers().size()); assertAsksAndReleases(5, 0, rm2); assertBlacklistAdditionsAndRemovals(0, 0, rm2); int noAssignedContainer = 0; int count = 5; while (count-- > 0) { nm1.nodeHeartbeat(true); dispatcher.await(); allocateResponse = amClient.allocate(0.5f); dispatcher.await(); noAssignedContainer += allocateResponse.getAllocatedContainers().size(); if (noAssignedContainer == 3) { break; } Thread.sleep(1000); } // Step-6 : RM allocates containers i.e cRequest3,cRequest4 and cRequest5 Assert.assertEquals("Number of container should be 3", 3, noAssignedContainer); amClient.stop(); rm1.stop(); rm2.stop(); } // Test verify for // 1. AM try to unregister without registering // 2. 
// AM register to RM, and try to unregister immediately after RM restart
@Test(timeout = 60000)
public void testAMRMClientForUnregisterAMOnRMRestart() throws Exception {
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  // Phase-1 Start 1st RM
  MyResourceManager rm1 = new MyResourceManager(conf, memStore);
  rm1.start();
  DrainDispatcher dispatcher =
      (DrainDispatcher) rm1.getRMContext().getDispatcher();

  // Submit the application
  RMApp app = rm1.submitApp(1024);
  dispatcher.await();

  MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();
  nm1.nodeHeartbeat(true); // Node heartbeat
  dispatcher.await();

  ApplicationAttemptId appAttemptId =
      app.getCurrentAppAttempt().getAppAttemptId();
  rm1.sendAMLaunched(appAttemptId);
  dispatcher.await();

  // Pick up the AMRMToken the RM minted for this attempt and attach it to
  // the current UGI so the in-process AMRMClient can authenticate.
  org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> token =
      rm1.getRMContext().getRMApps().get(appAttemptId.getApplicationId())
          .getRMAppAttempt(appAttemptId).getAMRMToken();
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  ugi.addTokenIdentifier(token.decodeIdentifier());

  AMRMClient<ContainerRequest> amClient = new MyAMRMClientImpl(rm1);
  amClient.init(conf);
  amClient.start();
  amClient.registerApplicationMaster("h1", 10000, "");
  amClient.allocate(0.1f);

  // Phase-2 start 2nd RM is up
  MyResourceManager rm2 = new MyResourceManager(conf, memStore);
  rm2.start();
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  ((MyAMRMClientImpl) amClient).updateRMProxy(rm2);
  dispatcher = (DrainDispatcher) rm2.getRMContext().getDispatcher();

  // NM should be rebooted on heartbeat, even first heartbeat for nm2
  NodeHeartbeatResponse hbResponse = nm1.nodeHeartbeat(true);
  Assert.assertEquals(NodeAction.RESYNC, hbResponse.getNodeAction());

  // new NM to represent NM re-register
  nm1 = new MockNM("h1:1234", 10240, rm2.getResourceTrackerService());
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
  NMContainerStatus containerReport =
      NMContainerStatus.newInstance(containerId, ContainerState.RUNNING,
          Resource.newInstance(1024, 1), "recover container", 0,
          Priority.newInstance(0), 0);
  nm1.registerNode(Arrays.asList(containerReport), null);
  nm1.nodeHeartbeat(true);
  dispatcher.await();

  // Unregister immediately against the restarted RM; the recovered attempt
  // must still walk FINISHING -> FINISHED once its container completes.
  amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,
      null, null);
  rm2.waitForState(appAttemptId, RMAppAttemptState.FINISHING);
  nm1.nodeHeartbeat(appAttemptId, 1, ContainerState.COMPLETE);
  rm2.waitForState(appAttemptId, RMAppAttemptState.FINISHED);
  rm2.waitForState(app.getApplicationId(), RMAppState.FINISHED);

  amClient.stop();
  rm1.stop();
  rm2.stop();
}

// Test verify for AM issued with rolled-over AMRMToken
// is still able to communicate with restarted RM.
@Test(timeout = 30000)
public void testAMRMClientOnAMRMTokenRollOverOnRMRestart() throws Exception {
  conf.setLong(
      YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
      rolling_interval_sec);
  conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, am_expire_ms);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);

  // start first RM
  MyResourceManager2 rm1 = new MyResourceManager2(conf, memStore);
  rm1.start();
  DrainDispatcher dispatcher =
      (DrainDispatcher) rm1.getRMContext().getDispatcher();
  // Primitive long, not boxed Long: this is only used in elapsed-time math.
  long startTime = System.currentTimeMillis();

  // Submit the application
  RMApp app = rm1.submitApp(1024);
  dispatcher.await();

  MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
  nm1.registerNode();
  nm1.nodeHeartbeat(true); // Node heartbeat
  dispatcher.await();

  ApplicationAttemptId appAttemptId =
      app.getCurrentAppAttempt().getAppAttemptId();
  rm1.sendAMLaunched(appAttemptId);
  dispatcher.await();

  AMRMTokenSecretManager amrmTokenSecretManagerForRM1 =
      rm1.getRMContext().getAMRMTokenSecretManager();
  org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> token =
      amrmTokenSecretManagerForRM1.createAndGetAMRMToken(appAttemptId);
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  ugi.addTokenIdentifier(token.decodeIdentifier());

  AMRMClient<ContainerRequest> amClient = new MyAMRMClientImpl(rm1);
  amClient.init(conf);
  amClient.start();
  amClient.registerApplicationMaster("h1", 10000, "");
  amClient.allocate(0.1f);

  // Wait for enough time and make sure the roll_over happens
  // At mean time, the old AMRMToken should continue to work
  // (1000L keeps the multiply in long arithmetic, avoiding int overflow.)
  while (System.currentTimeMillis() - startTime
      < rolling_interval_sec * 1000L) {
    amClient.allocate(0.1f);
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Deliberately swallowed: the test must wait out the full rolling
      // interval; re-interrupting here would turn the loop into a busy spin.
    }
  }
  Assert.assertTrue(amrmTokenSecretManagerForRM1.getMasterKey()
      .getMasterKey().getKeyId() != token.decodeIdentifier().getKeyId());

  amClient.allocate(0.1f);

  // active the nextMasterKey, and replace the currentMasterKey
  org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> newToken =
      amrmTokenSecretManagerForRM1.createAndGetAMRMToken(appAttemptId);
  int waitCount = 0;
  // Poll (bounded) until the current master key has actually rolled over.
  while (waitCount++ <= 50) {
    if (amrmTokenSecretManagerForRM1.getCurrnetMasterKeyData().getMasterKey()
        .getKeyId() != token.decodeIdentifier().getKeyId()) {
      break;
    }
    try {
      amClient.allocate(0.1f);
    } catch (Exception ex) {
      break;
    }
    Thread.sleep(500);
  }
  Assert
      .assertTrue(amrmTokenSecretManagerForRM1.getNextMasterKeyData() == null);
  Assert.assertTrue(amrmTokenSecretManagerForRM1.getCurrnetMasterKeyData()
      .getMasterKey().getKeyId() == newToken.decodeIdentifier().getKeyId());

  // start 2nd RM
  conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, "0.0.0.0:9030");
  final MyResourceManager2 rm2 = new MyResourceManager2(conf, memStore);
  rm2.start();
  nm1.setResourceTrackerService(rm2.getResourceTrackerService());
  ((MyAMRMClientImpl) amClient).updateRMProxy(rm2);
  dispatcher = (DrainDispatcher) rm2.getRMContext().getDispatcher();

  // The restarted RM must have recovered the rolled-over current key and
  // have no pending next key.
  AMRMTokenSecretManager amrmTokenSecretManagerForRM2 =
      rm2.getRMContext().getAMRMTokenSecretManager();
  Assert.assertTrue(amrmTokenSecretManagerForRM2.getCurrnetMasterKeyData()
      .getMasterKey().getKeyId() == newToken.decodeIdentifier().getKeyId());
  Assert
      .assertTrue(amrmTokenSecretManagerForRM2.getNextMasterKeyData() == null);

  // The pre-roll-over token must be rejected by the restarted RM.
  try {
    UserGroupInformation testUser =
        UserGroupInformation.createRemoteUser("testUser");
    SecurityUtil.setTokenService(token, rm2.getApplicationMasterService()
        .getBindAddress());
    testUser.addToken(token);
    testUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
      @Override
      public ApplicationMasterProtocol run() {
        return (ApplicationMasterProtocol) YarnRPC.create(conf).getProxy(
            ApplicationMasterProtocol.class,
            rm2.getApplicationMasterService().getBindAddress(), conf);
      }
    }).allocate(Records.newRecord(AllocateRequest.class));
    Assert.fail("The old Token should not work");
  } catch (Exception ex) {
    Assert.assertTrue(ex instanceof InvalidToken);
    Assert.assertTrue(ex.getMessage().contains(
        "Invalid AMRMToken from "
            + token.decodeIdentifier().getApplicationAttemptId()));
  }

  // make sure the recovered AMRMToken works for new RM
  amClient.allocate(0.1f);
  amClient.unregisterApplicationMaster(FinalApplicationStatus.SUCCEEDED,
      null, null);

  amClient.stop();
  rm1.stop();
  rm2.stop();
}

/**
 * FifoScheduler that records the last allocate() arguments so tests can
 * assert on exactly what the AMRMClient sent.
 */
private static class MyFifoScheduler extends FifoScheduler {

  public MyFifoScheduler(RMContext rmContext) {
    super();
    try {
      Configuration conf = new Configuration();
      reinitialize(conf, rmContext);
    } catch (IOException ie) {
      // Fail loudly instead of `assert (false)`, which is a no-op when the
      // JVM runs without -ea and would leave the scheduler half-initialized.
      throw new AssertionError("FifoScheduler reinitialize failed", ie);
    }
  }

  // Last arguments seen by allocate(); read by the assert helpers below.
  List<ResourceRequest> lastAsk = null;
  List<ContainerId> lastRelease = null;
  List<String> lastBlacklistAdditions;
  List<String> lastBlacklistRemovals;

  // override this to copy the objects otherwise FifoScheduler updates the
  // numContainers in same objects as kept by RMContainerAllocator
  @Override
  public synchronized Allocation allocate(
      ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
      List<ContainerId> release, List<String> blacklistAdditions,
      List<String> blacklistRemovals) {
    // Hand copies to the real scheduler; keep the caller's originals in
    // lastAsk so later assertions see untouched numContainers values.
    List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
    for (ResourceRequest req : ask) {
      ResourceRequest reqCopy = ResourceRequest.newInstance(req.getPriority(),
          req.getResourceName(), req.getCapability(), req.getNumContainers(),
          req.getRelaxLocality());
      askCopy.add(reqCopy);
    }
    lastAsk = ask;
    lastRelease = release;
    lastBlacklistAdditions = blacklistAdditions;
    lastBlacklistRemovals = blacklistRemovals;
    return super.allocate(applicationAttemptId, askCopy, release,
        blacklistAdditions, blacklistRemovals);
  }
}

/**
 * MockRM wired for these tests: draining dispatcher, inline scheduler-event
 * dispatch, the recording MyFifoScheduler, and a fixed cluster timestamp.
 */
private static class MyResourceManager extends MockRM {

  // Fixed once per JVM so every restart produces identical attempt IDs.
  private static final long fakeClusterTimeStamp = System.currentTimeMillis();

  public MyResourceManager(Configuration conf, RMStateStore store) {
    super(conf, store);
  }

  @Override
  public void serviceStart() throws Exception {
    super.serviceStart();
    // Ensure that the application attempt IDs for all the tests are the same
    // The application attempt IDs will be used as the login user names
    MyResourceManager.setClusterTimeStamp(fakeClusterTimeStamp);
  }

  @Override
  protected Dispatcher createDispatcher() {
    return new DrainDispatcher();
  }

  @Override
  protected EventHandler<SchedulerEvent> createSchedulerEventDispatcher() {
    // Dispatch inline for test sanity
    return new EventHandler<SchedulerEvent>() {
      @Override
      public void handle(SchedulerEvent event) {
        scheduler.handle(event);
      }
    };
  }

  @Override
  protected ResourceScheduler createScheduler() {
    return new MyFifoScheduler(this.getRMContext());
  }

  MyFifoScheduler getMyFifoScheduler() {
    return (MyFifoScheduler) scheduler;
  }
}

/** MyResourceManager with a real (non-mocked) ApplicationMasterService. */
private static class MyResourceManager2 extends MyResourceManager {

  public MyResourceManager2(Configuration conf, RMStateStore store) {
    super(conf, store);
  }

  @Override
  protected ApplicationMasterService createApplicationMasterService() {
    return new ApplicationMasterService(getRMContext(), scheduler);
  }
}

/**
 * AMRMClient that talks to an in-process RM directly (no RPC) and can be
 * repointed at a restarted RM via {@link #updateRMProxy}.
 */
private static class MyAMRMClientImpl
    extends AMRMClientImpl<ContainerRequest> {

  // RM this client registers with on start; rmClient may later be swapped.
  private final MyResourceManager rm;

  public MyAMRMClientImpl(MyResourceManager rm) {
    this.rm = rm;
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    super.serviceInit(conf);
  }

  @Override
  protected void serviceStart() throws Exception {
    // Bypass RPC: use the RM's ApplicationMasterService object directly.
    this.rmClient = this.rm.getApplicationMasterService();
  }

  @Override
  protected void serviceStop() throws Exception {
    rmClient = null;
    super.serviceStop();
  }

  /** Repoint this client at (the ApplicationMasterService of) a new RM. */
  public void updateRMProxy(MyResourceManager rm) {
    rmClient = rm.getApplicationMasterService();
  }
}

/** Asserts the blacklist add/remove list sizes last seen by the scheduler. */
private static void assertBlacklistAdditionsAndRemovals(
    int expectedAdditions, int expectedRemovals, MyResourceManager rm) {
  Assert.assertEquals(expectedAdditions,
      rm.getMyFifoScheduler().lastBlacklistAdditions.size());
  Assert.assertEquals(expectedRemovals,
      rm.getMyFifoScheduler().lastBlacklistRemovals.size());
}

/** Asserts the ask/release list sizes last seen by the scheduler. */
private static void assertAsksAndReleases(int expectedAsk,
    int expectedRelease, MyResourceManager rm) {
  Assert.assertEquals(expectedAsk, rm.getMyFifoScheduler().lastAsk.size());
  Assert.assertEquals(expectedRelease,
      rm.getMyFifoScheduler().lastRelease.size());
}

/**
 * Builds a ContainerRequest for the given priority, memory (MB, 1 vcore)
 * and candidate hosts, on the default rack.
 */
private ContainerRequest createReq(int priority, int memory, String[] hosts) {
  Resource capability = Resource.newInstance(memory, 1);
  Priority priorityOfContainer = Priority.newInstance(priority);
  return new ContainerRequest(capability, hosts,
      new String[] { NetworkTopology.DEFAULT_RACK }, priorityOfContainer);
}
}
25,038
37.403374
86
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.impl; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; import java.nio.ByteBuffer; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataInputByteBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.Token; import 
org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse; import 
org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.ReservationDefinition; import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.api.records.ReservationRequest; import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter; import org.apache.hadoop.yarn.api.records.ReservationRequests; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.client.api.AHSClient; import org.apache.hadoop.yarn.client.api.TimelineClient; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.client.api.YarnClientApplication; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; import 
org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystemTestUtil; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.UTCClock; import org.apache.log4j.Level; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import org.junit.Assert; import org.junit.Test; public class TestYarnClient { @Test public void test() { // More to come later. } @Test public void testClientStop() { Configuration conf = new Configuration(); ResourceManager rm = new ResourceManager(); rm.init(conf); rm.start(); YarnClient client = YarnClient.createYarnClient(); client.init(conf); client.start(); client.stop(); rm.stop(); } @SuppressWarnings("deprecation") @Test (timeout = 30000) public void testSubmitApplication() { Configuration conf = new Configuration(); conf.setLong(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS, 100); // speed up tests final YarnClient client = new MockYarnClient(); client.init(conf); client.start(); YarnApplicationState[] exitStates = new YarnApplicationState[] { YarnApplicationState.ACCEPTED, YarnApplicationState.RUNNING, YarnApplicationState.FINISHED }; // Submit an application without ApplicationId provided // Should get ApplicationIdNotProvidedException ApplicationSubmissionContext contextWithoutApplicationId = mock(ApplicationSubmissionContext.class); try { client.submitApplication(contextWithoutApplicationId); 
Assert.fail("Should throw the ApplicationIdNotProvidedException"); } catch (YarnException e) { Assert.assertTrue(e instanceof ApplicationIdNotProvidedException); Assert.assertTrue(e.getMessage().contains( "ApplicationId is not provided in ApplicationSubmissionContext")); } catch (IOException e) { Assert.fail("IOException is not expected."); } // Submit the application with applicationId provided // Should be successful for (int i = 0; i < exitStates.length; ++i) { ApplicationSubmissionContext context = mock(ApplicationSubmissionContext.class); ApplicationId applicationId = ApplicationId.newInstance( System.currentTimeMillis(), i); when(context.getApplicationId()).thenReturn(applicationId); ((MockYarnClient) client).setYarnApplicationState(exitStates[i]); try { client.submitApplication(context); } catch (YarnException e) { Assert.fail("Exception is not expected."); } catch (IOException e) { Assert.fail("Exception is not expected."); } verify(((MockYarnClient) client).mockReport,times(4 * i + 4)) .getYarnApplicationState(); } client.stop(); } @Test (timeout = 30000) public void testSubmitIncorrectQueue() throws IOException { MiniYARNCluster cluster = new MiniYARNCluster("testMRAMTokens", 1, 1, 1); YarnClient rmClient = null; try { cluster.init(new YarnConfiguration()); cluster.start(); final Configuration yarnConf = cluster.getConfig(); rmClient = YarnClient.createYarnClient(); rmClient.init(yarnConf); rmClient.start(); YarnClientApplication newApp = rmClient.createApplication(); ApplicationId appId = newApp.getNewApplicationResponse().getApplicationId(); // Create launch context for app master ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class); // set the application id appContext.setApplicationId(appId); // set the application name appContext.setApplicationName("test"); // Set the queue to which this application is to be submitted in the RM appContext.setQueue("nonexist"); // Set up the container launch context for the 
application master ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class); appContext.setAMContainerSpec(amContainer); appContext.setResource(Resource.newInstance(1024, 1)); // appContext.setUnmanagedAM(unmanaged); // Submit the application to the applications manager rmClient.submitApplication(appContext); Assert.fail("Job submission should have thrown an exception"); } catch (YarnException e) { Assert.assertTrue(e.getMessage().contains("Failed to submit")); } finally { if (rmClient != null) { rmClient.stop(); } cluster.stop(); } } @Test public void testKillApplication() throws Exception { MockRM rm = new MockRM(); rm.start(); RMApp app = rm.submitApp(2000); Configuration conf = new Configuration(); @SuppressWarnings("resource") final YarnClient client = new MockYarnClient(); client.init(conf); client.start(); client.killApplication(app.getApplicationId()); verify(((MockYarnClient) client).getRMClient(), times(2)) .forceKillApplication(any(KillApplicationRequest.class)); } @Test(timeout = 30000) public void testApplicationType() throws Exception { Logger rootLogger = LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); MockRM rm = new MockRM(); rm.start(); RMApp app = rm.submitApp(2000); RMApp app1 = rm.submitApp(200, "name", "user", new HashMap<ApplicationAccessType, String>(), false, "default", -1, null, "MAPREDUCE"); Assert.assertEquals("YARN", app.getApplicationType()); Assert.assertEquals("MAPREDUCE", app1.getApplicationType()); rm.stop(); } @Test(timeout = 30000) public void testApplicationTypeLimit() throws Exception { Logger rootLogger = LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); MockRM rm = new MockRM(); rm.start(); RMApp app1 = rm.submitApp(200, "name", "user", new HashMap<ApplicationAccessType, String>(), false, "default", -1, null, "MAPREDUCE-LENGTH-IS-20"); Assert.assertEquals("MAPREDUCE-LENGTH-IS-", app1.getApplicationType()); rm.stop(); } @Test (timeout = 10000) public void 
testGetApplications() throws YarnException, IOException { Configuration conf = new Configuration(); final YarnClient client = new MockYarnClient(); client.init(conf); client.start(); List<ApplicationReport> expectedReports = ((MockYarnClient)client).getReports(); List<ApplicationReport> reports = client.getApplications(); Assert.assertEquals(reports, expectedReports); Set<String> appTypes = new HashSet<String>(); appTypes.add("YARN"); appTypes.add("NON-YARN"); reports = client.getApplications(appTypes, null); Assert.assertEquals(reports.size(), 2); Assert .assertTrue((reports.get(0).getApplicationType().equals("YARN") && reports .get(1).getApplicationType().equals("NON-YARN")) || (reports.get(1).getApplicationType().equals("YARN") && reports .get(0).getApplicationType().equals("NON-YARN"))); for(ApplicationReport report : reports) { Assert.assertTrue(expectedReports.contains(report)); } EnumSet<YarnApplicationState> appStates = EnumSet.noneOf(YarnApplicationState.class); appStates.add(YarnApplicationState.FINISHED); appStates.add(YarnApplicationState.FAILED); reports = client.getApplications(null, appStates); Assert.assertEquals(reports.size(), 2); Assert .assertTrue((reports.get(0).getApplicationType().equals("NON-YARN") && reports .get(1).getApplicationType().equals("NON-MAPREDUCE")) || (reports.get(1).getApplicationType().equals("NON-YARN") && reports .get(0).getApplicationType().equals("NON-MAPREDUCE"))); for (ApplicationReport report : reports) { Assert.assertTrue(expectedReports.contains(report)); } reports = client.getApplications(appTypes, appStates); Assert.assertEquals(reports.size(), 1); Assert .assertTrue((reports.get(0).getApplicationType().equals("NON-YARN"))); for (ApplicationReport report : reports) { Assert.assertTrue(expectedReports.contains(report)); } client.stop(); } @Test(timeout = 10000) public void testGetApplicationAttempts() throws YarnException, IOException { Configuration conf = new Configuration(); final YarnClient client = new 
MockYarnClient(); client.init(conf); client.start(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); List<ApplicationAttemptReport> reports = client .getApplicationAttempts(applicationId); Assert.assertNotNull(reports); Assert.assertEquals(reports.get(0).getApplicationAttemptId(), ApplicationAttemptId.newInstance(applicationId, 1)); Assert.assertEquals(reports.get(1).getApplicationAttemptId(), ApplicationAttemptId.newInstance(applicationId, 2)); client.stop(); } @Test(timeout = 10000) public void testGetApplicationAttempt() throws YarnException, IOException { Configuration conf = new Configuration(); final YarnClient client = new MockYarnClient(); client.init(conf); client.start(); List<ApplicationReport> expectedReports = ((MockYarnClient) client) .getReports(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance( applicationId, 1); ApplicationAttemptReport report = client .getApplicationAttemptReport(appAttemptId); Assert.assertNotNull(report); Assert.assertEquals(report.getApplicationAttemptId().toString(), expectedReports.get(0).getCurrentApplicationAttemptId().toString()); client.stop(); } @Test(timeout = 10000) public void testGetContainers() throws YarnException, IOException { Configuration conf = new Configuration(); conf.setBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED, true); final YarnClient client = new MockYarnClient(); client.init(conf); client.start(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance( applicationId, 1); List<ContainerReport> reports = client.getContainers(appAttemptId); Assert.assertNotNull(reports); Assert.assertEquals(reports.get(0).getContainerId(), (ContainerId.newContainerId(appAttemptId, 1))); Assert.assertEquals(reports.get(1).getContainerId(), (ContainerId.newContainerId(appAttemptId, 2))); 
Assert.assertEquals(reports.get(2).getContainerId(), (ContainerId.newContainerId(appAttemptId, 3))); //First2 containers should come from RM with updated state information and // 3rd container is not there in RM and should Assert.assertEquals(ContainerState.RUNNING, (reports.get(0).getContainerState())); Assert.assertEquals(ContainerState.RUNNING, (reports.get(1).getContainerState())); Assert.assertEquals(ContainerState.COMPLETE, (reports.get(2).getContainerState())); client.stop(); } @Test(timeout = 10000) public void testGetContainerReport() throws YarnException, IOException { Configuration conf = new Configuration(); conf.setBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED, true); final YarnClient client = new MockYarnClient(); client.init(conf); client.start(); List<ApplicationReport> expectedReports = ((MockYarnClient) client) .getReports(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance( applicationId, 1); ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1); ContainerReport report = client.getContainerReport(containerId); Assert.assertNotNull(report); Assert.assertEquals(report.getContainerId().toString(), (ContainerId.newContainerId(expectedReports.get(0) .getCurrentApplicationAttemptId(), 1)).toString()); containerId = ContainerId.newContainerId(appAttemptId, 3); report = client.getContainerReport(containerId); Assert.assertNotNull(report); Assert.assertEquals(report.getContainerId().toString(), (ContainerId.newContainerId(expectedReports.get(0) .getCurrentApplicationAttemptId(), 3)).toString()); client.stop(); } @Test (timeout = 10000) public void testGetLabelsToNodes() throws YarnException, IOException { Configuration conf = new Configuration(); final YarnClient client = new MockYarnClient(); client.init(conf); client.start(); // Get labels to nodes mapping Map<NodeLabel, Set<NodeId>> expectedLabelsToNodes = 
((MockYarnClient)client).getLabelsToNodesMap(); Map<NodeLabel, Set<NodeId>> labelsToNodes = client.getLabelsToNodes(); Assert.assertEquals(labelsToNodes, expectedLabelsToNodes); Assert.assertEquals(labelsToNodes.size(), 3); // Get labels to nodes for selected labels Set<String> setLabels = new HashSet<String>(Arrays.asList("x", "z")); expectedLabelsToNodes = ((MockYarnClient)client).getLabelsToNodesMap(setLabels); labelsToNodes = client.getLabelsToNodes(setLabels); Assert.assertEquals(labelsToNodes, expectedLabelsToNodes); Assert.assertEquals(labelsToNodes.size(), 2); client.stop(); client.close(); } @Test (timeout = 10000) public void testGetNodesToLabels() throws YarnException, IOException { Configuration conf = new Configuration(); final YarnClient client = new MockYarnClient(); client.init(conf); client.start(); // Get labels to nodes mapping Map<NodeId, Set<NodeLabel>> expectedNodesToLabels = ((MockYarnClient) client) .getNodeToLabelsMap(); Map<NodeId, Set<NodeLabel>> nodesToLabels = client.getNodeToLabels(); Assert.assertEquals(nodesToLabels, expectedNodesToLabels); Assert.assertEquals(nodesToLabels.size(), 1); // Verify exclusivity Set<NodeLabel> labels = nodesToLabels.get(NodeId.newInstance("host", 0)); for (NodeLabel label : labels) { Assert.assertFalse(label.isExclusive()); } client.stop(); client.close(); } private static class MockYarnClient extends YarnClientImpl { private ApplicationReport mockReport; private List<ApplicationReport> reports; private HashMap<ApplicationId, List<ApplicationAttemptReport>> attempts = new HashMap<ApplicationId, List<ApplicationAttemptReport>>(); private HashMap<ApplicationAttemptId, List<ContainerReport>> containers = new HashMap<ApplicationAttemptId, List<ContainerReport>>(); private HashMap<ApplicationAttemptId, List<ContainerReport>> containersFromAHS = new HashMap<ApplicationAttemptId, List<ContainerReport>>(); GetApplicationsResponse mockAppResponse = mock(GetApplicationsResponse.class); 
GetApplicationAttemptsResponse mockAppAttemptsResponse = mock(GetApplicationAttemptsResponse.class); GetApplicationAttemptReportResponse mockAttemptResponse = mock(GetApplicationAttemptReportResponse.class); GetContainersResponse mockContainersResponse = mock(GetContainersResponse.class); GetContainerReportResponse mockContainerResponse = mock(GetContainerReportResponse.class); GetLabelsToNodesResponse mockLabelsToNodesResponse = mock(GetLabelsToNodesResponse.class); GetNodesToLabelsResponse mockNodeToLabelsResponse = mock(GetNodesToLabelsResponse.class); public MockYarnClient() { super(); reports = createAppReports(); } @Override public void start() { rmClient = mock(ApplicationClientProtocol.class); GetApplicationReportResponse mockResponse = mock(GetApplicationReportResponse.class); mockReport = mock(ApplicationReport.class); try{ when(rmClient.getApplicationReport(any( GetApplicationReportRequest.class))).thenReturn(mockResponse); when(rmClient.getApplications(any(GetApplicationsRequest.class))) .thenReturn(mockAppResponse); // return false for 1st kill request, and true for the 2nd. 
when(rmClient.forceKillApplication(any( KillApplicationRequest.class))) .thenReturn(KillApplicationResponse.newInstance(false)).thenReturn( KillApplicationResponse.newInstance(true)); when( rmClient .getApplicationAttemptReport(any(GetApplicationAttemptReportRequest.class))) .thenReturn(mockAttemptResponse); when( rmClient .getApplicationAttempts(any(GetApplicationAttemptsRequest.class))) .thenReturn(mockAppAttemptsResponse); when(rmClient.getContainers(any(GetContainersRequest.class))) .thenReturn(mockContainersResponse); when(rmClient.getContainerReport(any(GetContainerReportRequest.class))) .thenReturn(mockContainerResponse); when(rmClient.getLabelsToNodes(any(GetLabelsToNodesRequest.class))) .thenReturn(mockLabelsToNodesResponse); when(rmClient.getNodeToLabels(any(GetNodesToLabelsRequest.class))) .thenReturn(mockNodeToLabelsResponse); historyClient = mock(AHSClient.class); } catch (YarnException e) { Assert.fail("Exception is not expected."); } catch (IOException e) { Assert.fail("Exception is not expected."); } when(mockResponse.getApplicationReport()).thenReturn(mockReport); } public ApplicationClientProtocol getRMClient() { return rmClient; } @Override public List<ApplicationReport> getApplications( Set<String> applicationTypes, EnumSet<YarnApplicationState> applicationStates) throws YarnException, IOException { when(mockAppResponse.getApplicationList()).thenReturn( getApplicationReports(reports, applicationTypes, applicationStates)); return super.getApplications(applicationTypes, applicationStates); } @Override public void stop() { } public void setYarnApplicationState(YarnApplicationState state) { when(mockReport.getYarnApplicationState()).thenReturn( YarnApplicationState.NEW, YarnApplicationState.NEW_SAVING, YarnApplicationState.NEW_SAVING, state); } public List<ApplicationReport> getReports() { return this.reports; } private List<ApplicationReport> createAppReports() { ApplicationId applicationId = ApplicationId.newInstance(1234, 5); ApplicationReport 
newApplicationReport = ApplicationReport.newInstance( applicationId, ApplicationAttemptId.newInstance(applicationId, 1), "user", "queue", "appname", "host", 124, null, YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null); List<ApplicationReport> applicationReports = new ArrayList<ApplicationReport>(); applicationReports.add(newApplicationReport); List<ApplicationAttemptReport> appAttempts = new ArrayList<ApplicationAttemptReport>(); ApplicationAttemptReport attempt = ApplicationAttemptReport.newInstance( ApplicationAttemptId.newInstance(applicationId, 1), "host", 124, "url", "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, ContainerId.newContainerId( newApplicationReport.getCurrentApplicationAttemptId(), 1), 0, 0); appAttempts.add(attempt); ApplicationAttemptReport attempt1 = ApplicationAttemptReport.newInstance( ApplicationAttemptId.newInstance(applicationId, 2), "host", 124, "url", "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, ContainerId.newContainerId( newApplicationReport.getCurrentApplicationAttemptId(), 2)); appAttempts.add(attempt1); attempts.put(applicationId, appAttempts); List<ContainerReport> containerReports = new ArrayList<ContainerReport>(); ContainerReport container = ContainerReport.newInstance( ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, "diagnosticInfo", "logURL", 0, ContainerState.RUNNING, "http://" + NodeId.newInstance("host", 2345).toString()); containerReports.add(container); ContainerReport container1 = ContainerReport.newInstance( ContainerId.newContainerId(attempt.getApplicationAttemptId(), 2), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, "diagnosticInfo", "logURL", 0, ContainerState.RUNNING, "http://" + NodeId.newInstance("host", 2345).toString()); containerReports.add(container1); 
containers.put(attempt.getApplicationAttemptId(), containerReports); //add containers to be sent from AHS List<ContainerReport> containerReportsForAHS = new ArrayList<ContainerReport>(); container = ContainerReport.newInstance( ContainerId.newContainerId(attempt.getApplicationAttemptId(), 1), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, "diagnosticInfo", "logURL", 0, null, "http://" + NodeId.newInstance("host", 2345).toString()); containerReportsForAHS.add(container); container1 = ContainerReport.newInstance( ContainerId.newContainerId(attempt.getApplicationAttemptId(), 2), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, "diagnosticInfo", "HSlogURL", 0, null, "http://" + NodeId.newInstance("host", 2345).toString()); containerReportsForAHS.add(container1); ContainerReport container2 = ContainerReport.newInstance( ContainerId.newContainerId(attempt.getApplicationAttemptId(),3), null, NodeId.newInstance("host", 1234), Priority.UNDEFINED, 1234, 5678, "diagnosticInfo", "HSlogURL", 0, ContainerState.COMPLETE, "http://" + NodeId.newInstance("host", 2345).toString()); containerReportsForAHS.add(container2); containersFromAHS.put(attempt.getApplicationAttemptId(), containerReportsForAHS); ApplicationId applicationId2 = ApplicationId.newInstance(1234, 6); ApplicationReport newApplicationReport2 = ApplicationReport.newInstance( applicationId2, ApplicationAttemptId.newInstance(applicationId2, 2), "user2", "queue2", "appname2", "host2", 125, null, YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f, "NON-YARN", null); applicationReports.add(newApplicationReport2); ApplicationId applicationId3 = ApplicationId.newInstance(1234, 7); ApplicationReport newApplicationReport3 = ApplicationReport.newInstance( applicationId3, ApplicationAttemptId.newInstance(applicationId3, 3), "user3", "queue3", "appname3", "host3", 126, null, YarnApplicationState.RUNNING, "diagnostics3", 
"url3", 3, 3, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE", null); applicationReports.add(newApplicationReport3); ApplicationId applicationId4 = ApplicationId.newInstance(1234, 8); ApplicationReport newApplicationReport4 = ApplicationReport.newInstance( applicationId4, ApplicationAttemptId.newInstance(applicationId4, 4), "user4", "queue4", "appname4", "host4", 127, null, YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f, "NON-MAPREDUCE", null); applicationReports.add(newApplicationReport4); return applicationReports; } private List<ApplicationReport> getApplicationReports( List<ApplicationReport> applicationReports, Set<String> applicationTypes, EnumSet<YarnApplicationState> applicationStates) { List<ApplicationReport> appReports = new ArrayList<ApplicationReport>(); for (ApplicationReport appReport : applicationReports) { if (applicationTypes != null && !applicationTypes.isEmpty()) { if (!applicationTypes.contains(appReport.getApplicationType())) { continue; } } if (applicationStates != null && !applicationStates.isEmpty()) { if (!applicationStates.contains(appReport.getYarnApplicationState())) { continue; } } appReports.add(appReport); } return appReports; } @Override public Map<NodeLabel, Set<NodeId>> getLabelsToNodes() throws YarnException, IOException { when(mockLabelsToNodesResponse.getLabelsToNodes()).thenReturn( getLabelsToNodesMap()); return super.getLabelsToNodes(); } @Override public Map<NodeLabel, Set<NodeId>> getLabelsToNodes(Set<String> labels) throws YarnException, IOException { when(mockLabelsToNodesResponse.getLabelsToNodes()).thenReturn( getLabelsToNodesMap(labels)); return super.getLabelsToNodes(labels); } public Map<NodeLabel, Set<NodeId>> getLabelsToNodesMap() { Map<NodeLabel, Set<NodeId>> map = new HashMap<NodeLabel, Set<NodeId>>(); Set<NodeId> setNodeIds = new HashSet<NodeId>(Arrays.asList( NodeId.newInstance("host1", 0), NodeId.newInstance("host2", 0))); 
map.put(NodeLabel.newInstance("x"), setNodeIds); map.put(NodeLabel.newInstance("y"), setNodeIds); map.put(NodeLabel.newInstance("z"), setNodeIds); return map; } public Map<NodeLabel, Set<NodeId>> getLabelsToNodesMap(Set<String> labels) { Map<NodeLabel, Set<NodeId>> map = new HashMap<NodeLabel, Set<NodeId>>(); Set<NodeId> setNodeIds = new HashSet<NodeId>(Arrays.asList( NodeId.newInstance("host1", 0), NodeId.newInstance("host2", 0))); for(String label : labels) { map.put(NodeLabel.newInstance(label), setNodeIds); } return map; } @Override public Map<NodeId, Set<NodeLabel>> getNodeToLabels() throws YarnException, IOException { when(mockNodeToLabelsResponse.getNodeToLabels()).thenReturn( getNodeToLabelsMap()); return super.getNodeToLabels(); } public Map<NodeId, Set<NodeLabel>> getNodeToLabelsMap() { Map<NodeId, Set<NodeLabel>> map = new HashMap<NodeId, Set<NodeLabel>>(); Set<NodeLabel> setNodeLabels = new HashSet<NodeLabel>(Arrays.asList( NodeLabel.newInstance("x", false), NodeLabel.newInstance("y", false))); map.put(NodeId.newInstance("host", 0), setNodeLabels); return map; } @Override public List<ApplicationAttemptReport> getApplicationAttempts( ApplicationId appId) throws YarnException, IOException { when(mockAppAttemptsResponse.getApplicationAttemptList()).thenReturn( getAttempts(appId)); return super.getApplicationAttempts(appId); } @Override public ApplicationAttemptReport getApplicationAttemptReport( ApplicationAttemptId appAttemptId) throws YarnException, IOException { when(mockAttemptResponse.getApplicationAttemptReport()).thenReturn( getAttempt(appAttemptId)); return super.getApplicationAttemptReport(appAttemptId); } @Override public List<ContainerReport> getContainers(ApplicationAttemptId appAttemptId) throws YarnException, IOException { when(mockContainersResponse.getContainerList()).thenReturn( getContainersReport(appAttemptId)); when(historyClient.getContainers(any(ApplicationAttemptId.class))) .thenReturn(getContainersFromAHS(appAttemptId)); return 
super.getContainers(appAttemptId); } private List<ContainerReport> getContainersFromAHS( ApplicationAttemptId appAttemptId) { return containersFromAHS.get(appAttemptId); } @Override public ContainerReport getContainerReport(ContainerId containerId) throws YarnException, IOException { try { ContainerReport container = getContainer(containerId, containers); when(mockContainerResponse.getContainerReport()).thenReturn(container); } catch (YarnException e) { when(rmClient.getContainerReport(any(GetContainerReportRequest.class))) .thenThrow(e).thenReturn(mockContainerResponse); } try { ContainerReport container = getContainer(containerId, containersFromAHS); when(historyClient.getContainerReport(any(ContainerId.class))) .thenReturn(container); } catch (YarnException e) { when(historyClient.getContainerReport(any(ContainerId.class))) .thenThrow(e); } return super.getContainerReport(containerId); } public List<ApplicationAttemptReport> getAttempts(ApplicationId appId) { return attempts.get(appId); } public ApplicationAttemptReport getAttempt(ApplicationAttemptId appAttemptId) { return attempts.get(appAttemptId.getApplicationId()).get(0); } public List<ContainerReport> getContainersReport( ApplicationAttemptId appAttemptId) { return containers.get(appAttemptId); } private ContainerReport getContainer( ContainerId containerId, HashMap<ApplicationAttemptId, List<ContainerReport>> containersToAppAttemptMapping) throws YarnException, IOException { List<ContainerReport> containersForAppAttempt = containersToAppAttemptMapping.get(containerId .getApplicationAttemptId()); if (containersForAppAttempt == null) { throw new ApplicationNotFoundException(containerId .getApplicationAttemptId().getApplicationId() + " is not found "); } Iterator<ContainerReport> iterator = containersForAppAttempt.iterator(); while (iterator.hasNext()) { ContainerReport next = iterator.next(); if (next.getContainerId().equals(containerId)) { return next; } } throw new ContainerNotFoundException(containerId + 
" is not found "); } } @Test(timeout = 30000) public void testAMMRTokens() throws Exception { MiniYARNCluster cluster = new MiniYARNCluster("testMRAMTokens", 1, 1, 1); YarnClient rmClient = null; try { cluster.init(new YarnConfiguration()); cluster.start(); final Configuration yarnConf = cluster.getConfig(); rmClient = YarnClient.createYarnClient(); rmClient.init(yarnConf); rmClient.start(); ApplicationId appId = createApp(rmClient, false); waitTillAccepted(rmClient, appId, false); //managed AMs don't return AMRM token Assert.assertNull(rmClient.getAMRMToken(appId)); appId = createApp(rmClient, true); waitTillAccepted(rmClient, appId, true); long start = System.currentTimeMillis(); while (rmClient.getAMRMToken(appId) == null) { if (System.currentTimeMillis() - start > 20 * 1000) { Assert.fail("AMRM token is null"); } Thread.sleep(100); } //unmanaged AMs do return AMRM token Assert.assertNotNull(rmClient.getAMRMToken(appId)); UserGroupInformation other = UserGroupInformation.createUserForTesting("foo", new String[]{}); appId = other.doAs( new PrivilegedExceptionAction<ApplicationId>() { @Override public ApplicationId run() throws Exception { YarnClient rmClient = YarnClient.createYarnClient(); rmClient.init(yarnConf); rmClient.start(); ApplicationId appId = createApp(rmClient, true); waitTillAccepted(rmClient, appId, true); long start = System.currentTimeMillis(); while (rmClient.getAMRMToken(appId) == null) { if (System.currentTimeMillis() - start > 20 * 1000) { Assert.fail("AMRM token is null"); } Thread.sleep(100); } //unmanaged AMs do return AMRM token Assert.assertNotNull(rmClient.getAMRMToken(appId)); return appId; } }); //other users don't get AMRM token Assert.assertNull(rmClient.getAMRMToken(appId)); } finally { if (rmClient != null) { rmClient.stop(); } cluster.stop(); } } private ApplicationId createApp(YarnClient rmClient, boolean unmanaged) throws Exception { YarnClientApplication newApp = rmClient.createApplication(); ApplicationId appId = 
newApp.getNewApplicationResponse().getApplicationId(); // Create launch context for app master ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class); // set the application id appContext.setApplicationId(appId); // set the application name appContext.setApplicationName("test"); // Set the priority for the application master Priority pri = Records.newRecord(Priority.class); pri.setPriority(1); appContext.setPriority(pri); // Set the queue to which this application is to be submitted in the RM appContext.setQueue("default"); // Set up the container launch context for the application master ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class); appContext.setAMContainerSpec(amContainer); appContext.setResource(Resource.newInstance(1024, 1)); appContext.setUnmanagedAM(unmanaged); // Submit the application to the applications manager rmClient.submitApplication(appContext); return appId; } private void waitTillAccepted(YarnClient rmClient, ApplicationId appId, boolean unmanagedApplication) throws Exception { try { long start = System.currentTimeMillis(); ApplicationReport report = rmClient.getApplicationReport(appId); while (YarnApplicationState.ACCEPTED != report.getYarnApplicationState()) { if (System.currentTimeMillis() - start > 20 * 1000) { throw new Exception("App '" + appId + "' time out, failed to reach ACCEPTED state"); } Thread.sleep(200); report = rmClient.getApplicationReport(appId); } Assert.assertEquals(unmanagedApplication, report.isUnmanagedApp()); } catch (Exception ex) { throw new Exception(ex); } } @Test public void testAsyncAPIPollTimeout() { testAsyncAPIPollTimeoutHelper(null, false); testAsyncAPIPollTimeoutHelper(0L, true); testAsyncAPIPollTimeoutHelper(1L, true); } private void testAsyncAPIPollTimeoutHelper(Long valueForTimeout, boolean expectedTimeoutEnforcement) { YarnClientImpl client = new YarnClientImpl(); try { Configuration conf = new Configuration(); if 
(valueForTimeout != null) { conf.setLong( YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS, valueForTimeout); } client.init(conf); Assert.assertEquals( expectedTimeoutEnforcement, client.enforceAsyncAPITimeout()); } finally { IOUtils.closeQuietly(client); } } @Test public void testBestEffortTimelineDelegationToken() throws Exception { Configuration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf); YarnClientImpl client = spy(new YarnClientImpl() { @Override TimelineClient createTimelineClient() throws IOException, YarnException { timelineClient = mock(TimelineClient.class); when(timelineClient.getDelegationToken(any(String.class))) .thenThrow(new IOException("Best effort test exception")); return timelineClient; } }); client.init(conf); try { conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, true); client.serviceInit(conf); client.getTimelineDelegationToken(); } catch (Exception e) { Assert.fail("Should not have thrown an exception"); } try { conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, false); client.serviceInit(conf); client.getTimelineDelegationToken(); Assert.fail("Get delegation token should have thrown an exception"); } catch (Exception e) { // Success } } @Test public void testAutomaticTimelineDelegationTokenLoading() throws Exception { Configuration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf); TimelineDelegationTokenIdentifier timelineDT = new TimelineDelegationTokenIdentifier(); final Token<TimelineDelegationTokenIdentifier> dToken = new Token<TimelineDelegationTokenIdentifier>( timelineDT.getBytes(), new byte[0], timelineDT.getKind(), new Text()); // create a mock client YarnClientImpl client = spy(new YarnClientImpl() { @Override 
TimelineClient createTimelineClient() throws IOException, YarnException { timelineClient = mock(TimelineClient.class); when(timelineClient.getDelegationToken(any(String.class))) .thenReturn(dToken); return timelineClient; } @Override protected void serviceStart() throws Exception { rmClient = mock(ApplicationClientProtocol.class); } @Override protected void serviceStop() throws Exception { } @Override public ApplicationReport getApplicationReport(ApplicationId appId) { ApplicationReport report = mock(ApplicationReport.class); when(report.getYarnApplicationState()) .thenReturn(YarnApplicationState.RUNNING); return report; } @Override public boolean isSecurityEnabled() { return true; } }); client.init(conf); client.start(); try { // when i == 0, timeline DT already exists, no need to get one more // when i == 1, timeline DT doesn't exist, need to get one more for (int i = 0; i < 2; ++i) { ApplicationSubmissionContext context = mock(ApplicationSubmissionContext.class); ApplicationId applicationId = ApplicationId.newInstance(0, i + 1); when(context.getApplicationId()).thenReturn(applicationId); DataOutputBuffer dob = new DataOutputBuffer(); Credentials credentials = new Credentials(); if (i == 0) { credentials.addToken(client.timelineService, dToken); } credentials.writeTokenStorageToStream(dob); ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); ContainerLaunchContext clc = ContainerLaunchContext.newInstance( null, null, null, null, tokens, null); when(context.getAMContainerSpec()).thenReturn(clc); client.submitApplication(context); if (i == 0) { // GetTimelineDelegationToken shouldn't be called verify(client, never()).getTimelineDelegationToken(); } // In either way, token should be there credentials = new Credentials(); DataInputByteBuffer dibb = new DataInputByteBuffer(); tokens = clc.getTokens(); if (tokens != null) { dibb.reset(tokens); credentials.readTokenStorageStream(dibb); tokens.rewind(); } Collection<Token<? 
extends TokenIdentifier>> dTokens = credentials.getAllTokens(); Assert.assertEquals(1, dTokens.size()); Assert.assertEquals(dToken, dTokens.iterator().next()); } } finally { client.stop(); } } @Test public void testParseTimelineDelegationTokenRenewer() throws Exception { // Client side YarnClientImpl client = (YarnClientImpl) YarnClient.createYarnClient(); Configuration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); conf.set(YarnConfiguration.RM_PRINCIPAL, "rm/[email protected]"); conf.set( YarnConfiguration.RM_ADDRESS, "localhost:8188"); try { client.init(conf); client.start(); Assert.assertEquals("rm/[email protected]", client.timelineDTRenewer); } finally { client.stop(); } } @Test public void testReservationAPIs() { // initialize CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration(); ReservationSystemTestUtil.setupQueueConfiguration(conf); conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class); conf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true); MiniYARNCluster cluster = new MiniYARNCluster("testReservationAPIs", 2, 1, 1); YarnClient client = null; try { cluster.init(conf); cluster.start(); final Configuration yarnConf = cluster.getConfig(); client = YarnClient.createYarnClient(); client.init(yarnConf); client.start(); // create a reservation Clock clock = new UTCClock(); long arrival = clock.getTime(); long duration = 60000; long deadline = (long) (arrival + 1.05 * duration); ReservationSubmissionRequest sRequest = createSimpleReservationRequest(4, arrival, deadline, duration); ReservationSubmissionResponse sResponse = null; try { sResponse = client.submitReservation(sRequest); } catch (Exception e) { Assert.fail(e.getMessage()); } Assert.assertNotNull(sResponse); ReservationId reservationID = sResponse.getReservationId(); Assert.assertNotNull(reservationID); System.out.println("Submit reservation response: " + reservationID); // 
Update the reservation ReservationDefinition rDef = sRequest.getReservationDefinition(); ReservationRequest rr = rDef.getReservationRequests().getReservationResources().get(0); rr.setNumContainers(5); arrival = clock.getTime(); duration = 30000; deadline = (long) (arrival + 1.05 * duration); rr.setDuration(duration); rDef.setArrival(arrival); rDef.setDeadline(deadline); ReservationUpdateRequest uRequest = ReservationUpdateRequest.newInstance(rDef, reservationID); ReservationUpdateResponse uResponse = null; try { uResponse = client.updateReservation(uRequest); } catch (Exception e) { Assert.fail(e.getMessage()); } Assert.assertNotNull(sResponse); System.out.println("Update reservation response: " + uResponse); // Delete the reservation ReservationDeleteRequest dRequest = ReservationDeleteRequest.newInstance(reservationID); ReservationDeleteResponse dResponse = null; try { dResponse = client.deleteReservation(dRequest); } catch (Exception e) { Assert.fail(e.getMessage()); } Assert.assertNotNull(sResponse); System.out.println("Delete reservation response: " + dResponse); } finally { // clean-up if (client != null) { client.stop(); } cluster.stop(); } } private ReservationSubmissionRequest createSimpleReservationRequest( int numContainers, long arrival, long deadline, long duration) { // create a request with a single atomic ask ReservationRequest r = ReservationRequest.newInstance(Resource.newInstance(1024, 1), numContainers, 1, duration); ReservationRequests reqs = ReservationRequests.newInstance(Collections.singletonList(r), ReservationRequestInterpreter.R_ALL); ReservationDefinition rDef = ReservationDefinition.newInstance(arrival, deadline, reqs, "testYarnClient#reservation"); ReservationSubmissionRequest request = ReservationSubmissionRequest.newInstance(rDef, ReservationSystemTestUtil.reservationQ); return request; } @Test(timeout = 30000, expected = ApplicationNotFoundException.class) public void testShouldNotRetryForeverForNonNetworkExceptions() throws 
Exception { YarnConfiguration conf = new YarnConfiguration(); conf.setInt(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, -1); ResourceManager rm = null; YarnClient yarnClient = null; try { // start rm rm = new ResourceManager(); rm.init(conf); rm.start(); yarnClient = YarnClient.createYarnClient(); yarnClient.init(conf); yarnClient.start(); // create invalid application id ApplicationId appId = ApplicationId.newInstance(1430126768L, 10645); // RM should throw ApplicationNotFoundException exception yarnClient.getApplicationReport(appId); } finally { if (yarnClient != null) { yarnClient.stop(); } if (rm != null) { rm.stop(); } } } }
52,030
38.931696
103
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/SCMAdmin.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client; import java.io.IOException; import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.api.SCMAdminProtocol; import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskResponse; public class SCMAdmin extends Configured implements Tool { private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); public SCMAdmin() { super(); } public SCMAdmin(Configuration conf) { super(conf); } private static void printHelp(String cmd) { String summary = "scmadmin is the command to execute shared cache manager" + "administrative 
commands.\n" + "The full syntax is: \n\n" + "hadoop scmadmin" + " [-runCleanerTask]" + " [-help [cmd]]\n"; String runCleanerTask = "-runCleanerTask: Run cleaner task right away.\n"; String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" + "\t\tis specified.\n"; if ("runCleanerTask".equals(cmd)) { System.out.println(runCleanerTask); } else if ("help".equals(cmd)) { System.out.println(help); } else { System.out.println(summary); System.out.println(runCleanerTask); System.out.println(help); System.out.println(); ToolRunner.printGenericCommandUsage(System.out); } } /** * Displays format of commands. * @param cmd The command that is being executed. */ private static void printUsage(String cmd) { if ("-runCleanerTask".equals(cmd)) { System.err.println("Usage: yarn scmadmin" + " [-runCleanerTask]"); } else { System.err.println("Usage: yarn scmadmin"); System.err.println(" [-runCleanerTask]"); System.err.println(" [-help [cmd]]"); System.err.println(); ToolRunner.printGenericCommandUsage(System.err); } } protected SCMAdminProtocol createSCMAdminProtocol() throws IOException { // Get the current configuration final YarnConfiguration conf = new YarnConfiguration(getConf()); // Create the admin client final InetSocketAddress addr = conf.getSocketAddr( YarnConfiguration.SCM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_SCM_ADMIN_ADDRESS, YarnConfiguration.DEFAULT_SCM_ADMIN_PORT); final YarnRPC rpc = YarnRPC.create(conf); SCMAdminProtocol scmAdminProtocol = (SCMAdminProtocol) rpc.getProxy(SCMAdminProtocol.class, addr, conf); return scmAdminProtocol; } private int runCleanerTask() throws YarnException, IOException { // run cleaner task right away SCMAdminProtocol scmAdminProtocol = createSCMAdminProtocol(); RunSharedCacheCleanerTaskRequest request = recordFactory.newRecordInstance(RunSharedCacheCleanerTaskRequest.class); RunSharedCacheCleanerTaskResponse response = scmAdminProtocol.runCleanerTask(request); if (response.getAccepted()) { 
System.out.println("request accepted by shared cache manager"); return 0; } else { System.out.println("request rejected by shared cache manager"); return 1; } } @Override public int run(String[] args) throws Exception { if (args.length < 1) { printUsage(""); return -1; } int i = 0; String cmd = args[i++]; try { if ("-runCleanerTask".equals(cmd)) { if (args.length != 1) { printUsage(cmd); return -1; } else { return runCleanerTask(); } } else if ("-help".equals(cmd)) { if (i < args.length) { printUsage(args[i]); } else { printHelp(""); } return 0; } else { System.err.println(cmd.substring(1) + ": Unknown command"); printUsage(""); return -1; } } catch (IllegalArgumentException arge) { System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage()); printUsage(cmd); } catch (RemoteException e) { // // This is a error returned by hadoop server. Print // out the first line of the error message, ignore the stack trace. try { String[] content; content = e.getLocalizedMessage().split("\n"); System.err.println(cmd.substring(1) + ": " + content[0]); } catch (Exception ex) { System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage()); } } catch (Exception e) { System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage()); } return -1; } public static void main(String[] args) throws Exception { int result = ToolRunner.run(new SCMAdmin(), args); System.exit(result); } }
6,189
32.641304
98
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Public package org.apache.hadoop.yarn.client.cli; import org.apache.hadoop.classification.InterfaceAudience;
935
41.545455
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/LogsCLI.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.cli; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import javax.ws.rs.core.MediaType; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Tool; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.client.api.YarnClient; import 
org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.codehaus.jettison.json.JSONArray; import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; import com.google.common.annotations.VisibleForTesting; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientHandlerException; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.UniformInterfaceException; import com.sun.jersey.api.client.WebResource; @Public @Evolving public class LogsCLI extends Configured implements Tool { private static final String CONTAINER_ID_OPTION = "containerId"; private static final String APPLICATION_ID_OPTION = "applicationId"; private static final String NODE_ADDRESS_OPTION = "nodeAddress"; private static final String APP_OWNER_OPTION = "appOwner"; private static final String AM_CONTAINER_OPTION = "am"; private static final String CONTAINER_LOG_FILES = "logFiles"; public static final String HELP_CMD = "help"; @Override public int run(String[] args) throws Exception { Options opts = new Options(); opts.addOption(HELP_CMD, false, "Displays help for all commands."); Option appIdOpt = new Option(APPLICATION_ID_OPTION, true, "ApplicationId (required)"); appIdOpt.setRequired(true); opts.addOption(appIdOpt); opts.addOption(CONTAINER_ID_OPTION, true, "ContainerId. " + "By default, it will only print syslog if the application is runing." 
+ " Work with -logFiles to get other logs."); opts.addOption(NODE_ADDRESS_OPTION, true, "NodeAddress in the format " + "nodename:port"); opts.addOption(APP_OWNER_OPTION, true, "AppOwner (assumed to be current user if not specified)"); Option amOption = new Option(AM_CONTAINER_OPTION, true, "Prints the AM Container logs for this application. " + "Specify comma-separated value to get logs for related AM Container. " + "For example, If we specify -am 1,2, we will get the logs for " + "the first AM Container as well as the second AM Container. " + "To get logs for all AM Containers, use -am ALL. " + "To get logs for the latest AM Container, use -am -1. " + "By default, it will only print out syslog. Work with -logFiles " + "to get other logs"); amOption.setValueSeparator(','); amOption.setArgs(Option.UNLIMITED_VALUES); amOption.setArgName("AM Containers"); opts.addOption(amOption); Option logFileOpt = new Option(CONTAINER_LOG_FILES, true, "Work with -am/-containerId and specify comma-separated value " + "to get specified Container log files"); logFileOpt.setValueSeparator(','); logFileOpt.setArgs(Option.UNLIMITED_VALUES); logFileOpt.setArgName("Log File Name"); opts.addOption(logFileOpt); opts.getOption(APPLICATION_ID_OPTION).setArgName("Application ID"); opts.getOption(CONTAINER_ID_OPTION).setArgName("Container ID"); opts.getOption(NODE_ADDRESS_OPTION).setArgName("Node Address"); opts.getOption(APP_OWNER_OPTION).setArgName("Application Owner"); opts.getOption(AM_CONTAINER_OPTION).setArgName("AM Containers"); Options printOpts = new Options(); printOpts.addOption(opts.getOption(HELP_CMD)); printOpts.addOption(opts.getOption(CONTAINER_ID_OPTION)); printOpts.addOption(opts.getOption(NODE_ADDRESS_OPTION)); printOpts.addOption(opts.getOption(APP_OWNER_OPTION)); printOpts.addOption(opts.getOption(AM_CONTAINER_OPTION)); printOpts.addOption(opts.getOption(CONTAINER_LOG_FILES)); if (args.length < 1) { printHelpMessage(printOpts); return -1; } if (args[0].equals("-help")) { 
printHelpMessage(printOpts); return 0; } CommandLineParser parser = new GnuParser(); String appIdStr = null; String containerIdStr = null; String nodeAddress = null; String appOwner = null; boolean getAMContainerLogs = false; String[] logFiles = null; List<String> amContainersList = new ArrayList<String>(); try { CommandLine commandLine = parser.parse(opts, args, true); appIdStr = commandLine.getOptionValue(APPLICATION_ID_OPTION); containerIdStr = commandLine.getOptionValue(CONTAINER_ID_OPTION); nodeAddress = commandLine.getOptionValue(NODE_ADDRESS_OPTION); appOwner = commandLine.getOptionValue(APP_OWNER_OPTION); getAMContainerLogs = commandLine.hasOption(AM_CONTAINER_OPTION); if (getAMContainerLogs) { String[] amContainers = commandLine.getOptionValues(AM_CONTAINER_OPTION); for (String am : amContainers) { boolean errorInput = false; if (!am.trim().equalsIgnoreCase("ALL")) { try { int id = Integer.parseInt(am.trim()); if (id != -1 && id <= 0) { errorInput = true; } } catch (NumberFormatException ex) { errorInput = true; } if (errorInput) { System.err.println( "Invalid input for option -am. 
Valid inputs are 'ALL', -1 " + "and any other integer which is larger than 0."); printHelpMessage(printOpts); return -1; } amContainersList.add(am.trim()); } else { amContainersList.add("ALL"); break; } } } if (commandLine.hasOption(CONTAINER_LOG_FILES)) { logFiles = commandLine.getOptionValues(CONTAINER_LOG_FILES); } } catch (ParseException e) { System.err.println("options parsing failed: " + e.getMessage()); printHelpMessage(printOpts); return -1; } if (appIdStr == null) { System.err.println("ApplicationId cannot be null!"); printHelpMessage(printOpts); return -1; } ApplicationId appId = null; try { appId = ConverterUtils.toApplicationId(appIdStr); } catch (Exception e) { System.err.println("Invalid ApplicationId specified"); return -1; } LogCLIHelpers logCliHelper = new LogCLIHelpers(); logCliHelper.setConf(getConf()); if (appOwner == null || appOwner.isEmpty()) { appOwner = UserGroupInformation.getCurrentUser().getShortUserName(); } YarnApplicationState appState = YarnApplicationState.NEW; try { appState = getApplicationState(appId); if (appState == YarnApplicationState.NEW || appState == YarnApplicationState.NEW_SAVING || appState == YarnApplicationState.SUBMITTED) { System.out.println("Logs are not avaiable right now."); return -1; } } catch (IOException | YarnException e) { System.err.println("Unable to get ApplicationState." + " Attempting to fetch logs directly from the filesystem."); } // To get am logs if (getAMContainerLogs) { // if we do not specify the value for CONTAINER_LOG_FILES option, // we will only output syslog if (logFiles == null || logFiles.length == 0) { logFiles = new String[] { "syslog" }; } // If the application is running, we will call the RM WebService // to get the AppAttempts which includes the nodeHttpAddress // and containerId for all the AM Containers. 
// After that, we will call NodeManager webService to get the // related logs if (appState == YarnApplicationState.ACCEPTED || appState == YarnApplicationState.RUNNING) { return printAMContainerLogs(getConf(), appIdStr, amContainersList, logFiles, logCliHelper, appOwner, false); } else { // If the application is in the final state, we will call RM webservice // to get all AppAttempts information first. If we get nothing, // we will try to call AHS webservice to get related AppAttempts // which includes nodeAddress for the AM Containers. // After that, we will use nodeAddress and containerId // to get logs from HDFS directly. if (getConf().getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED, YarnConfiguration.DEFAULT_APPLICATION_HISTORY_ENABLED)) { return printAMContainerLogs(getConf(), appIdStr, amContainersList, logFiles, logCliHelper, appOwner, true); } else { System.out .println("Can not get AMContainers logs for the application:" + appId); System.out.println("This application:" + appId + " is finished." + " Please enable the application history service. Or Using " + "yarn logs -applicationId <appId> -containerId <containerId> " + "--nodeAddress <nodeHttpAddress> to get the container logs"); return -1; } } } int resultCode = 0; if (containerIdStr != null) { // if we provide the node address and the application is in the final // state, we could directly get logs from HDFS. if (nodeAddress != null && isApplicationFinished(appState)) { return logCliHelper.dumpAContainersLogsForALogType(appIdStr, containerIdStr, nodeAddress, appOwner, logFiles == null ? null : Arrays.asList(logFiles)); } try { // If the nodeAddress is not provided, we will try to get // the ContainerReport. 
In the containerReport, we could get // nodeAddress and nodeHttpAddress ContainerReport report = getContainerReport(containerIdStr); String nodeHttpAddress = report.getNodeHttpAddress().replaceFirst( WebAppUtils.getHttpSchemePrefix(getConf()), ""); String nodeId = report.getAssignedNode().toString(); // If the application is not in the final state, // we will provide the NodeHttpAddress and get the container logs // by calling NodeManager webservice. if (!isApplicationFinished(appState)) { if (logFiles == null || logFiles.length == 0) { logFiles = new String[] { "syslog" }; } printContainerLogsFromRunningApplication(getConf(), appIdStr, containerIdStr, nodeHttpAddress, nodeId, logFiles, logCliHelper, appOwner); } else { // If the application is in the final state, we will directly // get the container logs from HDFS. printContainerLogsForFinishedApplication(appIdStr, containerIdStr, nodeId, logFiles, logCliHelper, appOwner); } return resultCode; } catch (IOException | YarnException ex) { System.err.println("Unable to get logs for this container:" + containerIdStr + "for the application:" + appId); if (!getConf().getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED, YarnConfiguration.DEFAULT_APPLICATION_HISTORY_ENABLED)) { System.out.println("Please enable the application history service. 
Or "); } System.out.println("Using " + "yarn logs -applicationId <appId> -containerId <containerId> " + "--nodeAddress <nodeHttpAddress> to get the container logs"); return -1; } } else { if (nodeAddress == null) { resultCode = logCliHelper.dumpAllContainersLogs(appId, appOwner, System.out); } else { System.out.println("Should at least provide ContainerId!"); printHelpMessage(printOpts); resultCode = -1; } } return resultCode; } private YarnApplicationState getApplicationState(ApplicationId appId) throws IOException, YarnException { YarnClient yarnClient = createYarnClient(); try { ApplicationReport appReport = yarnClient.getApplicationReport(appId); return appReport.getYarnApplicationState(); } finally { yarnClient.close(); } } @VisibleForTesting protected YarnClient createYarnClient() { YarnClient yarnClient = YarnClient.createYarnClient(); yarnClient.init(getConf()); yarnClient.start(); return yarnClient; } public static void main(String[] args) throws Exception { Configuration conf = new YarnConfiguration(); LogsCLI logDumper = new LogsCLI(); logDumper.setConf(conf); int exitCode = logDumper.run(args); System.exit(exitCode); } private void printHelpMessage(Options options) { System.out.println("Retrieve logs for completed YARN applications."); HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("yarn logs -applicationId <application ID> [OPTIONS]", new Options()); formatter.setSyntaxPrefix(""); formatter.printHelp("general options are:", options); } private List<JSONObject> getAMContainerInfoForRMWebService( Configuration conf, String appId) throws ClientHandlerException, UniformInterfaceException, JSONException { Client webServiceClient = Client.create(); String webAppAddress = WebAppUtils.getWebAppBindURL(conf, YarnConfiguration.RM_BIND_HOST, WebAppUtils.getRMWebAppURLWithScheme(conf)); WebResource webResource = webServiceClient.resource(webAppAddress); ClientResponse response = webResource.path("ws").path("v1").path("cluster").path("apps") 
.path(appId).path("appattempts").accept(MediaType.APPLICATION_JSON) .get(ClientResponse.class); JSONObject json = response.getEntity(JSONObject.class).getJSONObject("appAttempts"); JSONArray requests = json.getJSONArray("appAttempt"); List<JSONObject> amContainersList = new ArrayList<JSONObject>(); for (int i = 0; i < requests.length(); i++) { amContainersList.add(requests.getJSONObject(i)); } return amContainersList; } private List<JSONObject> getAMContainerInfoForAHSWebService(Configuration conf, String appId) throws ClientHandlerException, UniformInterfaceException, JSONException { Client webServiceClient = Client.create(); String webAppAddress = WebAppUtils.getHttpSchemePrefix(conf) + WebAppUtils.getAHSWebAppURLWithoutScheme(conf); WebResource webResource = webServiceClient.resource(webAppAddress); ClientResponse response = webResource.path("ws").path("v1").path("applicationhistory").path("apps") .path(appId).path("appattempts").accept(MediaType.APPLICATION_JSON) .get(ClientResponse.class); JSONObject json = response.getEntity(JSONObject.class); JSONArray requests = json.getJSONArray("appAttempt"); List<JSONObject> amContainersList = new ArrayList<JSONObject>(); for (int i = 0; i < requests.length(); i++) { amContainersList.add(requests.getJSONObject(i)); } Collections.reverse(amContainersList); return amContainersList; } private void printContainerLogsFromRunningApplication(Configuration conf, String appId, String containerIdStr, String nodeHttpAddress, String nodeId, String[] logFiles, LogCLIHelpers logCliHelper, String appOwner) throws IOException { Client webServiceClient = Client.create(); String containerString = "\n\nContainer: " + containerIdStr; System.out.println(containerString); System.out.println(StringUtils.repeat("=", containerString.length())); for (String logFile : logFiles) { System.out.println("LogType:" + logFile); System.out.println("Log Upload Time:" + Times.format(System.currentTimeMillis())); System.out.println("Log Contents:"); try { 
WebResource webResource = webServiceClient.resource(WebAppUtils.getHttpSchemePrefix(conf) + nodeHttpAddress); ClientResponse response = webResource.path("ws").path("v1").path("node") .path("containerlogs").path(containerIdStr).path(logFile) .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class); System.out.println(response.getEntity(String.class)); System.out.println("End of LogType:" + logFile); } catch (ClientHandlerException | UniformInterfaceException ex) { System.out.println("Can not find the log file:" + logFile + " for the container:" + containerIdStr + " in NodeManager:" + nodeId); } } // for the case, we have already uploaded partial logs in HDFS logCliHelper.dumpAContainersLogsForALogType(appId, containerIdStr, nodeId, appOwner, Arrays.asList(logFiles)); } private void printContainerLogsForFinishedApplication(String appId, String containerId, String nodeAddress, String[] logFiles, LogCLIHelpers logCliHelper, String appOwner) throws IOException { String containerString = "\n\nContainer: " + containerId; System.out.println(containerString); System.out.println(StringUtils.repeat("=", containerString.length())); logCliHelper.dumpAContainersLogsForALogType(appId, containerId, nodeAddress, appOwner, logFiles != null ? 
Arrays.asList(logFiles) : null); } private ContainerReport getContainerReport(String containerIdStr) throws YarnException, IOException { YarnClient yarnClient = createYarnClient(); try { return yarnClient.getContainerReport(ConverterUtils .toContainerId(containerIdStr)); } finally { yarnClient.close(); } } private boolean isApplicationFinished(YarnApplicationState appState) { return appState == YarnApplicationState.FINISHED || appState == YarnApplicationState.FAILED || appState == YarnApplicationState.KILLED; } private int printAMContainerLogs(Configuration conf, String appId, List<String> amContainers, String[] logFiles, LogCLIHelpers logCliHelper, String appOwner, boolean applicationFinished) throws Exception { List<JSONObject> amContainersList = null; List<AMLogsRequest> requests = new ArrayList<AMLogsRequest>(); boolean getAMContainerLists = false; String errorMessage = ""; try { amContainersList = getAMContainerInfoForRMWebService(conf, appId); if (amContainersList != null && !amContainersList.isEmpty()) { getAMContainerLists = true; for (JSONObject amContainer : amContainersList) { AMLogsRequest request = new AMLogsRequest(applicationFinished); request.setAmContainerId(amContainer.getString("containerId")); request.setNodeHttpAddress(amContainer.getString("nodeHttpAddress")); request.setNodeId(amContainer.getString("nodeId")); requests.add(request); } } } catch (Exception ex) { errorMessage = ex.getMessage(); if (applicationFinished) { try { amContainersList = getAMContainerInfoForAHSWebService(conf, appId); if (amContainersList != null && !amContainersList.isEmpty()) { getAMContainerLists = true; for (JSONObject amContainer : amContainersList) { AMLogsRequest request = new AMLogsRequest(applicationFinished); request.setAmContainerId(amContainer.getString("amContainerId")); requests.add(request); } } } catch (Exception e) { errorMessage = e.getMessage(); } } } if (!getAMContainerLists) { System.err.println("Unable to get AM container informations " + "for the 
application:" + appId); System.err.println(errorMessage); return -1; } if (amContainers.contains("ALL")) { for (AMLogsRequest request : requests) { outputAMContainerLogs(request, conf, appId, logFiles, logCliHelper, appOwner); } System.out.println(); System.out.println("Specified ALL for -am option. " + "Printed logs for all am containers."); } else { for (String amContainer : amContainers) { int amContainerId = Integer.parseInt(amContainer.trim()); if (amContainerId == -1) { outputAMContainerLogs(requests.get(requests.size() - 1), conf, appId, logFiles, logCliHelper, appOwner); } else { if (amContainerId <= requests.size()) { outputAMContainerLogs(requests.get(amContainerId - 1), conf, appId, logFiles, logCliHelper, appOwner); } } } } return 0; } private void outputAMContainerLogs(AMLogsRequest request, Configuration conf, String appId, String[] logFiles, LogCLIHelpers logCliHelper, String appOwner) throws Exception { String nodeHttpAddress = request.getNodeHttpAddress(); String containerId = request.getAmContainerId(); String nodeId = request.getNodeId(); if (request.isAppFinished()) { if (containerId != null && !containerId.isEmpty()) { if (nodeId == null || nodeId.isEmpty()) { try { nodeId = getContainerReport(containerId).getAssignedNode().toString(); } catch (Exception ex) { System.err.println(ex); nodeId = null; } } if (nodeId != null && !nodeId.isEmpty()) { printContainerLogsForFinishedApplication(appId, containerId, nodeId, logFiles, logCliHelper, appOwner); } } } else { if (nodeHttpAddress != null && containerId != null && !nodeHttpAddress.isEmpty() && !containerId.isEmpty()) { printContainerLogsFromRunningApplication(conf, appId, containerId, nodeHttpAddress, nodeId, logFiles, logCliHelper, appOwner); } } } private static class AMLogsRequest { private String amContainerId; private String nodeId; private String nodeHttpAddress; private final boolean isAppFinished; AMLogsRequest(boolean isAppFinished) { this.isAppFinished = isAppFinished; 
this.setAmContainerId(""); this.setNodeId(""); this.setNodeHttpAddress(""); } public String getAmContainerId() { return amContainerId; } public void setAmContainerId(String amContainerId) { this.amContainerId = amContainerId; } public String getNodeId() { return nodeId; } public void setNodeId(String nodeId) { this.nodeId = nodeId; } public String getNodeHttpAddress() { return nodeHttpAddress; } public void setNodeHttpAddress(String nodeHttpAddress) { this.nodeHttpAddress = nodeHttpAddress; } public boolean isAppFinished() { return isAppFinished; } } }
24,704
39.566502
94
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/YarnCLI.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.cli; import java.io.PrintStream; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.util.Tool; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.conf.YarnConfiguration; @Private @Unstable public abstract class YarnCLI extends Configured implements Tool { public static final String STATUS_CMD = "status"; public static final String LIST_CMD = "list"; public static final String KILL_CMD = "kill"; public static final String MOVE_TO_QUEUE_CMD = "movetoqueue"; public static final String HELP_CMD = "help"; protected PrintStream sysout; protected PrintStream syserr; protected YarnClient client; public YarnCLI() { super(new YarnConfiguration()); client = YarnClient.createYarnClient(); client.init(getConf()); client.start(); } public void setSysOutPrintStream(PrintStream sysout) { this.sysout = sysout; } public void setSysErrPrintStream(PrintStream syserr) { this.syserr = syserr; } public YarnClient getClient() { return client; } public void setClient(YarnClient client) { this.client = 
client; } public void stop() { this.client.stop(); } }
2,161
30.794118
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.cli; import java.io.IOException; import java.io.PrintStream; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.HAAdmin; import org.apache.hadoop.ha.HAServiceTarget; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.yarn.api.records.DecommissionType; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.client.RMHAServiceTarget; import org.apache.hadoop.yarn.conf.HAUtil; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import 
org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; import org.apache.hadoop.yarn.util.ConverterUtils; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @Private @Unstable public class RMAdminCLI extends HAAdmin { private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); private boolean directlyAccessNodeLabelStore = false; static CommonNodeLabelsManager localNodeLabelsManager = null; private static final String NO_LABEL_ERR_MSG = "No cluster node-labels are specified"; private static final String NO_MAPPING_ERR_MSG = "No node-to-labels mappings are specified"; private static final String INVALID_TIMEOUT_ERR_MSG = "Invalid 
timeout specified : "; private static final String ADD_LABEL_FORMAT_ERR_MSG = "Input format for adding node-labels is not correct, it should be " + "labelName1[(exclusive=true/false)],LabelName2[] .."; protected final static Map<String, UsageInfo> ADMIN_USAGE = ImmutableMap.<String, UsageInfo>builder() .put("-refreshQueues", new UsageInfo("", "Reload the queues' acls, states and scheduler specific " + "properties. \n\t\tResourceManager will reload the " + "mapred-queues configuration file.")) .put("-refreshNodes", new UsageInfo("[-g [timeout in seconds]]", "Refresh the hosts information at the ResourceManager. Here " + "[-g [timeout in seconds] is optional, if we specify the " + "timeout then ResourceManager will wait for timeout before " + "marking the NodeManager as decommissioned.")) .put("-refreshSuperUserGroupsConfiguration", new UsageInfo("", "Refresh superuser proxy groups mappings")) .put("-refreshUserToGroupsMappings", new UsageInfo("", "Refresh user-to-groups mappings")) .put("-refreshAdminAcls", new UsageInfo("", "Refresh acls for administration of ResourceManager")) .put("-refreshServiceAcl", new UsageInfo("", "Reload the service-level authorization policy file. \n\t\t" + "ResoureceManager will reload the authorization policy file.")) .put("-getGroups", new UsageInfo("[username]", "Get the groups which given user belongs to.")) .put("-addToClusterNodeLabels", new UsageInfo("<\"label1(exclusive=true)," + "label2(exclusive=false),label3\">", "add to cluster node labels. 
Default exclusivity is true")) .put("-removeFromClusterNodeLabels", new UsageInfo("<label1,label2,label3> (label splitted by \",\")", "remove from cluster node labels")) .put("-replaceLabelsOnNode", new UsageInfo( "<\"node1[:port]=label1,label2 node2[:port]=label1,label2\">", "replace labels on nodes" + " (please note that we do not support specifying multiple" + " labels on a single host for now.)")) .put("-directlyAccessNodeLabelStore", new UsageInfo("", "This is DEPRECATED, will be removed in future releases. Directly access node label store, " + "with this option, all node label related operations" + " will not connect RM. Instead, they will" + " access/modify stored node labels directly." + " By default, it is false (access via RM)." + " AND PLEASE NOTE: if you configured" + " yarn.node-labels.fs-store.root-dir to a local directory" + " (instead of NFS or HDFS), this option will only work" + " when the command run on the machine where RM is running.")) .build(); public RMAdminCLI() { super(); } public RMAdminCLI(Configuration conf) { super(conf); } protected void setErrOut(PrintStream errOut) { this.errOut = errOut; } private static void appendHAUsage(final StringBuilder usageBuilder) { for (Map.Entry<String,UsageInfo> cmdEntry : USAGE.entrySet()) { if (cmdEntry.getKey().equals("-help") || cmdEntry.getKey().equals("-failover")) { continue; } UsageInfo usageInfo = cmdEntry.getValue(); usageBuilder.append(" [" + cmdEntry.getKey() + " " + usageInfo.args + "]"); } } private static void buildHelpMsg(String cmd, StringBuilder builder) { UsageInfo usageInfo = ADMIN_USAGE.get(cmd); if (usageInfo == null) { usageInfo = USAGE.get(cmd); if (usageInfo == null) { return; } } String space = (usageInfo.args == "") ? 
"" : " "; builder.append(" " + cmd + space + usageInfo.args + ": " + usageInfo.help); } private static void buildIndividualUsageMsg(String cmd, StringBuilder builder ) { boolean isHACommand = false; UsageInfo usageInfo = ADMIN_USAGE.get(cmd); if (usageInfo == null) { usageInfo = USAGE.get(cmd); if (usageInfo == null) { return; } isHACommand = true; } String space = (usageInfo.args == "") ? "" : " "; builder.append("Usage: yarn rmadmin [" + cmd + space + usageInfo.args + "]\n"); if (isHACommand) { builder.append(cmd + " can only be used when RM HA is enabled"); } } private static void buildUsageMsg(StringBuilder builder, boolean isHAEnabled) { builder.append("Usage: yarn rmadmin\n"); for (Map.Entry<String,UsageInfo> cmdEntry : ADMIN_USAGE.entrySet()) { UsageInfo usageInfo = cmdEntry.getValue(); builder.append(" " + cmdEntry.getKey() + " " + usageInfo.args + "\n"); } if (isHAEnabled) { for (Map.Entry<String,UsageInfo> cmdEntry : USAGE.entrySet()) { String cmdKey = cmdEntry.getKey(); if (!cmdKey.equals("-help")) { UsageInfo usageInfo = cmdEntry.getValue(); builder.append(" " + cmdKey + " " + usageInfo.args + "\n"); } } } builder.append(" -help" + " [cmd]\n"); } private static void printHelp(String cmd, boolean isHAEnabled) { StringBuilder summary = new StringBuilder(); summary.append("rmadmin is the command to execute YARN administrative " + "commands.\n"); summary.append("The full syntax is: \n\n" + "yarn rmadmin" + " [-refreshQueues]" + " [-refreshNodes [-g [timeout in seconds]]]" + " [-refreshSuperUserGroupsConfiguration]" + " [-refreshUserToGroupsMappings]" + " [-refreshAdminAcls]" + " [-refreshServiceAcl]" + " [-getGroup [username]]" + " [-addToClusterNodeLabels <\"label1(exclusive=true)," + "label2(exclusive=false),label3\">]" + " [-removeFromClusterNodeLabels <label1,label2,label3>]" + " [-replaceLabelsOnNode <\"node1[:port]=label1,label2 node2[:port]=label1\">]" + " [-directlyAccessNodeLabelStore]]"); if (isHAEnabled) { appendHAUsage(summary); } 
summary.append(" [-help [cmd]]"); summary.append("\n"); StringBuilder helpBuilder = new StringBuilder(); System.out.println(summary); for (String cmdKey : ADMIN_USAGE.keySet()) { buildHelpMsg(cmdKey, helpBuilder); helpBuilder.append("\n"); } if (isHAEnabled) { for (String cmdKey : USAGE.keySet()) { if (!cmdKey.equals("-help") && !cmdKey.equals("-failover")) { buildHelpMsg(cmdKey, helpBuilder); helpBuilder.append("\n"); } } } helpBuilder.append(" -help [cmd]: Displays help for the given command or all commands" + " if none is specified."); System.out.println(helpBuilder); System.out.println(); ToolRunner.printGenericCommandUsage(System.out); } /** * Displays format of commands. * @param cmd The command that is being executed. */ private static void printUsage(String cmd, boolean isHAEnabled) { StringBuilder usageBuilder = new StringBuilder(); if (ADMIN_USAGE.containsKey(cmd) || USAGE.containsKey(cmd)) { buildIndividualUsageMsg(cmd, usageBuilder); } else { buildUsageMsg(usageBuilder, isHAEnabled); } System.err.println(usageBuilder); ToolRunner.printGenericCommandUsage(System.err); } protected ResourceManagerAdministrationProtocol createAdminProtocol() throws IOException { // Get the current configuration final YarnConfiguration conf = new YarnConfiguration(getConf()); return ClientRMProxy.createRMProxy(conf, ResourceManagerAdministrationProtocol.class); } private int refreshQueues() throws IOException, YarnException { // Refresh the queue properties ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); RefreshQueuesRequest request = recordFactory.newRecordInstance(RefreshQueuesRequest.class); adminProtocol.refreshQueues(request); return 0; } private int refreshNodes() throws IOException, YarnException { // Refresh the nodes ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); RefreshNodesRequest request = RefreshNodesRequest .newInstance(DecommissionType.NORMAL); adminProtocol.refreshNodes(request); return 0; } private 
int refreshNodes(long timeout) throws IOException, YarnException { // Graceful decommissioning with timeout ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); RefreshNodesRequest gracefulRequest = RefreshNodesRequest .newInstance(DecommissionType.GRACEFUL); adminProtocol.refreshNodes(gracefulRequest); CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest = recordFactory .newRecordInstance(CheckForDecommissioningNodesRequest.class); long waitingTime; boolean nodesDecommissioning = true; // timeout=-1 means wait for all the nodes to be gracefully // decommissioned for (waitingTime = 0; waitingTime < timeout || timeout == -1; waitingTime++) { // wait for one second to check nodes decommissioning status try { Thread.sleep(1000); } catch (InterruptedException e) { // Ignore the InterruptedException } CheckForDecommissioningNodesResponse checkForDecommissioningNodes = adminProtocol .checkForDecommissioningNodes(checkForDecommissioningNodesRequest); Set<NodeId> decommissioningNodes = checkForDecommissioningNodes .getDecommissioningNodes(); if (decommissioningNodes.isEmpty()) { nodesDecommissioning = false; break; } else { StringBuilder nodes = new StringBuilder(); for (NodeId nodeId : decommissioningNodes) { nodes.append(nodeId).append(","); } nodes.deleteCharAt(nodes.length() - 1); System.out.println("Nodes '" + nodes + "' are still decommissioning."); } } if (nodesDecommissioning) { System.out.println("Graceful decommissioning not completed in " + timeout + " seconds, issueing forceful decommissioning command."); RefreshNodesRequest forcefulRequest = RefreshNodesRequest .newInstance(DecommissionType.FORCEFUL); adminProtocol.refreshNodes(forcefulRequest); } else { System.out.println("Graceful decommissioning completed in " + waitingTime + " seconds."); } return 0; } private int refreshUserToGroupsMappings() throws IOException, YarnException { // Refresh the user-to-groups mappings ResourceManagerAdministrationProtocol 
adminProtocol = createAdminProtocol(); RefreshUserToGroupsMappingsRequest request = recordFactory.newRecordInstance(RefreshUserToGroupsMappingsRequest.class); adminProtocol.refreshUserToGroupsMappings(request); return 0; } private int refreshSuperUserGroupsConfiguration() throws IOException, YarnException { // Refresh the super-user groups ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); RefreshSuperUserGroupsConfigurationRequest request = recordFactory.newRecordInstance(RefreshSuperUserGroupsConfigurationRequest.class); adminProtocol.refreshSuperUserGroupsConfiguration(request); return 0; } private int refreshAdminAcls() throws IOException, YarnException { // Refresh the admin acls ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); RefreshAdminAclsRequest request = recordFactory.newRecordInstance(RefreshAdminAclsRequest.class); adminProtocol.refreshAdminAcls(request); return 0; } private int refreshServiceAcls() throws IOException, YarnException { // Refresh the service acls ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); RefreshServiceAclsRequest request = recordFactory.newRecordInstance(RefreshServiceAclsRequest.class); adminProtocol.refreshServiceAcls(request); return 0; } private int getGroups(String[] usernames) throws IOException { // Get groups users belongs to ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); if (usernames.length == 0) { usernames = new String[] { UserGroupInformation.getCurrentUser().getUserName() }; } for (String username : usernames) { StringBuilder sb = new StringBuilder(); sb.append(username + " :"); for (String group : adminProtocol.getGroupsForUser(username)) { sb.append(" "); sb.append(group); } System.out.println(sb); } return 0; } // Make it protected to make unit test can change it. 
protected static synchronized CommonNodeLabelsManager getNodeLabelManagerInstance(Configuration conf) { if (localNodeLabelsManager == null) { localNodeLabelsManager = new CommonNodeLabelsManager(); localNodeLabelsManager.init(conf); localNodeLabelsManager.start(); } return localNodeLabelsManager; } private List<NodeLabel> buildNodeLabelsFromStr(String args) { List<NodeLabel> nodeLabels = new ArrayList<>(); for (String p : args.split(",")) { if (!p.trim().isEmpty()) { String labelName = p; // Try to parse exclusive boolean exclusive = NodeLabel.DEFAULT_NODE_LABEL_EXCLUSIVITY; int leftParenthesisIdx = p.indexOf("("); int rightParenthesisIdx = p.indexOf(")"); if ((leftParenthesisIdx == -1 && rightParenthesisIdx != -1) || (leftParenthesisIdx != -1 && rightParenthesisIdx == -1)) { // Parenthese not match throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG); } if (leftParenthesisIdx > 0 && rightParenthesisIdx > 0) { if (leftParenthesisIdx > rightParenthesisIdx) { // Parentese not match throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG); } String property = p.substring(p.indexOf("(") + 1, p.indexOf(")")); if (property.contains("=")) { String key = property.substring(0, property.indexOf("=")).trim(); String value = property .substring(property.indexOf("=") + 1, property.length()) .trim(); // Now we only support one property, which is exclusive, so check if // key = exclusive and value = {true/false} if (key.equals("exclusive") && ImmutableSet.of("true", "false").contains(value)) { exclusive = Boolean.parseBoolean(value); } else { throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG); } } else if (!property.trim().isEmpty()) { throw new IllegalArgumentException(ADD_LABEL_FORMAT_ERR_MSG); } } // Try to get labelName if there's "(..)" if (labelName.contains("(")) { labelName = labelName.substring(0, labelName.indexOf("(")).trim(); } nodeLabels.add(NodeLabel.newInstance(labelName, exclusive)); } } if (nodeLabels.isEmpty()) { throw new 
IllegalArgumentException(NO_LABEL_ERR_MSG); } return nodeLabels; } private Set<String> buildNodeLabelNamesFromStr(String args) { Set<String> labels = new HashSet<String>(); for (String p : args.split(",")) { if (!p.trim().isEmpty()) { labels.add(p.trim()); } } if (labels.isEmpty()) { throw new IllegalArgumentException(NO_LABEL_ERR_MSG); } return labels; } private int addToClusterNodeLabels(String args) throws IOException, YarnException { List<NodeLabel> labels = buildNodeLabelsFromStr(args); if (directlyAccessNodeLabelStore) { getNodeLabelManagerInstance(getConf()).addToCluserNodeLabels(labels); } else { ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); AddToClusterNodeLabelsRequest request = AddToClusterNodeLabelsRequest.newInstance(labels); adminProtocol.addToClusterNodeLabels(request); } return 0; } private int removeFromClusterNodeLabels(String args) throws IOException, YarnException { Set<String> labels = buildNodeLabelNamesFromStr(args); if (directlyAccessNodeLabelStore) { getNodeLabelManagerInstance(getConf()).removeFromClusterNodeLabels( labels); } else { ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); RemoveFromClusterNodeLabelsRequest request = RemoveFromClusterNodeLabelsRequest.newInstance(labels); adminProtocol.removeFromClusterNodeLabels(request); } return 0; } private Map<NodeId, Set<String>> buildNodeLabelsMapFromStr(String args) { Map<NodeId, Set<String>> map = new HashMap<NodeId, Set<String>>(); for (String nodeToLabels : args.split("[ \n]")) { nodeToLabels = nodeToLabels.trim(); if (nodeToLabels.isEmpty() || nodeToLabels.startsWith("#")) { continue; } // "," also supported for compatibility String[] splits = nodeToLabels.split("="); int index = 0; if (splits.length != 2) { splits = nodeToLabels.split(","); index = 1; } String nodeIdStr = splits[0]; if (index == 0) { splits = splits[1].split(","); } Preconditions.checkArgument(!nodeIdStr.trim().isEmpty(), "node name cannot be empty"); NodeId 
nodeId = ConverterUtils.toNodeIdWithDefaultPort(nodeIdStr); map.put(nodeId, new HashSet<String>()); for (int i = index; i < splits.length; i++) { if (!splits[i].trim().isEmpty()) { map.get(nodeId).add(splits[i].trim()); } } int nLabels = map.get(nodeId).size(); Preconditions.checkArgument(nLabels <= 1, "%d labels specified on host=%s" + ", please note that we do not support specifying multiple" + " labels on a single host for now.", nLabels, nodeIdStr); } if (map.isEmpty()) { throw new IllegalArgumentException(NO_MAPPING_ERR_MSG); } return map; } private int replaceLabelsOnNodes(String args) throws IOException, YarnException { Map<NodeId, Set<String>> map = buildNodeLabelsMapFromStr(args); return replaceLabelsOnNodes(map); } private int replaceLabelsOnNodes(Map<NodeId, Set<String>> map) throws IOException, YarnException { if (directlyAccessNodeLabelStore) { getNodeLabelManagerInstance(getConf()).replaceLabelsOnNode(map); } else { ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol(); ReplaceLabelsOnNodeRequest request = ReplaceLabelsOnNodeRequest.newInstance(map); adminProtocol.replaceLabelsOnNode(request); } return 0; } @Override public int run(String[] args) throws Exception { // -directlyAccessNodeLabelStore is a additional option for node label // access, so just search if we have specified this option, and remove it List<String> argsList = new ArrayList<String>(); for (int i = 0; i < args.length; i++) { if (args[i].equals("-directlyAccessNodeLabelStore")) { directlyAccessNodeLabelStore = true; } else { argsList.add(args[i]); } } args = argsList.toArray(new String[0]); YarnConfiguration yarnConf = getConf() == null ? 
new YarnConfiguration() : new YarnConfiguration( getConf()); boolean isHAEnabled = yarnConf.getBoolean(YarnConfiguration.RM_HA_ENABLED, YarnConfiguration.DEFAULT_RM_HA_ENABLED); if (args.length < 1) { printUsage("", isHAEnabled); return -1; } int exitCode = -1; int i = 0; String cmd = args[i++]; exitCode = 0; if ("-help".equals(cmd)) { if (i < args.length) { printUsage(args[i], isHAEnabled); } else { printHelp("", isHAEnabled); } return exitCode; } if (USAGE.containsKey(cmd)) { if (isHAEnabled) { return super.run(args); } System.out.println("Cannot run " + cmd + " when ResourceManager HA is not enabled"); return -1; } // // verify that we have enough command line parameters // if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) || "-refreshServiceAcl".equals(cmd) || "-refreshUserToGroupsMappings".equals(cmd) || "-refreshSuperUserGroupsConfiguration".equals(cmd)) { if (args.length != 1) { printUsage(cmd, isHAEnabled); return exitCode; } } try { if ("-refreshQueues".equals(cmd)) { exitCode = refreshQueues(); } else if ("-refreshNodes".equals(cmd)) { if (args.length == 1) { exitCode = refreshNodes(); } else if (args.length == 3) { // if the graceful timeout specified if ("-g".equals(args[1])) { long timeout = validateTimeout(args[2]); exitCode = refreshNodes(timeout); } else { printUsage(cmd, isHAEnabled); return -1; } } else { printUsage(cmd, isHAEnabled); return -1; } } else if ("-refreshUserToGroupsMappings".equals(cmd)) { exitCode = refreshUserToGroupsMappings(); } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) { exitCode = refreshSuperUserGroupsConfiguration(); } else if ("-refreshAdminAcls".equals(cmd)) { exitCode = refreshAdminAcls(); } else if ("-refreshServiceAcl".equals(cmd)) { exitCode = refreshServiceAcls(); } else if ("-getGroups".equals(cmd)) { String[] usernames = Arrays.copyOfRange(args, i, args.length); exitCode = getGroups(usernames); } else if ("-addToClusterNodeLabels".equals(cmd)) { if (i >= args.length) { 
System.err.println(NO_LABEL_ERR_MSG); printUsage("", isHAEnabled); exitCode = -1; } else { exitCode = addToClusterNodeLabels(args[i]); } } else if ("-removeFromClusterNodeLabels".equals(cmd)) { if (i >= args.length) { System.err.println(NO_LABEL_ERR_MSG); printUsage("", isHAEnabled); exitCode = -1; } else { exitCode = removeFromClusterNodeLabels(args[i]); } } else if ("-replaceLabelsOnNode".equals(cmd)) { if (i >= args.length) { System.err.println(NO_MAPPING_ERR_MSG); printUsage("", isHAEnabled); exitCode = -1; } else { exitCode = replaceLabelsOnNodes(args[i]); } } else { exitCode = -1; System.err.println(cmd.substring(1) + ": Unknown command"); printUsage("", isHAEnabled); } } catch (IllegalArgumentException arge) { exitCode = -1; System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage()); printUsage(cmd, isHAEnabled); } catch (RemoteException e) { // // This is a error returned by hadoop server. Print // out the first line of the error message, ignore the stack trace. exitCode = -1; try { String[] content; content = e.getLocalizedMessage().split("\n"); System.err.println(cmd.substring(1) + ": " + content[0]); } catch (Exception ex) { System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage()); } } catch (Exception e) { exitCode = -1; System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage()); } if (null != localNodeLabelsManager) { localNodeLabelsManager.stop(); } return exitCode; } private long validateTimeout(String strTimeout) { long timeout; try { timeout = Long.parseLong(strTimeout); } catch (NumberFormatException ex) { throw new IllegalArgumentException(INVALID_TIMEOUT_ERR_MSG + strTimeout); } if (timeout < -1) { throw new IllegalArgumentException(INVALID_TIMEOUT_ERR_MSG + timeout); } return timeout; } @Override public void setConf(Configuration conf) { if (conf != null) { conf = addSecurityConfiguration(conf); } super.setConf(conf); } /** * Add the requisite security principal settings to the given Configuration, * 
returning a copy. * @param conf the original config * @return a copy with the security settings added */ private static Configuration addSecurityConfiguration(Configuration conf) { // Make a copy so we don't mutate it. Also use an YarnConfiguration to // force loading of yarn-site.xml. conf = new YarnConfiguration(conf); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, conf.get(YarnConfiguration.RM_PRINCIPAL, "")); return conf; } @Override protected HAServiceTarget resolveTarget(String rmId) { Collection<String> rmIds = HAUtil.getRMHAIds(getConf()); if (!rmIds.contains(rmId)) { StringBuilder msg = new StringBuilder(); msg.append(rmId + " is not a valid serviceId. It should be one of "); for (String id : rmIds) { msg.append(id + " "); } throw new IllegalArgumentException(msg.toString()); } try { YarnConfiguration conf = new YarnConfiguration(getConf()); conf.set(YarnConfiguration.RM_HA_ID, rmId); return new RMHAServiceTarget(conf); } catch (IllegalArgumentException iae) { throw new YarnRuntimeException("Could not connect to " + rmId + "; the configuration for it might be missing"); } catch (IOException ioe) { throw new YarnRuntimeException( "Could not connect to RM HA Admin for node " + rmId); } } /** * returns the list of all resourcemanager ids for the given configuration. */ @Override protected Collection<String> getTargetIds(String targetNodeToActivate) { return HAUtil.getRMHAIds(getConf()); } @Override protected String getUsageString() { return "Usage: rmadmin"; } public static void main(String[] args) throws Exception { int result = ToolRunner.run(new RMAdminCLI(), args); System.exit(result); } }
30,851
36.67033
124
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.cli; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.nio.charset.Charset; import java.text.DecimalFormat; import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.MissingArgumentException; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import 
org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Times; import com.google.common.annotations.VisibleForTesting; @Private @Unstable public class ApplicationCLI extends YarnCLI { private static final String APPLICATIONS_PATTERN = "%30s\t%20s\t%20s\t%10s\t%10s\t%18s\t%18s\t%15s\t%35s" + System.getProperty("line.separator"); private static final String APPLICATION_ATTEMPTS_PATTERN = "%30s\t%20s\t%35s\t%35s" + System.getProperty("line.separator"); private static final String CONTAINER_PATTERN = "%30s\t%20s\t%20s\t%20s\t%20s\t%20s\t%35s" + System.getProperty("line.separator"); private static final String APP_TYPE_CMD = "appTypes"; private static final String APP_STATE_CMD = "appStates"; private static final String ALLSTATES_OPTION = "ALL"; private static final String QUEUE_CMD = "queue"; public static final String APPLICATION = "application"; public static final String APPLICATION_ATTEMPT = "applicationattempt"; public static final String CONTAINER = "container"; private boolean allAppStates; public static void main(String[] args) throws Exception { ApplicationCLI cli = new ApplicationCLI(); cli.setSysOutPrintStream(System.out); cli.setSysErrPrintStream(System.err); int res = ToolRunner.run(cli, args); cli.stop(); System.exit(res); } @Override public int run(String[] args) throws Exception { Options opts = new Options(); String title = null; if (args.length > 0 && args[0].equalsIgnoreCase(APPLICATION)) { title = APPLICATION; opts.addOption(STATUS_CMD, true, "Prints the status of the application."); opts.addOption(LIST_CMD, false, "List applications. 
" + "Supports optional use of -appTypes to filter applications " + "based on application type, " + "and -appStates to filter applications based on application state."); opts.addOption(KILL_CMD, true, "Kills the application."); opts.addOption(MOVE_TO_QUEUE_CMD, true, "Moves the application to a " + "different queue."); opts.addOption(QUEUE_CMD, true, "Works with the movetoqueue command to" + " specify which queue to move an application to."); opts.addOption(HELP_CMD, false, "Displays help for all commands."); Option appTypeOpt = new Option(APP_TYPE_CMD, true, "Works with -list to " + "filter applications based on " + "input comma-separated list of application types."); appTypeOpt.setValueSeparator(','); appTypeOpt.setArgs(Option.UNLIMITED_VALUES); appTypeOpt.setArgName("Types"); opts.addOption(appTypeOpt); Option appStateOpt = new Option(APP_STATE_CMD, true, "Works with -list " + "to filter applications based on input comma-separated list of " + "application states. " + getAllValidApplicationStates()); appStateOpt.setValueSeparator(','); appStateOpt.setArgs(Option.UNLIMITED_VALUES); appStateOpt.setArgName("States"); opts.addOption(appStateOpt); opts.getOption(KILL_CMD).setArgName("Application ID"); opts.getOption(MOVE_TO_QUEUE_CMD).setArgName("Application ID"); opts.getOption(QUEUE_CMD).setArgName("Queue Name"); opts.getOption(STATUS_CMD).setArgName("Application ID"); } else if (args.length > 0 && args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) { title = APPLICATION_ATTEMPT; opts.addOption(STATUS_CMD, true, "Prints the status of the application attempt."); opts.addOption(LIST_CMD, true, "List application attempts for aplication."); opts.addOption(HELP_CMD, false, "Displays help for all commands."); opts.getOption(STATUS_CMD).setArgName("Application Attempt ID"); opts.getOption(LIST_CMD).setArgName("Application ID"); } else if (args.length > 0 && args[0].equalsIgnoreCase(CONTAINER)) { title = CONTAINER; opts.addOption(STATUS_CMD, true, "Prints the status of the 
container."); opts.addOption(LIST_CMD, true, "List containers for application attempt."); opts.addOption(HELP_CMD, false, "Displays help for all commands."); opts.getOption(STATUS_CMD).setArgName("Container ID"); opts.getOption(LIST_CMD).setArgName("Application Attempt ID"); } int exitCode = -1; CommandLine cliParser = null; try { cliParser = new GnuParser().parse(opts, args); } catch (MissingArgumentException ex) { sysout.println("Missing argument for options"); printUsage(title, opts); return exitCode; } if (cliParser.hasOption(STATUS_CMD)) { if (args.length != 3) { printUsage(title, opts); return exitCode; } if (args[0].equalsIgnoreCase(APPLICATION)) { exitCode = printApplicationReport(cliParser.getOptionValue(STATUS_CMD)); } else if (args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) { exitCode = printApplicationAttemptReport(cliParser .getOptionValue(STATUS_CMD)); } else if (args[0].equalsIgnoreCase(CONTAINER)) { exitCode = printContainerReport(cliParser.getOptionValue(STATUS_CMD)); } return exitCode; } else if (cliParser.hasOption(LIST_CMD)) { if (args[0].equalsIgnoreCase(APPLICATION)) { allAppStates = false; Set<String> appTypes = new HashSet<String>(); if (cliParser.hasOption(APP_TYPE_CMD)) { String[] types = cliParser.getOptionValues(APP_TYPE_CMD); if (types != null) { for (String type : types) { if (!type.trim().isEmpty()) { appTypes.add(StringUtils.toUpperCase(type).trim()); } } } } EnumSet<YarnApplicationState> appStates = EnumSet .noneOf(YarnApplicationState.class); if (cliParser.hasOption(APP_STATE_CMD)) { String[] states = cliParser.getOptionValues(APP_STATE_CMD); if (states != null) { for (String state : states) { if (!state.trim().isEmpty()) { if (state.trim().equalsIgnoreCase(ALLSTATES_OPTION)) { allAppStates = true; break; } try { appStates.add(YarnApplicationState.valueOf( StringUtils.toUpperCase(state).trim())); } catch (IllegalArgumentException ex) { sysout.println("The application state " + state + " is invalid."); 
sysout.println(getAllValidApplicationStates()); return exitCode; } } } } } listApplications(appTypes, appStates); } else if (args[0].equalsIgnoreCase(APPLICATION_ATTEMPT)) { if (args.length != 3) { printUsage(title, opts); return exitCode; } listApplicationAttempts(cliParser.getOptionValue(LIST_CMD)); } else if (args[0].equalsIgnoreCase(CONTAINER)) { if (args.length != 3) { printUsage(title, opts); return exitCode; } listContainers(cliParser.getOptionValue(LIST_CMD)); } } else if (cliParser.hasOption(KILL_CMD)) { if (args.length != 3) { printUsage(title, opts); return exitCode; } try{ killApplication(cliParser.getOptionValue(KILL_CMD)); } catch (ApplicationNotFoundException e) { return exitCode; } } else if (cliParser.hasOption(MOVE_TO_QUEUE_CMD)) { if (!cliParser.hasOption(QUEUE_CMD)) { printUsage(title, opts); return exitCode; } moveApplicationAcrossQueues(cliParser.getOptionValue(MOVE_TO_QUEUE_CMD), cliParser.getOptionValue(QUEUE_CMD)); } else if (cliParser.hasOption(HELP_CMD)) { printUsage(title, opts); return 0; } else { syserr.println("Invalid Command Usage : "); printUsage(title, opts); } return 0; } /** * It prints the usage of the command * * @param opts */ @VisibleForTesting void printUsage(String title, Options opts) { new HelpFormatter().printHelp(title, opts); } /** * Prints the application attempt report for an application attempt id. 
* * @param applicationAttemptId * @return exitCode * @throws YarnException */ private int printApplicationAttemptReport(String applicationAttemptId) throws YarnException, IOException { ApplicationAttemptReport appAttemptReport = null; try { appAttemptReport = client.getApplicationAttemptReport(ConverterUtils .toApplicationAttemptId(applicationAttemptId)); } catch (ApplicationNotFoundException e) { sysout.println("Application for AppAttempt with id '" + applicationAttemptId + "' doesn't exist in RM or Timeline Server."); return -1; } catch (ApplicationAttemptNotFoundException e) { sysout.println("Application Attempt with id '" + applicationAttemptId + "' doesn't exist in RM or Timeline Server."); return -1; } // Use PrintWriter.println, which uses correct platform line ending. ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter appAttemptReportStr = new PrintWriter( new OutputStreamWriter(baos, Charset.forName("UTF-8"))); if (appAttemptReport != null) { appAttemptReportStr.println("Application Attempt Report : "); appAttemptReportStr.print("\tApplicationAttempt-Id : "); appAttemptReportStr.println(appAttemptReport.getApplicationAttemptId()); appAttemptReportStr.print("\tState : "); appAttemptReportStr.println(appAttemptReport .getYarnApplicationAttemptState()); appAttemptReportStr.print("\tAMContainer : "); appAttemptReportStr.println(appAttemptReport.getAMContainerId() .toString()); appAttemptReportStr.print("\tTracking-URL : "); appAttemptReportStr.println(appAttemptReport.getTrackingUrl()); appAttemptReportStr.print("\tRPC Port : "); appAttemptReportStr.println(appAttemptReport.getRpcPort()); appAttemptReportStr.print("\tAM Host : "); appAttemptReportStr.println(appAttemptReport.getHost()); appAttemptReportStr.print("\tDiagnostics : "); appAttemptReportStr.print(appAttemptReport.getDiagnostics()); } else { appAttemptReportStr.print("Application Attempt with id '" + applicationAttemptId + "' doesn't exist in Timeline Server."); 
appAttemptReportStr.close(); sysout.println(baos.toString("UTF-8")); return -1; } appAttemptReportStr.close(); sysout.println(baos.toString("UTF-8")); return 0; } /** * Prints the container report for an container id. * * @param containerId * @return exitCode * @throws YarnException */ private int printContainerReport(String containerId) throws YarnException, IOException { ContainerReport containerReport = null; try { containerReport = client.getContainerReport((ConverterUtils .toContainerId(containerId))); } catch (ApplicationNotFoundException e) { sysout.println("Application for Container with id '" + containerId + "' doesn't exist in RM or Timeline Server."); return -1; } catch (ApplicationAttemptNotFoundException e) { sysout.println("Application Attempt for Container with id '" + containerId + "' doesn't exist in RM or Timeline Server."); return -1; } catch (ContainerNotFoundException e) { sysout.println("Container with id '" + containerId + "' doesn't exist in RM or Timeline Server."); return -1; } // Use PrintWriter.println, which uses correct platform line ending. 
ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter containerReportStr = new PrintWriter( new OutputStreamWriter(baos, Charset.forName("UTF-8"))); if (containerReport != null) { containerReportStr.println("Container Report : "); containerReportStr.print("\tContainer-Id : "); containerReportStr.println(containerReport.getContainerId()); containerReportStr.print("\tStart-Time : "); containerReportStr.println(containerReport.getCreationTime()); containerReportStr.print("\tFinish-Time : "); containerReportStr.println(containerReport.getFinishTime()); containerReportStr.print("\tState : "); containerReportStr.println(containerReport.getContainerState()); containerReportStr.print("\tLOG-URL : "); containerReportStr.println(containerReport.getLogUrl()); containerReportStr.print("\tHost : "); containerReportStr.println(containerReport.getAssignedNode()); containerReportStr.print("\tNodeHttpAddress : "); containerReportStr.println(containerReport.getNodeHttpAddress() == null ? "N/A" : containerReport.getNodeHttpAddress()); containerReportStr.print("\tDiagnostics : "); containerReportStr.print(containerReport.getDiagnosticsInfo()); } else { containerReportStr.print("Container with id '" + containerId + "' doesn't exist in Timeline Server."); containerReportStr.close(); sysout.println(baos.toString("UTF-8")); return -1; } containerReportStr.close(); sysout.println(baos.toString("UTF-8")); return 0; } /** * Lists the applications matching the given application Types And application * States present in the Resource Manager * * @param appTypes * @param appStates * @throws YarnException * @throws IOException */ private void listApplications(Set<String> appTypes, EnumSet<YarnApplicationState> appStates) throws YarnException, IOException { PrintWriter writer = new PrintWriter( new OutputStreamWriter(sysout, Charset.forName("UTF-8"))); if (allAppStates) { for (YarnApplicationState appState : YarnApplicationState.values()) { appStates.add(appState); } } else { if 
(appStates.isEmpty()) { appStates.add(YarnApplicationState.RUNNING); appStates.add(YarnApplicationState.ACCEPTED); appStates.add(YarnApplicationState.SUBMITTED); } } List<ApplicationReport> appsReport = client.getApplications(appTypes, appStates); writer.println("Total number of applications (application-types: " + appTypes + " and states: " + appStates + ")" + ":" + appsReport.size()); writer.printf(APPLICATIONS_PATTERN, "Application-Id", "Application-Name", "Application-Type", "User", "Queue", "State", "Final-State", "Progress", "Tracking-URL"); for (ApplicationReport appReport : appsReport) { DecimalFormat formatter = new DecimalFormat("###.##%"); String progress = formatter.format(appReport.getProgress()); writer.printf(APPLICATIONS_PATTERN, appReport.getApplicationId(), appReport.getName(), appReport.getApplicationType(), appReport .getUser(), appReport.getQueue(), appReport .getYarnApplicationState(), appReport.getFinalApplicationStatus(), progress, appReport .getOriginalTrackingUrl()); } writer.flush(); } /** * Kills the application with the application id as appId * * @param applicationId * @throws YarnException * @throws IOException */ private void killApplication(String applicationId) throws YarnException, IOException { ApplicationId appId = ConverterUtils.toApplicationId(applicationId); ApplicationReport appReport = null; try { appReport = client.getApplicationReport(appId); } catch (ApplicationNotFoundException e) { sysout.println("Application with id '" + applicationId + "' doesn't exist in RM."); throw e; } if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED || appReport.getYarnApplicationState() == YarnApplicationState.KILLED || appReport.getYarnApplicationState() == YarnApplicationState.FAILED) { sysout.println("Application " + applicationId + " has already finished "); } else { sysout.println("Killing application " + applicationId); client.killApplication(appId); } } /** * Moves the application with the given ID to the given 
queue. */ private void moveApplicationAcrossQueues(String applicationId, String queue) throws YarnException, IOException { ApplicationId appId = ConverterUtils.toApplicationId(applicationId); ApplicationReport appReport = client.getApplicationReport(appId); if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED || appReport.getYarnApplicationState() == YarnApplicationState.KILLED || appReport.getYarnApplicationState() == YarnApplicationState.FAILED) { sysout.println("Application " + applicationId + " has already finished "); } else { sysout.println("Moving application " + applicationId + " to queue " + queue); client.moveApplicationAcrossQueues(appId, queue); sysout.println("Successfully completed move."); } } /** * Prints the application report for an application id. * * @param applicationId * @return exitCode * @throws YarnException */ private int printApplicationReport(String applicationId) throws YarnException, IOException { ApplicationReport appReport = null; try { appReport = client.getApplicationReport(ConverterUtils .toApplicationId(applicationId)); } catch (ApplicationNotFoundException e) { sysout.println("Application with id '" + applicationId + "' doesn't exist in RM or Timeline Server."); return -1; } // Use PrintWriter.println, which uses correct platform line ending. 
ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter appReportStr = new PrintWriter( new OutputStreamWriter(baos, Charset.forName("UTF-8"))); if (appReport != null) { appReportStr.println("Application Report : "); appReportStr.print("\tApplication-Id : "); appReportStr.println(appReport.getApplicationId()); appReportStr.print("\tApplication-Name : "); appReportStr.println(appReport.getName()); appReportStr.print("\tApplication-Type : "); appReportStr.println(appReport.getApplicationType()); appReportStr.print("\tUser : "); appReportStr.println(appReport.getUser()); appReportStr.print("\tQueue : "); appReportStr.println(appReport.getQueue()); appReportStr.print("\tApplication Priority : "); appReportStr.println(appReport.getPriority()); appReportStr.print("\tStart-Time : "); appReportStr.println(appReport.getStartTime()); appReportStr.print("\tFinish-Time : "); appReportStr.println(appReport.getFinishTime()); appReportStr.print("\tProgress : "); DecimalFormat formatter = new DecimalFormat("###.##%"); String progress = formatter.format(appReport.getProgress()); appReportStr.println(progress); appReportStr.print("\tState : "); appReportStr.println(appReport.getYarnApplicationState()); appReportStr.print("\tFinal-State : "); appReportStr.println(appReport.getFinalApplicationStatus()); appReportStr.print("\tTracking-URL : "); appReportStr.println(appReport.getOriginalTrackingUrl()); appReportStr.print("\tRPC Port : "); appReportStr.println(appReport.getRpcPort()); appReportStr.print("\tAM Host : "); appReportStr.println(appReport.getHost()); appReportStr.print("\tAggregate Resource Allocation : "); ApplicationResourceUsageReport usageReport = appReport.getApplicationResourceUsageReport(); if (usageReport != null) { //completed app report in the timeline server doesn't have usage report appReportStr.print(usageReport.getMemorySeconds() + " MB-seconds, "); appReportStr.println(usageReport.getVcoreSeconds() + " vcore-seconds"); } else { 
appReportStr.println("N/A"); } appReportStr.print("\tLog Aggregation Status : "); appReportStr.println(appReport.getLogAggregationStatus() == null ? "N/A" : appReport.getLogAggregationStatus()); appReportStr.print("\tDiagnostics : "); appReportStr.println(appReport.getDiagnostics()); appReportStr.print("\tUnmanaged Application : "); appReportStr.print(appReport.isUnmanagedApp()); } else { appReportStr.print("Application with id '" + applicationId + "' doesn't exist in RM."); appReportStr.close(); sysout.println(baos.toString("UTF-8")); return -1; } appReportStr.close(); sysout.println(baos.toString("UTF-8")); return 0; } private String getAllValidApplicationStates() { StringBuilder sb = new StringBuilder(); sb.append("The valid application state can be" + " one of the following: "); sb.append(ALLSTATES_OPTION + ","); for (YarnApplicationState appState : YarnApplicationState.values()) { sb.append(appState + ","); } String output = sb.toString(); return output.substring(0, output.length() - 1); } /** * Lists the application attempts matching the given applicationid * * @param applicationId * @throws YarnException * @throws IOException */ private void listApplicationAttempts(String applicationId) throws YarnException, IOException { PrintWriter writer = new PrintWriter( new OutputStreamWriter(sysout, Charset.forName("UTF-8"))); List<ApplicationAttemptReport> appAttemptsReport = client .getApplicationAttempts(ConverterUtils.toApplicationId(applicationId)); writer.println("Total number of application attempts " + ":" + appAttemptsReport.size()); writer.printf(APPLICATION_ATTEMPTS_PATTERN, "ApplicationAttempt-Id", "State", "AM-Container-Id", "Tracking-URL"); for (ApplicationAttemptReport appAttemptReport : appAttemptsReport) { writer.printf(APPLICATION_ATTEMPTS_PATTERN, appAttemptReport .getApplicationAttemptId(), appAttemptReport .getYarnApplicationAttemptState(), appAttemptReport .getAMContainerId().toString(), appAttemptReport.getTrackingUrl()); } writer.flush(); } /** 
* Lists the containers that belong to the given application attempt,
 * printing one formatted row per container to {@code sysout}.
 *
 * @param appAttemptId string form of the application attempt id whose
 *          containers are listed
 * @throws YarnException if the RM cannot serve the request
 * @throws IOException on RPC/transport errors
 */
private void listContainers(String appAttemptId) throws YarnException,
    IOException {
  // Wrap sysout in a UTF-8 writer; we flush (not close) at the end so the
  // underlying stream stays usable for subsequent commands.
  PrintWriter writer = new PrintWriter(
      new OutputStreamWriter(sysout, Charset.forName("UTF-8")));
  List<ContainerReport> appsReport = client
      .getContainers(ConverterUtils.toApplicationAttemptId(appAttemptId));
  writer.println("Total number of containers " + ":" + appsReport.size());
  // Header row uses the same printf pattern as the data rows so the columns
  // line up.
  writer.printf(CONTAINER_PATTERN, "Container-Id", "Start Time",
      "Finish Time", "State", "Host", "Node Http Address", "LOG-URL");
  for (ContainerReport containerReport : appsReport) {
    writer.printf(
        CONTAINER_PATTERN,
        containerReport.getContainerId(),
        Times.format(containerReport.getCreationTime()),
        Times.format(containerReport.getFinishTime()),
        containerReport.getContainerState(), containerReport
            .getAssignedNode(),
        // The node HTTP address may be unset for finished containers.
        containerReport.getNodeHttpAddress() == null
            ? "N/A" : containerReport.getNodeHttpAddress(),
        containerReport.getLogUrl());
  }
  writer.flush();
}
}
25,966
40.680578
83
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.cli; import java.io.IOException; import java.io.InputStream; import java.net.URL; import java.net.URLConnection; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; import java.util.EnumMap; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Scanner; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.commons.io.IOUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.time.DateFormatUtils; import org.apache.commons.lang.time.DurationFormatUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; 
import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueStatistics; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.codehaus.jettison.json.JSONObject; public class TopCLI extends YarnCLI { private static final Log LOG = LogFactory.getLog(TopCLI.class); private String CLEAR = "\u001b[2J"; private String CLEAR_LINE = "\u001b[2K"; private String SET_CURSOR_HOME = "\u001b[H"; private String CHANGE_BACKGROUND = "\u001b[7m"; private String RESET_BACKGROUND = "\u001b[0m"; private String SET_CURSOR_LINE_7_COLUMN_0 = "\u001b[7;0f"; // guava cache for getapplications call protected Cache<GetApplicationsRequest, List<ApplicationReport>> applicationReportsCache = CacheBuilder.newBuilder().maximumSize(1000) .expireAfterWrite(5, TimeUnit.SECONDS).build(); enum DisplayScreen { TOP, HELP, SORT, FIELDS } enum Columns { // in the order in which they should be displayed APPID, USER, TYPE, QUEUE, PRIORITY, CONT, RCONT, VCORES, RVCORES, MEM, RMEM, VCORESECS, MEMSECS, PROGRESS, TIME, NAME } static class ColumnInformation { String header; String format; boolean display; // should we show this field or not String description; String key; // key to press for sorting/toggling field public ColumnInformation(String header, String format, boolean display, String description, String key) { this.header = header; this.format = format; this.display = display; this.description = description; this.key = key; } } private static class ApplicationInformation { final String appid; final String user; final String type; final int priority; final int usedContainers; final int 
reservedContainers; final long usedMemory; final long reservedMemory; final int usedVirtualCores; final int reservedVirtualCores; final int attempts; final float progress; final String state; long runningTime; final String time; final String name; final int nodes; final String queue; final long memorySeconds; final long vcoreSeconds; final EnumMap<Columns, String> displayStringsMap; ApplicationInformation(ApplicationReport appReport) { displayStringsMap = new EnumMap<>(Columns.class); appid = appReport.getApplicationId().toString(); displayStringsMap.put(Columns.APPID, appid); user = appReport.getUser(); displayStringsMap.put(Columns.USER, user); type = appReport.getApplicationType().toLowerCase(); displayStringsMap.put(Columns.TYPE, type); state = appReport.getYarnApplicationState().toString().toLowerCase(); name = appReport.getName(); displayStringsMap.put(Columns.NAME, name); queue = appReport.getQueue(); displayStringsMap.put(Columns.QUEUE, queue); priority = 0; usedContainers = appReport.getApplicationResourceUsageReport().getNumUsedContainers(); displayStringsMap.put(Columns.CONT, String.valueOf(usedContainers)); reservedContainers = appReport.getApplicationResourceUsageReport() .getNumReservedContainers(); displayStringsMap.put(Columns.RCONT, String.valueOf(reservedContainers)); usedVirtualCores = appReport.getApplicationResourceUsageReport().getUsedResources() .getVirtualCores(); displayStringsMap.put(Columns.VCORES, String.valueOf(usedVirtualCores)); usedMemory = appReport.getApplicationResourceUsageReport().getUsedResources() .getMemory() / 1024; displayStringsMap.put(Columns.MEM, String.valueOf(usedMemory) + "G"); reservedVirtualCores = appReport.getApplicationResourceUsageReport().getReservedResources() .getVirtualCores(); displayStringsMap.put(Columns.RVCORES, String.valueOf(reservedVirtualCores)); reservedMemory = appReport.getApplicationResourceUsageReport().getReservedResources() .getMemory() / 1024; displayStringsMap.put(Columns.RMEM, 
String.valueOf(reservedMemory) + "G"); attempts = appReport.getCurrentApplicationAttemptId().getAttemptId(); nodes = 0; runningTime = Time.now() - appReport.getStartTime(); time = DurationFormatUtils.formatDuration(runningTime, "dd:HH:mm"); displayStringsMap.put(Columns.TIME, String.valueOf(time)); progress = appReport.getProgress() * 100; displayStringsMap.put(Columns.PROGRESS, String.format("%.2f", progress)); // store in GBSeconds memorySeconds = appReport.getApplicationResourceUsageReport().getMemorySeconds() / 1024; displayStringsMap.put(Columns.MEMSECS, String.valueOf(memorySeconds)); vcoreSeconds = appReport.getApplicationResourceUsageReport().getVcoreSeconds(); displayStringsMap.put(Columns.VCORESECS, String.valueOf(vcoreSeconds)); } } // all the sort comparators public static final Comparator<ApplicationInformation> AppIDComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return a1.appid.compareTo(a2.appid); } }; public static final Comparator<ApplicationInformation> UserComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return a1.user.compareTo(a2.user); } }; public static final Comparator<ApplicationInformation> AppTypeComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return a1.type.compareTo(a2.type); } }; public static final Comparator<ApplicationInformation> QueueNameComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return a1.queue.compareTo(a2.queue); } }; public static final Comparator<ApplicationInformation> UsedContainersComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return a1.usedContainers - 
a2.usedContainers; } }; public static final Comparator<ApplicationInformation> ReservedContainersComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return a1.reservedContainers - a2.reservedContainers; } }; public static final Comparator<ApplicationInformation> UsedMemoryComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return Long.valueOf(a1.usedMemory).compareTo(a2.usedMemory); } }; public static final Comparator<ApplicationInformation> ReservedMemoryComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return Long.valueOf(a1.reservedMemory).compareTo(a2.reservedMemory); } }; public static final Comparator<ApplicationInformation> UsedVCoresComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return a1.usedVirtualCores - a2.usedVirtualCores; } }; public static final Comparator<ApplicationInformation> ReservedVCoresComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return a1.reservedVirtualCores - a2.reservedVirtualCores; } }; public static final Comparator<ApplicationInformation> VCoreSecondsComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return Long.valueOf(a1.vcoreSeconds).compareTo(a2.vcoreSeconds); } }; public static final Comparator<ApplicationInformation> MemorySecondsComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return Long.valueOf(a1.memorySeconds).compareTo(a2.memorySeconds); } }; public static final 
Comparator<ApplicationInformation> ProgressComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return Float.compare(a1.progress, a2.progress); } }; public static final Comparator<ApplicationInformation> RunningTimeComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return Long.valueOf(a1.runningTime).compareTo(a2.runningTime); } }; public static final Comparator<ApplicationInformation> AppNameComparator = new Comparator<ApplicationInformation>() { @Override public int compare(ApplicationInformation a1, ApplicationInformation a2) { return a1.name.compareTo(a2.name); } }; private static class NodesInformation { int totalNodes; int runningNodes; int unhealthyNodes; int decommissionedNodes; int lostNodes; int rebootedNodes; } private static class QueueMetrics { long appsSubmitted; long appsRunning; long appsPending; long appsCompleted; long appsKilled; long appsFailed; long activeUsers; long availableMemoryGB; long allocatedMemoryGB; long pendingMemoryGB; long reservedMemoryGB; long availableVCores; long allocatedVCores; long pendingVCores; long reservedVCores; long allocatedContainers; long reservedContainers; long pendingContainers; } private class KeyboardMonitor extends Thread { public void run() { Scanner keyboard = new Scanner(System.in, "UTF-8"); while (runKeyboardMonitor.get()) { String in = keyboard.next(); try { if (displayScreen == DisplayScreen.SORT) { handleSortScreenKeyPress(in); } else if (displayScreen == DisplayScreen.TOP) { handleTopScreenKeyPress(in); } else if (displayScreen == DisplayScreen.FIELDS) { handleFieldsScreenKeyPress(in); } else { handleHelpScreenKeyPress(); } } catch (Exception e) { LOG.error("Caught exception", e); } } } } long refreshPeriod = 3 * 1000; int terminalWidth = -1; int terminalHeight = -1; String appsHeader; boolean ascendingSort; long rmStartTime; 
Comparator<ApplicationInformation> comparator; Options opts; CommandLine cliParser; Set<String> queues; Set<String> users; Set<String> types; DisplayScreen displayScreen; AtomicBoolean showingTopScreen; AtomicBoolean runMainLoop; AtomicBoolean runKeyboardMonitor; final Object lock = new Object(); String currentSortField; Map<String, Columns> keyFieldsMap; List<String> sortedKeys; Thread displayThread; final EnumMap<Columns, ColumnInformation> columnInformationEnumMap; public TopCLI() throws IOException, InterruptedException { super(); queues = new HashSet<>(); users = new HashSet<>(); types = new HashSet<>(); comparator = UsedContainersComparator; ascendingSort = false; displayScreen = DisplayScreen.TOP; showingTopScreen = new AtomicBoolean(); showingTopScreen.set(true); currentSortField = "c"; keyFieldsMap = new HashMap<>(); runKeyboardMonitor = new AtomicBoolean(); runMainLoop = new AtomicBoolean(); runKeyboardMonitor.set(true); runMainLoop.set(true); displayThread = Thread.currentThread(); columnInformationEnumMap = new EnumMap<>(Columns.class); generateColumnInformationMap(); generateKeyFieldsMap(); sortedKeys = new ArrayList<>(keyFieldsMap.keySet()); Collections.sort(sortedKeys); setTerminalSequences(); } public static void main(String[] args) throws Exception { TopCLI topImp = new TopCLI(); topImp.setSysOutPrintStream(System.out); topImp.setSysErrPrintStream(System.err); int res = ToolRunner.run(topImp, args); topImp.stop(); System.exit(res); } @Override public int run(String[] args) throws Exception { try { parseOptions(args); if (cliParser.hasOption("help")) { printUsage(); return 0; } } catch (Exception e) { LOG.error("Unable to parse options", e); return 1; } setAppsHeader(); Thread keyboardMonitor = new KeyboardMonitor(); keyboardMonitor.start(); rmStartTime = getRMStartTime(); clearScreen(); while (runMainLoop.get()) { if (displayScreen == DisplayScreen.TOP) { showTopScreen(); try { Thread.sleep(refreshPeriod); } catch (InterruptedException ie) { break; 
} } else if (displayScreen == DisplayScreen.SORT) { showSortScreen(); Thread.sleep(100); } else if (displayScreen == DisplayScreen.FIELDS) { showFieldsScreen(); Thread.sleep(100); } if (rmStartTime == -1) { // we were unable to get it the first time, try again rmStartTime = getRMStartTime(); } } clearScreen(); return 0; } private void parseOptions(String[] args) throws ParseException, IOException, InterruptedException { // Command line options opts = new Options(); opts.addOption("queues", true, "Comma separated list of queues to restrict applications"); opts.addOption("users", true, "Comma separated list of users to restrict applications"); opts.addOption("types", true, "Comma separated list of types to restrict" + " applications, case sensitive(though the display is lower case)"); opts.addOption("cols", true, "Number of columns on the terminal"); opts.addOption("rows", true, "Number of rows on the terminal"); opts.addOption("help", false, "Print usage; for help while the tool is running press 'h' + Enter"); opts.addOption("delay", true, "The refresh delay(in seconds), default is 3 seconds"); cliParser = new GnuParser().parse(opts, args); if (cliParser.hasOption("queues")) { String clqueues = cliParser.getOptionValue("queues"); String[] queuesArray = clqueues.split(","); queues.addAll(Arrays.asList(queuesArray)); } if (cliParser.hasOption("users")) { String clusers = cliParser.getOptionValue("users"); users.addAll(Arrays.asList(clusers.split(","))); } if (cliParser.hasOption("types")) { String cltypes = cliParser.getOptionValue("types"); types.addAll(Arrays.asList(cltypes.split(","))); } if (cliParser.hasOption("cols")) { terminalWidth = Integer.parseInt(cliParser.getOptionValue("cols")); } else { setTerminalWidth(); } if (cliParser.hasOption("rows")) { terminalHeight = Integer.parseInt(cliParser.getOptionValue("rows")); } else { setTerminalHeight(); } if (cliParser.hasOption("delay")) { int delay = Integer.parseInt(cliParser.getOptionValue("delay")); if (delay < 
1) { LOG.warn("Delay set too low, using default"); } else { refreshPeriod = delay * 1000; } } } private void printUsage() { new HelpFormatter().printHelp("yarn top", opts); System.out.println(""); System.out.println("'yarn top' is a tool to help cluster administrators" + " understand cluster usage better."); System.out.println("Some notes about the implementation:"); System.out.println(" 1. Fetching information for all the apps is an" + " expensive call for the RM."); System.out.println(" To prevent a performance degradation, the results" + " are cached for 5 seconds,"); System.out.println(" irrespective of the delay value. Information about" + " the NodeManager(s) and queue"); System.out.println(" utilization stats are fetched at the specified" + " delay interval. Once we have a"); System.out.println(" better understanding of the performance impact," + " this might change."); System.out.println(" 2. Since the tool is implemented in Java, you must" + " hit Enter for key presses to"); System.out.println(" be processed."); } private void setAppsHeader() { List<String> formattedStrings = new ArrayList<>(); for (EnumMap.Entry<Columns, ColumnInformation> entry : columnInformationEnumMap.entrySet()) { if (entry.getValue().display) { formattedStrings.add(String.format(entry.getValue().format, entry.getValue().header)); } } appsHeader = StringUtils.join(formattedStrings.toArray(), " "); if (appsHeader.length() > terminalWidth) { appsHeader = appsHeader.substring(0, terminalWidth - System.lineSeparator().length()); } else { appsHeader += StringUtils.repeat(" ", terminalWidth - appsHeader.length() - System.lineSeparator().length()); } appsHeader += System.lineSeparator(); } private void setTerminalWidth() throws IOException, InterruptedException { if (terminalWidth != -1) { return; } String[] command = { "tput", "cols" }; String op = getCommandOutput(command).trim(); try { terminalWidth = Integer.parseInt(op); } catch (NumberFormatException ne) { LOG.warn("Couldn't determine 
terminal width, setting to 80", ne); terminalWidth = 80; } } private void setTerminalHeight() throws IOException, InterruptedException { if (terminalHeight != -1) { return; } String[] command = { "tput", "lines" }; String op = getCommandOutput(command).trim(); try { terminalHeight = Integer.parseInt(op); } catch (NumberFormatException ne) { LOG.warn("Couldn't determine terminal height, setting to 24", ne); terminalHeight = 24; } } protected void setTerminalSequences() throws IOException, InterruptedException { String[] tput_cursor_home = { "tput", "cup", "0", "0" }; String[] tput_clear = { "tput", "clear" }; String[] tput_clear_line = { "tput", "el" }; String[] tput_set_cursor_line_7_column_0 = { "tput", "cup", "6", "0" }; String[] tput_change_background = { "tput", "smso" }; String[] tput_reset_background = { "tput", "rmso" }; SET_CURSOR_HOME = getCommandOutput(tput_cursor_home); CLEAR = getCommandOutput(tput_clear); CLEAR_LINE = getCommandOutput(tput_clear_line); SET_CURSOR_LINE_7_COLUMN_0 = getCommandOutput(tput_set_cursor_line_7_column_0); CHANGE_BACKGROUND = getCommandOutput(tput_change_background); RESET_BACKGROUND = getCommandOutput(tput_reset_background); } private void generateColumnInformationMap() { columnInformationEnumMap.put(Columns.APPID, new ColumnInformation( "APPLICATIONID", "%31s", true, "Application Id", "a")); columnInformationEnumMap.put(Columns.USER, new ColumnInformation("USER", "%-10s", true, "Username", "u")); columnInformationEnumMap.put(Columns.TYPE, new ColumnInformation("TYPE", "%10s", true, "Application type", "t")); columnInformationEnumMap.put(Columns.QUEUE, new ColumnInformation("QUEUE", "%10s", true, "Application queue", "q")); columnInformationEnumMap.put(Columns.CONT, new ColumnInformation("#CONT", "%7s", true, "Number of containers", "c")); columnInformationEnumMap.put(Columns.RCONT, new ColumnInformation("#RCONT", "%7s", true, "Number of reserved containers", "r")); columnInformationEnumMap.put(Columns.VCORES, new 
ColumnInformation( "VCORES", "%7s", true, "Allocated vcores", "v")); columnInformationEnumMap.put(Columns.RVCORES, new ColumnInformation( "RVCORES", "%7s", true, "Reserved vcores", "o")); columnInformationEnumMap.put(Columns.MEM, new ColumnInformation("MEM", "%7s", true, "Allocated memory", "m")); columnInformationEnumMap.put(Columns.RMEM, new ColumnInformation("RMEM", "%7s", true, "Reserved memory", "w")); columnInformationEnumMap.put(Columns.VCORESECS, new ColumnInformation( "VCORESECS", "%10s", true, "Vcore seconds", "s")); columnInformationEnumMap.put(Columns.MEMSECS, new ColumnInformation( "MEMSECS", "%10s", true, "Memory seconds(in GBseconds)", "y")); columnInformationEnumMap.put(Columns.PROGRESS, new ColumnInformation( "%PROGR", "%6s", true, "Progress(percentage)", "p")); columnInformationEnumMap.put(Columns.TIME, new ColumnInformation("TIME", "%10s", true, "Running time", "i")); columnInformationEnumMap.put(Columns.NAME, new ColumnInformation("NAME", "%s", true, "Application name", "n")); } private void generateKeyFieldsMap() { for (EnumMap.Entry<Columns, ColumnInformation> entry : columnInformationEnumMap.entrySet()) { keyFieldsMap.put(entry.getValue().key, entry.getKey()); } } protected NodesInformation getNodesInfo() { NodesInformation nodeInfo = new NodesInformation(); YarnClusterMetrics yarnClusterMetrics; try { yarnClusterMetrics = client.getYarnClusterMetrics(); } catch (IOException ie) { LOG.error("Unable to fetch cluster metrics", ie); return nodeInfo; } catch (YarnException ye) { LOG.error("Unable to fetch cluster metrics", ye); return nodeInfo; } nodeInfo.decommissionedNodes = yarnClusterMetrics.getNumDecommissionedNodeManagers(); nodeInfo.totalNodes = yarnClusterMetrics.getNumNodeManagers(); nodeInfo.runningNodes = yarnClusterMetrics.getNumActiveNodeManagers(); nodeInfo.lostNodes = yarnClusterMetrics.getNumLostNodeManagers(); nodeInfo.unhealthyNodes = yarnClusterMetrics.getNumUnhealthyNodeManagers(); nodeInfo.rebootedNodes = 
yarnClusterMetrics.getNumRebootedNodeManagers(); return nodeInfo; } protected QueueMetrics getQueueMetrics() { QueueMetrics queueMetrics = new QueueMetrics(); List<QueueInfo> queuesInfo; if (queues.isEmpty()) { try { queuesInfo = client.getRootQueueInfos(); } catch (Exception ie) { LOG.error("Unable to get queue information", ie); return queueMetrics; } } else { queuesInfo = new ArrayList<>(); for (String queueName : queues) { try { QueueInfo qInfo = client.getQueueInfo(queueName); queuesInfo.add(qInfo); } catch (Exception ie) { LOG.error("Unable to get queue information", ie); return queueMetrics; } } } for (QueueInfo childInfo : queuesInfo) { QueueStatistics stats = childInfo.getQueueStatistics(); if (stats != null) { queueMetrics.appsSubmitted += stats.getNumAppsSubmitted(); queueMetrics.appsRunning += stats.getNumAppsRunning(); queueMetrics.appsPending += stats.getNumAppsPending(); queueMetrics.appsCompleted += stats.getNumAppsCompleted(); queueMetrics.appsKilled += stats.getNumAppsKilled(); queueMetrics.appsFailed += stats.getNumAppsFailed(); queueMetrics.activeUsers += stats.getNumActiveUsers(); queueMetrics.availableMemoryGB += stats.getAvailableMemoryMB(); queueMetrics.allocatedMemoryGB += stats.getAllocatedMemoryMB(); queueMetrics.pendingMemoryGB += stats.getPendingMemoryMB(); queueMetrics.reservedMemoryGB += stats.getReservedMemoryMB(); queueMetrics.availableVCores += stats.getAvailableVCores(); queueMetrics.allocatedVCores += stats.getAllocatedVCores(); queueMetrics.pendingVCores += stats.getPendingVCores(); queueMetrics.reservedVCores += stats.getReservedVCores(); queueMetrics.allocatedContainers += stats.getAllocatedContainers(); queueMetrics.pendingContainers += stats.getPendingContainers(); queueMetrics.reservedContainers += stats.getReservedContainers(); } } queueMetrics.availableMemoryGB = queueMetrics.availableMemoryGB / 1024; queueMetrics.allocatedMemoryGB = queueMetrics.allocatedMemoryGB / 1024; queueMetrics.pendingMemoryGB = 
queueMetrics.pendingMemoryGB / 1024; queueMetrics.reservedMemoryGB = queueMetrics.reservedMemoryGB / 1024; return queueMetrics; } long getRMStartTime() { try { URL url = new URL("http://" + client.getConfig().get(YarnConfiguration.RM_WEBAPP_ADDRESS) + "/ws/v1/cluster/info"); URLConnection conn = url.openConnection(); conn.connect(); InputStream in = conn.getInputStream(); String encoding = conn.getContentEncoding(); encoding = encoding == null ? "UTF-8" : encoding; String body = IOUtils.toString(in, encoding); JSONObject obj = new JSONObject(body); JSONObject clusterInfo = obj.getJSONObject("clusterInfo"); return clusterInfo.getLong("startedOn"); } catch (Exception e) { LOG.error("Could not fetch RM start time", e); } return -1; } String getHeader(QueueMetrics queueMetrics, NodesInformation nodes) { StringBuilder ret = new StringBuilder(); String queue = "root"; if (!queues.isEmpty()) { queue = StringUtils.join(queues, ","); } long now = Time.now(); long uptime = now - rmStartTime; long days = TimeUnit.MILLISECONDS.toDays(uptime); long hours = TimeUnit.MILLISECONDS.toHours(uptime) - TimeUnit.DAYS.toHours(TimeUnit.MILLISECONDS.toDays(uptime)); long minutes = TimeUnit.MILLISECONDS.toMinutes(uptime) - TimeUnit.HOURS.toMinutes(TimeUnit.MILLISECONDS.toHours(uptime)); String uptimeStr = String.format("%dd, %d:%d", days, hours, minutes); String currentTime = DateFormatUtils.ISO_TIME_NO_T_FORMAT.format(now); ret.append(CLEAR_LINE); ret.append(limitLineLength(String.format( "YARN top - %s, up %s, %d active users, queue(s): %s%n", currentTime, uptimeStr, queueMetrics.activeUsers, queue), terminalWidth, true)); ret.append(CLEAR_LINE); ret.append(limitLineLength(String.format( "NodeManager(s): %d total, %d active, %d unhealthy, %d decommissioned," + " %d lost, %d rebooted%n", nodes.totalNodes, nodes.runningNodes, nodes.unhealthyNodes, nodes.decommissionedNodes, nodes.lostNodes, nodes.rebootedNodes), terminalWidth, true)); ret.append(CLEAR_LINE); 
ret.append(limitLineLength(String.format( "Queue(s) Applications: %d running, %d submitted, %d pending," + " %d completed, %d killed, %d failed%n", queueMetrics.appsRunning, queueMetrics.appsSubmitted, queueMetrics.appsPending, queueMetrics.appsCompleted, queueMetrics.appsKilled, queueMetrics.appsFailed), terminalWidth, true)); ret.append(CLEAR_LINE); ret.append(limitLineLength(String.format("Queue(s) Mem(GB): %d available," + " %d allocated, %d pending, %d reserved%n", queueMetrics.availableMemoryGB, queueMetrics.allocatedMemoryGB, queueMetrics.pendingMemoryGB, queueMetrics.reservedMemoryGB), terminalWidth, true)); ret.append(CLEAR_LINE); ret.append(limitLineLength(String.format("Queue(s) VCores: %d available," + " %d allocated, %d pending, %d reserved%n", queueMetrics.availableVCores, queueMetrics.allocatedVCores, queueMetrics.pendingVCores, queueMetrics.reservedVCores), terminalWidth, true)); ret.append(CLEAR_LINE); ret.append(limitLineLength(String.format( "Queue(s) Containers: %d allocated, %d pending, %d reserved%n", queueMetrics.allocatedContainers, queueMetrics.pendingContainers, queueMetrics.reservedContainers), terminalWidth, true)); return ret.toString(); } String getPrintableAppInformation(List<ApplicationInformation> appsInfo) { StringBuilder ret = new StringBuilder(); int limit = terminalHeight - 9; List<String> columns = new ArrayList<>(); for (int i = 0; i < limit; ++i) { ret.append(CLEAR_LINE); if(i < appsInfo.size()) { ApplicationInformation appInfo = appsInfo.get(i); columns.clear(); for (EnumMap.Entry<Columns, ColumnInformation> entry : columnInformationEnumMap.entrySet()) { if (entry.getValue().display) { String value = ""; if (appInfo.displayStringsMap.containsKey(entry.getKey())) { value = appInfo.displayStringsMap.get(entry.getKey()); } columns.add(String.format(entry.getValue().format, value)); } } ret.append(limitLineLength( (StringUtils.join(columns.toArray(), " ") + System.lineSeparator()), terminalWidth, true)); } else { 
ret.append(System.lineSeparator()); } } return ret.toString(); } protected void clearScreen() { System.out.print(CLEAR); System.out.flush(); } protected void clearScreenWithoutScroll() { System.out.print(SET_CURSOR_HOME); for(int i = 0; i < terminalHeight; ++i) { System.out.println(CLEAR_LINE); } } protected void printHeader(String header) { System.out.print(SET_CURSOR_HOME); System.out.print(header); System.out.println(""); } protected void printApps(String appInfo) { System.out.print(CLEAR_LINE); System.out.print(CHANGE_BACKGROUND + appsHeader + RESET_BACKGROUND); System.out.print(appInfo); } private void showHelpScreen() { synchronized (lock) { if (!showingTopScreen.get()) { // we've already printed the help screen return; } showingTopScreen.set(false); clearScreenWithoutScroll(); System.out.print(SET_CURSOR_HOME); System.out.println("Help for yarn top."); System.out.println("Delay: " + (refreshPeriod / 1000) + " secs; Secure mode: " + UserGroupInformation.isSecurityEnabled()); System.out.println(""); System.out.println(" s + Enter : Select sort field"); System.out.println(" f + Enter : Select fields to display"); System.out.println(" R + Enter: Reverse current sort order"); System.out.println(" h + Enter: Display this screen"); System.out.println(" q + Enter: Quit"); System.out.println(""); System.out.println("Press any key followed by Enter to continue"); } } private void showSortScreen() { synchronized (lock) { showingTopScreen.set(false); System.out.print(SET_CURSOR_HOME); System.out.println(CLEAR_LINE + "Current Sort Field: " + currentSortField); System.out.println(CLEAR_LINE + "Select sort field via letter followed by" + " Enter, type any other key followed by Enter to return"); System.out.println(CLEAR_LINE); for (String key : sortedKeys) { String prefix = " "; if (key.equals(currentSortField)) { prefix = "*"; } ColumnInformation value = columnInformationEnumMap.get(keyFieldsMap.get(key)); System.out.print(CLEAR_LINE); System.out.println(String.format("%s 
%s: %-15s = %s", prefix, key, value.header, value.description)); } } } protected void showFieldsScreen() { synchronized (lock) { showingTopScreen.set(false); System.out.print(SET_CURSOR_HOME); System.out.println(CLEAR_LINE + "Current Fields: "); System.out.println(CLEAR_LINE + "Toggle fields via field letter followed" + " by Enter, type any other key followed by Enter to return"); for (String key : sortedKeys) { ColumnInformation info = columnInformationEnumMap.get(keyFieldsMap.get(key)); String prefix = " "; String letter = key; if (info.display) { prefix = "*"; letter = key.toUpperCase(); } System.out.print(CLEAR_LINE); System.out.println(String.format("%s %s: %-15s = %s", prefix, letter, info.header, info.description)); } } } protected void showTopScreen() { List<ApplicationInformation> appsInfo = new ArrayList<>(); List<ApplicationReport> apps; try { apps = fetchAppReports(); } catch (Exception e) { LOG.error("Unable to get application information", e); return; } for (ApplicationReport appReport : apps) { ApplicationInformation appInfo = new ApplicationInformation(appReport); appsInfo.add(appInfo); } if (ascendingSort) { Collections.sort(appsInfo, comparator); } else { Collections.sort(appsInfo, Collections.reverseOrder(comparator)); } NodesInformation nodesInfo = getNodesInfo(); QueueMetrics queueMetrics = getQueueMetrics(); String header = getHeader(queueMetrics, nodesInfo); String appsStr = getPrintableAppInformation(appsInfo); synchronized (lock) { printHeader(header); printApps(appsStr); System.out.print(SET_CURSOR_LINE_7_COLUMN_0); System.out.print(CLEAR_LINE); } } private void handleSortScreenKeyPress(String input) { String f = currentSortField; currentSortField = input.toLowerCase(); switch (input.toLowerCase()) { case "a": comparator = AppIDComparator; break; case "u": comparator = UserComparator; break; case "t": comparator = AppTypeComparator; break; case "q": comparator = QueueNameComparator; break; case "c": comparator = UsedContainersComparator; 
break; case "r": comparator = ReservedContainersComparator; break; case "v": comparator = UsedVCoresComparator; break; case "o": comparator = ReservedVCoresComparator; break; case "m": comparator = UsedMemoryComparator; break; case "w": comparator = ReservedMemoryComparator; break; case "s": comparator = VCoreSecondsComparator; break; case "y": comparator = MemorySecondsComparator; break; case "p": comparator = ProgressComparator; break; case "i": comparator = RunningTimeComparator; break; case "n": comparator = AppNameComparator; break; default: // it wasn't a sort key currentSortField = f; showTopScreen(); showingTopScreen.set(true); displayScreen = DisplayScreen.TOP; } } private void handleFieldsScreenKeyPress(String input) { if (keyFieldsMap.containsKey(input.toLowerCase())) { toggleColumn(keyFieldsMap.get(input.toLowerCase())); setAppsHeader(); } else { showTopScreen(); showingTopScreen.set(true); displayScreen = DisplayScreen.TOP; } } private void handleTopScreenKeyPress(String input) { switch (input.toLowerCase()) { case "q": runMainLoop.set(false); runKeyboardMonitor.set(false); // wake up if it's sleeping displayThread.interrupt(); break; case "s": displayScreen = DisplayScreen.SORT; showSortScreen(); break; case "f": displayScreen = DisplayScreen.FIELDS; showFieldsScreen(); break; case "r": ascendingSort = !ascendingSort; break; case "h": displayScreen = DisplayScreen.HELP; showHelpScreen(); break; default: break; } } private void handleHelpScreenKeyPress() { showTopScreen(); showingTopScreen.set(true); displayScreen = DisplayScreen.TOP; } String limitLineLength(String line, int length, boolean addNewline) { if (line.length() > length) { String tmp; if (addNewline) { tmp = line.substring(0, length - System.lineSeparator().length()); tmp += System.lineSeparator(); } else { tmp = line.substring(0, length); } return tmp; } return line; } void toggleColumn(Columns col) { columnInformationEnumMap.get(col).display = !columnInformationEnumMap.get(col).display; } 
/**
 * Fetches application reports for apps in the ACCEPTED or RUNNING states,
 * filtered by the configured queues, users and application types. Results
 * are cached per request so repeated refreshes within the cache window do
 * not hammer the ResourceManager.
 *
 * @return the (possibly cached) list of matching application reports
 * @throws YarnException on RM-side errors
 * @throws IOException on communication errors
 */
  protected List<ApplicationReport> fetchAppReports() throws YarnException,
      IOException {
    EnumSet<YarnApplicationState> states =
        EnumSet.of(YarnApplicationState.ACCEPTED, YarnApplicationState.RUNNING);
    GetApplicationsRequest req =
        GetApplicationsRequest.newInstance(types, states);
    req.setQueues(queues);
    req.setUsers(users);
    // Serve from the cache when an identical request was answered recently.
    List<ApplicationReport> ret = applicationReportsCache.getIfPresent(req);
    if (ret != null) {
      return ret;
    }
    ret = client.getApplications(queues, users, types, states);
    applicationReportsCache.put(req, ret);
    return ret;
  }

  /**
   * Runs the given command and returns its standard output decoded as ASCII.
   *
   * @param command the command and its arguments, one array element each
   * @return the command's standard output
   * @throws IOException if the process cannot be started or read
   * @throws InterruptedException if interrupted while waiting for exit
   */
  private String getCommandOutput(String[] command) throws IOException,
      InterruptedException {
    Process p = Runtime.getRuntime().exec(command);
    // Drain stdout BEFORE waiting for exit: if the child fills the OS pipe
    // buffer while we block in waitFor(), both processes deadlock. Reading
    // to EOF first also guarantees the child has finished writing.
    byte[] output = IOUtils.toByteArray(p.getInputStream());
    p.waitFor();
    return new String(output, "ASCII");
  }
}
40,152
35.108813
87
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.cli; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Collections; import java.util.Date; import java.util.HashSet; import java.util.List; import java.util.Set; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.MissingArgumentException; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.time.DateFormatUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.util.ConverterUtils; @Private @Unstable public class NodeCLI extends YarnCLI { 
private static final String NODES_PATTERN = "%16s\t%15s\t%17s\t%28s" + System.getProperty("line.separator"); private static final String NODE_STATE_CMD = "states"; private static final String NODE_ALL = "all"; public static void main(String[] args) throws Exception { NodeCLI cli = new NodeCLI(); cli.setSysOutPrintStream(System.out); cli.setSysErrPrintStream(System.err); int res = ToolRunner.run(cli, args); cli.stop(); System.exit(res); } @Override public int run(String[] args) throws Exception { Options opts = new Options(); opts.addOption(HELP_CMD, false, "Displays help for all commands."); opts.addOption(STATUS_CMD, true, "Prints the status report of the node."); opts.addOption(LIST_CMD, false, "List all running nodes. " + "Supports optional use of -states to filter nodes " + "based on node state, all -all to list all nodes."); Option nodeStateOpt = new Option(NODE_STATE_CMD, true, "Works with -list to filter nodes based on input comma-separated list of node states."); nodeStateOpt.setValueSeparator(','); nodeStateOpt.setArgs(Option.UNLIMITED_VALUES); nodeStateOpt.setArgName("States"); opts.addOption(nodeStateOpt); Option allOpt = new Option(NODE_ALL, false, "Works with -list to list all nodes."); opts.addOption(allOpt); opts.getOption(STATUS_CMD).setArgName("NodeId"); int exitCode = -1; CommandLine cliParser = null; try { cliParser = new GnuParser().parse(opts, args); } catch (MissingArgumentException ex) { sysout.println("Missing argument for options"); printUsage(opts); return exitCode; } if (cliParser.hasOption("status")) { if (args.length != 2) { printUsage(opts); return exitCode; } printNodeStatus(cliParser.getOptionValue("status")); } else if (cliParser.hasOption("list")) { Set<NodeState> nodeStates = new HashSet<NodeState>(); if (cliParser.hasOption(NODE_ALL)) { for (NodeState state : NodeState.values()) { nodeStates.add(state); } } else if (cliParser.hasOption(NODE_STATE_CMD)) { String[] types = cliParser.getOptionValues(NODE_STATE_CMD); if (types != 
null) { for (String type : types) { if (!type.trim().isEmpty()) { nodeStates.add(NodeState.valueOf( org.apache.hadoop.util.StringUtils.toUpperCase(type.trim()))); } } } } else { nodeStates.add(NodeState.RUNNING); } listClusterNodes(nodeStates); } else if (cliParser.hasOption(HELP_CMD)) { printUsage(opts); return 0; } else { syserr.println("Invalid Command Usage : "); printUsage(opts); } return 0; } /** * It prints the usage of the command * * @param opts */ private void printUsage(Options opts) { new HelpFormatter().printHelp("node", opts); } /** * Lists the nodes matching the given node states * * @param nodeStates * @throws YarnException * @throws IOException */ private void listClusterNodes(Set<NodeState> nodeStates) throws YarnException, IOException { PrintWriter writer = new PrintWriter( new OutputStreamWriter(sysout, Charset.forName("UTF-8"))); List<NodeReport> nodesReport = client.getNodeReports( nodeStates.toArray(new NodeState[0])); writer.println("Total Nodes:" + nodesReport.size()); writer.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address", "Number-of-Running-Containers"); for (NodeReport nodeReport : nodesReport) { writer.printf(NODES_PATTERN, nodeReport.getNodeId(), nodeReport .getNodeState(), nodeReport.getHttpAddress(), nodeReport .getNumContainers()); } writer.flush(); } /** * Prints the node report for node id. * * @param nodeIdStr * @throws YarnException */ private void printNodeStatus(String nodeIdStr) throws YarnException, IOException { NodeId nodeId = ConverterUtils.toNodeId(nodeIdStr); List<NodeReport> nodesReport = client.getNodeReports(); // Use PrintWriter.println, which uses correct platform line ending. 
ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter nodeReportStr = new PrintWriter( new OutputStreamWriter(baos, Charset.forName("UTF-8"))); NodeReport nodeReport = null; for (NodeReport report : nodesReport) { if (!report.getNodeId().equals(nodeId)) { continue; } nodeReport = report; nodeReportStr.println("Node Report : "); nodeReportStr.print("\tNode-Id : "); nodeReportStr.println(nodeReport.getNodeId()); nodeReportStr.print("\tRack : "); nodeReportStr.println(nodeReport.getRackName()); nodeReportStr.print("\tNode-State : "); nodeReportStr.println(nodeReport.getNodeState()); nodeReportStr.print("\tNode-Http-Address : "); nodeReportStr.println(nodeReport.getHttpAddress()); nodeReportStr.print("\tLast-Health-Update : "); nodeReportStr.println(DateFormatUtils.format( new Date(nodeReport.getLastHealthReportTime()), "E dd/MMM/yy hh:mm:ss:SSzz")); nodeReportStr.print("\tHealth-Report : "); nodeReportStr .println(nodeReport.getHealthReport()); nodeReportStr.print("\tContainers : "); nodeReportStr.println(nodeReport.getNumContainers()); nodeReportStr.print("\tMemory-Used : "); nodeReportStr.println((nodeReport.getUsed() == null) ? "0MB" : (nodeReport.getUsed().getMemory() + "MB")); nodeReportStr.print("\tMemory-Capacity : "); nodeReportStr.println(nodeReport.getCapability().getMemory() + "MB"); nodeReportStr.print("\tCPU-Used : "); nodeReportStr.println((nodeReport.getUsed() == null) ? 
"0 vcores" : (nodeReport.getUsed().getVirtualCores() + " vcores")); nodeReportStr.print("\tCPU-Capacity : "); nodeReportStr.println(nodeReport.getCapability().getVirtualCores() + " vcores"); nodeReportStr.print("\tNode-Labels : "); // Create a List for node labels since we need it get sorted List<String> nodeLabelsList = new ArrayList<String>(report.getNodeLabels()); Collections.sort(nodeLabelsList); nodeReportStr.println(StringUtils.join(nodeLabelsList.iterator(), ',')); } if (nodeReport == null) { nodeReportStr.print("Could not find the node report for node id : " + nodeIdStr); } nodeReportStr.close(); sysout.println(baos.toString("UTF-8")); } }
8,737
36.82684
96
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.client.cli;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.MissingArgumentException;
import org.apache.commons.cli.Options;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;

import com.google.common.annotations.VisibleForTesting;

/**
 * Cluster CLI used to get over all information of the cluster
 */
@Private
public class ClusterCLI extends YarnCLI {
  // Command name shown in the generated help output.
  private static final String TITLE = "yarn cluster";
  // Long option: print the cluster's node-label collection.
  public static final String LIST_LABELS_CMD = "list-node-labels";
  // Long option: read the node-label store directly instead of asking the RM.
  public static final String DIRECTLY_ACCESS_NODE_LABEL_STORE =
      "directly-access-node-label-store";
  public static final String CMD = "cluster";
  // Set when the deprecated direct-store option is supplied on the command
  // line; switches printClusterNodeLabels to the local label manager.
  private boolean accessLocal = false;
  // Lazily created, process-wide label manager used for direct store access.
  static CommonNodeLabelsManager localNodeLabelsManager = null;

  /** CLI entry point; exits the JVM with the tool's return code. */
  public static void main(String[] args) throws Exception {
    ClusterCLI cli = new ClusterCLI();
    cli.setSysOutPrintStream(System.out);
    cli.setSysErrPrintStream(System.err);
    int res = ToolRunner.run(cli, args);
    cli.stop();
    System.exit(res);
  }

  /**
   * Parses the command line and dispatches to -list-node-labels or -help.
   * Returns 0 on success, -1 on usage errors.
   */
  @Override
  public int run(String[] args) throws Exception {
    Options opts = new Options();

    opts.addOption("lnl", LIST_LABELS_CMD, false,
        "List cluster node-label collection");
    opts.addOption("h", HELP_CMD, false, "Displays help for all commands.");
    opts.addOption("dnl", DIRECTLY_ACCESS_NODE_LABEL_STORE, false,
        "This is DEPRECATED, will be removed in future releases. Directly access node label store, "
            + "with this option, all node label related operations"
            + " will NOT connect RM. Instead, they will"
            + " access/modify stored node labels directly."
            + " By default, it is false (access via RM)."
            + " AND PLEASE NOTE: if you configured "
            + YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR
            + " to a local directory"
            + " (instead of NFS or HDFS), this option will only work"
            + " when the command run on the machine where RM is running."
            + " Also, this option is UNSTABLE, could be removed in future"
            + " releases.");

    int exitCode = -1;
    CommandLine parsedCli = null;
    try {
      parsedCli = new GnuParser().parse(opts, args);
    } catch (MissingArgumentException ex) {
      sysout.println("Missing argument for options");
      printUsage(opts);
      return exitCode;
    }

    if (parsedCli.hasOption(DIRECTLY_ACCESS_NODE_LABEL_STORE)) {
      accessLocal = true;
    }

    if (parsedCli.hasOption(LIST_LABELS_CMD)) {
      printClusterNodeLabels();
    } else if (parsedCli.hasOption(HELP_CMD)) {
      printUsage(opts);
      return 0;
    } else {
      syserr.println("Invalid Command Usage : ");
      printUsage(opts);
    }
    return 0;
  }

  /**
   * Prints the cluster's node labels as a single comma-joined line, reading
   * either directly from the label store (deprecated -dnl path) or from the
   * ResourceManager.
   */
  void printClusterNodeLabels() throws YarnException, IOException {
    List<NodeLabel> nodeLabels = null;
    if (accessLocal) {
      nodeLabels =
          new ArrayList<>(getNodeLabelManagerInstance(getConf())
              .getClusterNodeLabels());
    } else {
      nodeLabels = new ArrayList<>(client.getClusterNodeLabels());
    }
    sysout.println(String.format("Node Labels: %s",
        StringUtils.join(nodeLabels.iterator(), ",")));
  }

  /**
   * Returns the shared label manager, creating and starting it on first use.
   * Synchronized so concurrent callers see a single initialized instance.
   */
  @VisibleForTesting
  static synchronized CommonNodeLabelsManager
      getNodeLabelManagerInstance(Configuration conf) {
    if (localNodeLabelsManager == null) {
      localNodeLabelsManager = new CommonNodeLabelsManager();
      localNodeLabelsManager.init(conf);
      localNodeLabelsManager.start();
    }
    return localNodeLabelsManager;
  }

  /**
   * Prints the formatted usage of the command to sysout (rendered through a
   * UTF-8 buffer rather than written directly).
   */
  @VisibleForTesting
  void printUsage(Options opts) throws UnsupportedEncodingException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw =
        new PrintWriter(new OutputStreamWriter(baos, Charset.forName("UTF-8")));
    new HelpFormatter().printHelp(pw, HelpFormatter.DEFAULT_WIDTH, TITLE, null,
        opts, HelpFormatter.DEFAULT_LEFT_PAD, HelpFormatter.DEFAULT_DESC_PAD,
        null);
    pw.close();
    sysout.println(baos.toString("UTF-8"));
  }
}
5,608
36.644295
100
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.client.cli;

import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.Charset;
import java.text.DecimalFormat;
import java.util.Set;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.MissingArgumentException;
import org.apache.commons.cli.Options;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.exceptions.YarnException;

import com.google.common.annotations.VisibleForTesting;

/**
 * CLI for querying queue information from the ResourceManager
 * ("yarn queue -status &lt;queue&gt;").
 */
@Private
@Unstable
public class QueueCLI extends YarnCLI {
  public static final String QUEUE = "queue";

  /** CLI entry point; exits the JVM with the tool's return code. */
  public static void main(String[] args) throws Exception {
    QueueCLI cli = new QueueCLI();
    cli.setSysOutPrintStream(System.out);
    cli.setSysErrPrintStream(System.err);
    int res = ToolRunner.run(cli, args);
    cli.stop();
    System.exit(res);
  }

  /**
   * Parses the command line and dispatches to -status or -help.
   * Returns 0 on success, -1 on usage errors or when the queue is not found.
   */
  @Override
  public int run(String[] args) throws Exception {
    Options opts = new Options();

    opts.addOption(STATUS_CMD, true,
        "List queue information about given queue.");
    opts.addOption(HELP_CMD, false, "Displays help for all commands.");
    opts.getOption(STATUS_CMD).setArgName("Queue Name");

    CommandLine cliParser = null;
    try {
      cliParser = new GnuParser().parse(opts, args);
    } catch (MissingArgumentException ex) {
      sysout.println("Missing argument for options");
      printUsage(opts);
      return -1;
    }

    if (cliParser.hasOption(STATUS_CMD)) {
      // -status takes exactly one argument: the queue name.
      if (args.length != 2) {
        printUsage(opts);
        return -1;
      }
      return listQueue(cliParser.getOptionValue(STATUS_CMD));
    } else if (cliParser.hasOption(HELP_CMD)) {
      printUsage(opts);
      return 0;
    } else {
      syserr.println("Invalid Command Usage : ");
      printUsage(opts);
      return -1;
    }
  }

  /**
   * It prints the usage of the command
   *
   * @param opts the configured options to describe
   */
  @VisibleForTesting
  void printUsage(Options opts) {
    new HelpFormatter().printHelp(QUEUE, opts);
  }

  /**
   * Lists the Queue Information matching the given queue name
   *
   * @param queueName the queue to look up
   * @return 0 when the queue was found and printed, -1 otherwise
   * @throws YarnException
   * @throws IOException
   */
  private int listQueue(String queueName) throws YarnException, IOException {
    int rc;
    PrintWriter writer = new PrintWriter(
        new OutputStreamWriter(sysout, Charset.forName("UTF-8")));

    QueueInfo queueInfo = client.getQueueInfo(queueName);
    if (queueInfo != null) {
      writer.println("Queue Information : ");
      printQueueInfo(writer, queueInfo);
      rc = 0;
    } else {
      writer.println("Cannot get queue from RM by queueName = " + queueName
          + ", please check.");
      rc = -1;
    }
    writer.flush();
    return rc;
  }

  /**
   * Renders one queue's state, capacities, default label expression and
   * accessible node labels.
   */
  private void printQueueInfo(PrintWriter writer, QueueInfo queueInfo) {
    writer.print("Queue Name : ");
    writer.println(queueInfo.getQueueName());

    writer.print("\tState : ");
    writer.println(queueInfo.getQueueState());
    DecimalFormat df = new DecimalFormat("#.0");
    writer.print("\tCapacity : ");
    writer.println(df.format(queueInfo.getCapacity() * 100) + "%");
    writer.print("\tCurrent Capacity : ");
    writer.println(df.format(queueInfo.getCurrentCapacity() * 100) + "%");
    writer.print("\tMaximum Capacity : ");
    writer.println(df.format(queueInfo.getMaximumCapacity() * 100) + "%");
    writer.print("\tDefault Node Label expression : ");
    if (null != queueInfo.getDefaultNodeLabelExpression()) {
      writer.println(queueInfo.getDefaultNodeLabelExpression());
    } else {
      writer.println();
    }

    Set<String> nodeLabels = queueInfo.getAccessibleNodeLabels();
    StringBuilder labelList = new StringBuilder();
    writer.print("\tAccessible Node Labels : ");
    // Guard against a null label set (previously an NPE); an empty line is
    // printed in that case, matching the empty-set output.
    if (nodeLabels != null) {
      for (String nodeLabel : nodeLabels) {
        if (labelList.length() > 0) {
          labelList.append(',');
        }
        labelList.append(nodeLabel);
      }
    }
    writer.println(labelList.toString());
  }
}
5,051
31.384615
77
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/package-info.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * Public-audience client API for YARN
 * ({@code org.apache.hadoop.yarn.client.api}); the whole package is marked
 * {@code @InterfaceAudience.Public} via this package-info.
 */
@InterfaceAudience.Public
package org.apache.hadoop.yarn.client.api;
import org.apache.hadoop.classification.InterfaceAudience;
935
41.545455
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api; import java.io.IOException; import java.util.Collection; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl; import org.apache.hadoop.yarn.exceptions.YarnException; import com.google.common.base.Preconditions; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; @InterfaceAudience.Public @InterfaceStability.Stable public abstract class 
AMRMClient<T extends AMRMClient.ContainerRequest> extends AbstractService { private static final Log LOG = LogFactory.getLog(AMRMClient.class); /** * Create a new instance of AMRMClient. * For usage: * <pre> * {@code * AMRMClient.<T>createAMRMClientContainerRequest() * }</pre> * @return the newly create AMRMClient instance. */ @Public public static <T extends ContainerRequest> AMRMClient<T> createAMRMClient() { AMRMClient<T> client = new AMRMClientImpl<T>(); return client; } private NMTokenCache nmTokenCache; @Private protected AMRMClient(String name) { super(name); nmTokenCache = NMTokenCache.getSingleton(); } /** * Object to represent a single container request for resources. Scheduler * documentation should be consulted for the specifics of how the parameters * are honored. * * By default, YARN schedulers try to allocate containers at the requested * locations but they may relax the constraints in order to expedite meeting * allocations limits. They first relax the constraint to the same rack as the * requested node and then to anywhere in the cluster. The relaxLocality flag * may be used to disable locality relaxation and request containers at only * specific locations. The following conditions apply. * <ul> * <li>Within a priority, all container requests must have the same value for * locality relaxation. Either enabled or disabled.</li> * <li>If locality relaxation is disabled, then across requests, locations at * different network levels may not be specified. E.g. its invalid to make a * request for a specific node and another request for a specific rack.</li> * <li>If locality relaxation is disabled, then only within the same request, * a node and its rack may be specified together. This allows for a specific * rack with a preference for a specific node within that rack.</li> * <li></li> * </ul> * To re-enable locality relaxation at a given priority, all pending requests * with locality relaxation disabled must be first removed. 
Then they can be * added back with locality relaxation enabled. * * All getters return immutable values. */ public static class ContainerRequest { final Resource capability; final List<String> nodes; final List<String> racks; final Priority priority; final boolean relaxLocality; final String nodeLabelsExpression; /** * Instantiates a {@link ContainerRequest} with the given constraints and * locality relaxation enabled. * * @param capability * The {@link Resource} to be requested for each container. * @param nodes * Any hosts to request that the containers are placed on. * @param racks * Any racks to request that the containers are placed on. The * racks corresponding to any hosts requested will be automatically * added to this list. * @param priority * The priority at which to request the containers. Higher * priorities have lower numerical values. */ public ContainerRequest(Resource capability, String[] nodes, String[] racks, Priority priority) { this(capability, nodes, racks, priority, true, null); } /** * Instantiates a {@link ContainerRequest} with the given constraints. * * @param capability * The {@link Resource} to be requested for each container. * @param nodes * Any hosts to request that the containers are placed on. * @param racks * Any racks to request that the containers are placed on. The * racks corresponding to any hosts requested will be automatically * added to this list. * @param priority * The priority at which to request the containers. Higher * priorities have lower numerical values. * @param relaxLocality * If true, containers for this request may be assigned on hosts * and racks other than the ones explicitly requested. */ public ContainerRequest(Resource capability, String[] nodes, String[] racks, Priority priority, boolean relaxLocality) { this(capability, nodes, racks, priority, relaxLocality, null); } /** * Instantiates a {@link ContainerRequest} with the given constraints. 
* * @param capability * The {@link Resource} to be requested for each container. * @param nodes * Any hosts to request that the containers are placed on. * @param racks * Any racks to request that the containers are placed on. The * racks corresponding to any hosts requested will be automatically * added to this list. * @param priority * The priority at which to request the containers. Higher * priorities have lower numerical values. * @param relaxLocality * If true, containers for this request may be assigned on hosts * and racks other than the ones explicitly requested. * @param nodeLabelsExpression * Set node labels to allocate resource, now we only support * asking for only a single node label */ public ContainerRequest(Resource capability, String[] nodes, String[] racks, Priority priority, boolean relaxLocality, String nodeLabelsExpression) { // Validate request Preconditions.checkArgument(capability != null, "The Resource to be requested for each container " + "should not be null "); Preconditions.checkArgument(priority != null, "The priority at which to request containers should not be null "); Preconditions.checkArgument( !(!relaxLocality && (racks == null || racks.length == 0) && (nodes == null || nodes.length == 0)), "Can't turn off locality relaxation on a " + "request with no location constraints"); this.capability = capability; this.nodes = (nodes != null ? ImmutableList.copyOf(nodes) : null); this.racks = (racks != null ? 
ImmutableList.copyOf(racks) : null); this.priority = priority; this.relaxLocality = relaxLocality; this.nodeLabelsExpression = nodeLabelsExpression; } public Resource getCapability() { return capability; } public List<String> getNodes() { return nodes; } public List<String> getRacks() { return racks; } public Priority getPriority() { return priority; } public boolean getRelaxLocality() { return relaxLocality; } public String getNodeLabelExpression() { return nodeLabelsExpression; } public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Capability[").append(capability).append("]"); sb.append("Priority[").append(priority).append("]"); return sb.toString(); } } /** * Register the application master. This must be called before any * other interaction * @param appHostName Name of the host on which master is running * @param appHostPort Port master is listening on * @param appTrackingUrl URL at which the master info can be seen * @return <code>RegisterApplicationMasterResponse</code> * @throws YarnException * @throws IOException */ public abstract RegisterApplicationMasterResponse registerApplicationMaster(String appHostName, int appHostPort, String appTrackingUrl) throws YarnException, IOException; /** * Request additional containers and receive new container allocations. * Requests made via <code>addContainerRequest</code> are sent to the * <code>ResourceManager</code>. New containers assigned to the master are * retrieved. Status of completed containers and node health updates are also * retrieved. This also doubles up as a heartbeat to the ResourceManager and * must be made periodically. The call may not always return any new * allocations of containers. App should not make concurrent allocate * requests. May cause request loss. * * <p> * Note : If the user has not removed container requests that have already * been satisfied, then the re-register may end up sending the entire * container requests to the RM (including matched requests). 
Which would mean * the RM could end up giving it a lot of new allocated containers. * </p> * * @param progressIndicator Indicates progress made by the master * @return the response of the allocate request * @throws YarnException * @throws IOException */ public abstract AllocateResponse allocate(float progressIndicator) throws YarnException, IOException; /** * Unregister the application master. This must be called in the end. * @param appStatus Success/Failure status of the master * @param appMessage Diagnostics message on failure * @param appTrackingUrl New URL to get master info * @throws YarnException * @throws IOException */ public abstract void unregisterApplicationMaster(FinalApplicationStatus appStatus, String appMessage, String appTrackingUrl) throws YarnException, IOException; /** * Request containers for resources before calling <code>allocate</code> * @param req Resource request */ public abstract void addContainerRequest(T req); /** * Remove previous container request. The previous container request may have * already been sent to the ResourceManager. So even after the remove request * the app must be prepared to receive an allocation for the previous request * even after the remove request * @param req Resource request */ public abstract void removeContainerRequest(T req); /** * Release containers assigned by the Resource Manager. If the app cannot use * the container or wants to give up the container then it can release them. * The app needs to make new requests for the released resource capability if * it still needs it. eg. it released non-local resources * @param containerId */ public abstract void releaseAssignedContainer(ContainerId containerId); /** * Get the currently available resources in the cluster. * A valid value is available after a call to allocate has been made * @return Currently available resources */ public abstract Resource getAvailableResources(); /** * Get the current number of nodes in the cluster. 
* A valid values is available after a call to allocate has been made * @return Current number of nodes in the cluster */ public abstract int getClusterNodeCount(); /** * Get outstanding <code>ContainerRequest</code>s matching the given * parameters. These ContainerRequests should have been added via * <code>addContainerRequest</code> earlier in the lifecycle. For performance, * the AMRMClient may return its internal collection directly without creating * a copy. Users should not perform mutable operations on the return value. * Each collection in the list contains requests with identical * <code>Resource</code> size that fit in the given capability. In a * collection, requests will be returned in the same order as they were added. * @return Collection of request matching the parameters */ public abstract List<? extends Collection<T>> getMatchingRequests( Priority priority, String resourceName, Resource capability); /** * Update application's blacklist with addition or removal resources. * * @param blacklistAdditions list of resources which should be added to the * application blacklist * @param blacklistRemovals list of resources which should be removed from the * application blacklist */ public abstract void updateBlacklist(List<String> blacklistAdditions, List<String> blacklistRemovals); /** * Set the NM token cache for the <code>AMRMClient</code>. This cache must * be shared with the {@link NMClient} used to manage containers for the * <code>AMRMClient</code> * <p> * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} * singleton instance will be used. * * @param nmTokenCache the NM token cache to use. */ public void setNMTokenCache(NMTokenCache nmTokenCache) { this.nmTokenCache = nmTokenCache; } /** * Get the NM token cache of the <code>AMRMClient</code>. This cache must be * shared with the {@link NMClient} used to manage containers for the * <code>AMRMClient</code>. 
   * <p>
   * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
   * singleton instance will be used.
   *
   * @return the NM token cache.
   */
  public NMTokenCache getNMTokenCache() {
    return nmTokenCache;
  }

  /**
   * Wait for <code>check</code> to return true, polling it every 1000 ms.
   * See also {@link #waitFor(com.google.common.base.Supplier, int)}
   * and {@link #waitFor(com.google.common.base.Supplier, int, int)}
   * @param check the condition to wait for; polled until it returns true
   */
  public void waitFor(Supplier<Boolean> check) throws InterruptedException {
    // Delegate with the default 1000 ms polling interval.
    waitFor(check, 1000);
  }

  /**
   * Wait for <code>check</code> to return true, polling it every
   * <code>checkEveryMillis</code> ms.
   * See also {@link #waitFor(com.google.common.base.Supplier, int, int)}
   * @param check user defined checker
   * @param checkEveryMillis interval to call <code>check</code>
   */
  public void waitFor(Supplier<Boolean> check, int checkEveryMillis)
      throws InterruptedException {
    // Delegate with a log interval of 1, i.e. log on every iteration.
    waitFor(check, checkEveryMillis, 1);
  }

  /**
   * Wait for <code>check</code> to return true, polling it every
   * <code>checkEveryMillis</code> ms. In the main loop, this method will log
   * the message "waiting in main loop" for each <code>logInterval</code> times
   * iteration to confirm the thread is alive.
* @param check user defined checker * @param checkEveryMillis interval to call <code>check</code> * @param logInterval interval to log for each */ public void waitFor(Supplier<Boolean> check, int checkEveryMillis, int logInterval) throws InterruptedException { Preconditions.checkNotNull(check, "check should not be null"); Preconditions.checkArgument(checkEveryMillis >= 0, "checkEveryMillis should be positive value"); Preconditions.checkArgument(logInterval >= 0, "logInterval should be positive value"); int loggingCounter = logInterval; do { if (LOG.isDebugEnabled()) { LOG.debug("Check the condition for main loop."); } boolean result = check.get(); if (result) { LOG.info("Exits the main loop."); return; } if (--loggingCounter <= 0) { LOG.info("Waiting in main loop."); loggingCounter = logInterval; } Thread.sleep(checkEveryMillis); } while (true); } }
17,420
38.956422
84
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.client.api.impl.NMClientImpl; import org.apache.hadoop.yarn.exceptions.YarnException; @InterfaceAudience.Public @InterfaceStability.Stable public abstract class NMClient extends AbstractService { /** * Create a new instance of NMClient. */ @Public public static NMClient createNMClient() { NMClient client = new NMClientImpl(); return client; } /** * Create a new instance of NMClient. 
*/ @Public public static NMClient createNMClient(String name) { NMClient client = new NMClientImpl(name); return client; } private NMTokenCache nmTokenCache = NMTokenCache.getSingleton(); @Private protected NMClient(String name) { super(name); } /** * <p>Start an allocated container.</p> * * <p>The <code>ApplicationMaster</code> or other applications that use the * client must provide the details of the allocated container, including the * Id, the assigned node's Id and the token via {@link Container}. In * addition, the AM needs to provide the {@link ContainerLaunchContext} as * well.</p> * * @param container the allocated container * @param containerLaunchContext the context information needed by the * <code>NodeManager</code> to launch the * container * @return a map between the auxiliary service names and their outputs * @throws YarnException * @throws IOException */ public abstract Map<String, ByteBuffer> startContainer(Container container, ContainerLaunchContext containerLaunchContext) throws YarnException, IOException; /** * <p>Stop an started container.</p> * * @param containerId the Id of the started container * @param nodeId the Id of the <code>NodeManager</code> * * @throws YarnException * @throws IOException */ public abstract void stopContainer(ContainerId containerId, NodeId nodeId) throws YarnException, IOException; /** * <p>Query the status of a container.</p> * * @param containerId the Id of the started container * @param nodeId the Id of the <code>NodeManager</code> * * @return the status of a container * @throws YarnException * @throws IOException */ public abstract ContainerStatus getContainerStatus(ContainerId containerId, NodeId nodeId) throws YarnException, IOException; /** * <p>Set whether the containers that are started by this client, and are * still running should be stopped when the client stops. By default, the * feature should be enabled.</p> However, containers will be stopped only * when service is stopped. i.e. 
after {@link NMClient#stop()}. * * @param enabled whether the feature is enabled or not */ public abstract void cleanupRunningContainersOnStop(boolean enabled); /** * Set the NM Token cache of the <code>NMClient</code>. This cache must be * shared with the {@link AMRMClient} that requested the containers managed * by this <code>NMClient</code> * <p> * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} * singleton instance will be used. * * @param nmTokenCache the NM token cache to use. */ public void setNMTokenCache(NMTokenCache nmTokenCache) { this.nmTokenCache = nmTokenCache; } /** * Get the NM token cache of the <code>NMClient</code>. This cache must be * shared with the {@link AMRMClient} that requested the containers managed * by this <code>NMClient</code> * <p> * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} * singleton instance will be used. * * @return the NM token cache */ public NMTokenCache getNMTokenCache() { return nmTokenCache; } }
5,289
33.575163
78
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.fs.Path; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.client.api.impl.SharedCacheClientImpl; import org.apache.hadoop.yarn.exceptions.YarnException; /** * This is the client for YARN's shared cache. */ @Public @Unstable public abstract class SharedCacheClient extends AbstractService { @Public public static SharedCacheClient createSharedCacheClient() { SharedCacheClient client = new SharedCacheClientImpl(); return client; } @Private public SharedCacheClient(String name) { super(name); } /** * <p> * The method to claim a resource with the <code>SharedCacheManager.</code> * The client uses a checksum to identify the resource and an * {@link ApplicationId} to identify which application will be using the * resource. 
* </p> * * <p> * The <code>SharedCacheManager</code> responds with whether or not the * resource exists in the cache. If the resource exists, a <code>Path</code> * to the resource in the shared cache is returned. If the resource does not * exist, null is returned instead. * </p> * * @param applicationId ApplicationId of the application using the resource * @param resourceKey the key (i.e. checksum) that identifies the resource * @return Path to the resource, or null if it does not exist */ @Public @Unstable public abstract Path use(ApplicationId applicationId, String resourceKey) throws YarnException; /** * <p> * The method to release a resource with the <code>SharedCacheManager.</code> * This method is called once an application is no longer using a claimed * resource in the shared cache. The client uses a checksum to identify the * resource and an {@link ApplicationId} to identify which application is * releasing the resource. * </p> * * <p> * Note: This method is an optimization and the client is not required to call * it for correctness. * </p> * * @param applicationId ApplicationId of the application releasing the * resource * @param resourceKey the key (i.e. checksum) that identifies the resource */ @Public @Unstable public abstract void release(ApplicationId applicationId, String resourceKey) throws YarnException; /** * A convenience method to calculate the checksum of a specified file. * * @param sourceFile A path to the input file * @return A hex string containing the checksum digest * @throws IOException */ @Public @Unstable public abstract String getFileChecksum(Path sourceFile) throws IOException; }
3,766
33.559633
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AHSClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api; import java.io.IOException; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.client.api.impl.AHSClientImpl; import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; @InterfaceAudience.Public @InterfaceStability.Stable public abstract class AHSClient extends AbstractService { /** * Create a new instance of AHSClient. 
*/ @Public public static AHSClient createAHSClient() { AHSClient client = new AHSClientImpl(); return client; } @Private public AHSClient(String name) { super(name); } /** * Get a report of the given Application. * <p> * In secure mode, <code>YARN</code> verifies access to the application, queue * etc. before accepting the request. * <p> * If the user does not have <code>VIEW_APP</code> access then the following * fields in the report will be set to stubbed values: * <ul> * <li>host - set to "N/A"</li> * <li>RPC port - set to -1</li> * <li>client token - set to "N/A"</li> * <li>diagnostics - set to "N/A"</li> * <li>tracking URL - set to "N/A"</li> * <li>original tracking URL - set to "N/A"</li> * <li>resource usage report - all values are -1</li> * </ul> * * @param appId * {@link ApplicationId} of the application that needs a report * @return application report * @throws YarnException * @throws IOException */ public abstract ApplicationReport getApplicationReport(ApplicationId appId) throws YarnException, IOException; /** * <p> * Get a report (ApplicationReport) of all Applications in the cluster. * </p> * * <p> * If the user does not have <code>VIEW_APP</code> access for an application * then the corresponding report will be filtered as described in * {@link #getApplicationReport(ApplicationId)}. * </p> * * @return a list of reports for all applications * @throws YarnException * @throws IOException */ public abstract List<ApplicationReport> getApplications() throws YarnException, IOException; /** * <p> * Get a report of the given ApplicationAttempt. * </p> * * <p> * In secure mode, <code>YARN</code> verifies access to the application, queue * etc. before accepting the request. 
* </p> * * @param applicationAttemptId * {@link ApplicationAttemptId} of the application attempt that needs * a report * @return application attempt report * @throws YarnException * @throws ApplicationAttemptNotFoundException if application attempt * not found * @throws IOException */ public abstract ApplicationAttemptReport getApplicationAttemptReport( ApplicationAttemptId applicationAttemptId) throws YarnException, IOException; /** * <p> * Get a report of all (ApplicationAttempts) of Application in the cluster. * </p> * * @param applicationId * @return a list of reports for all application attempts for specified * application * @throws YarnException * @throws IOException */ public abstract List<ApplicationAttemptReport> getApplicationAttempts( ApplicationId applicationId) throws YarnException, IOException; /** * <p> * Get a report of the given Container. * </p> * * <p> * In secure mode, <code>YARN</code> verifies access to the application, queue * etc. before accepting the request. * </p> * * @param containerId * {@link ContainerId} of the container that needs a report * @return container report * @throws YarnException * @throws ContainerNotFoundException if container not found * @throws IOException */ public abstract ContainerReport getContainerReport(ContainerId containerId) throws YarnException, IOException; /** * <p> * Get a report of all (Containers) of ApplicationAttempt in the cluster. * </p> * * @param applicationAttemptId * @return a list of reports of all containers for specified application * attempt * @throws YarnException * @throws IOException */ public abstract List<ContainerReport> getContainers( ApplicationAttemptId applicationAttemptId) throws YarnException, IOException; }
5,884
32.628571
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMTokenCache.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api; import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; import org.apache.hadoop.yarn.client.api.async.NMClientAsync; import com.google.common.annotations.VisibleForTesting; /** * NMTokenCache manages NMTokens required for an Application Master * communicating with individual NodeManagers. * <p> * By default Yarn client libraries {@link AMRMClient} and {@link NMClient} use * {@link #getSingleton()} instance of the cache. * <ul> * <li> * Using the singleton instance of the cache is appropriate when running a * single ApplicationMaster in the same JVM. 
* </li> * <li> * When using the singleton, users don't need to do anything special, * {@link AMRMClient} and {@link NMClient} are already set up to use the * default singleton {@link NMTokenCache} * </li> * </ul> * If running multiple Application Masters in the same JVM, a different cache * instance should be used for each Application Master. * <ul> * <li> * If using the {@link AMRMClient} and the {@link NMClient}, setting up * and using an instance cache is as follows: * <pre> * NMTokenCache nmTokenCache = new NMTokenCache(); * AMRMClient rmClient = AMRMClient.createAMRMClient(); * NMClient nmClient = NMClient.createNMClient(); * nmClient.setNMTokenCache(nmTokenCache); * ... * </pre> * </li> * <li> * If using the {@link AMRMClientAsync} and the {@link NMClientAsync}, * setting up and using an instance cache is as follows: * <pre> * NMTokenCache nmTokenCache = new NMTokenCache(); * AMRMClient rmClient = AMRMClient.createAMRMClient(); * NMClient nmClient = NMClient.createNMClient(); * nmClient.setNMTokenCache(nmTokenCache); * AMRMClientAsync rmClientAsync = new AMRMClientAsync(rmClient, 1000, [AMRM_CALLBACK]); * NMClientAsync nmClientAsync = new NMClientAsync("nmClient", nmClient, [NM_CALLBACK]); * ... * </pre> * </li> * <li> * If using {@link ApplicationMasterProtocol} and * {@link ContainerManagementProtocol} directly, setting up and using an * instance cache is as follows: * <pre> * NMTokenCache nmTokenCache = new NMTokenCache(); * ... * ApplicationMasterProtocol amPro = ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class); * ... * AllocateRequest allocateRequest = ... * ... * AllocateResponse allocateResponse = rmClient.allocate(allocateRequest); * for (NMToken token : allocateResponse.getNMTokens()) { * nmTokenCache.setToken(token.getNodeId().toString(), token.getToken()); * } * ... * ContainerManagementProtocolProxy nmPro = ContainerManagementProtocolProxy(conf, nmTokenCache); * ... * nmPro.startContainer(container, containerContext); * ... 
* </pre> * </li> * </ul> * It is also possible to mix the usage of a client ({@code AMRMClient} or * {@code NMClient}, or the async versions of them) with a protocol proxy * ({@code ContainerManagementProtocolProxy} or * {@code ApplicationMasterProtocol}). */ @Public @Evolving public class NMTokenCache { private static final NMTokenCache NM_TOKEN_CACHE = new NMTokenCache(); /** * Returns the singleton NM token cache. * * @return the singleton NM token cache. */ public static NMTokenCache getSingleton() { return NM_TOKEN_CACHE; } /** * Returns NMToken, null if absent. Only the singleton obtained from * {@link #getSingleton()} is looked at for the tokens. If you are using your * own NMTokenCache that is different from the singleton, use * {@link #getToken(String) } * * @param nodeAddr * @return {@link Token} NMToken required for communicating with node manager */ @Public public static Token getNMToken(String nodeAddr) { return NM_TOKEN_CACHE.getToken(nodeAddr); } /** * Sets the NMToken for node address only in the singleton obtained from * {@link #getSingleton()}. If you are using your own NMTokenCache that is * different from the singleton, use {@link #setToken(String, Token) } * * @param nodeAddr * node address (host:port) * @param token * NMToken */ @Public public static void setNMToken(String nodeAddr, Token token) { NM_TOKEN_CACHE.setToken(nodeAddr, token); } private ConcurrentHashMap<String, Token> nmTokens; /** * Creates a NM token cache instance. 
*/ public NMTokenCache() { nmTokens = new ConcurrentHashMap<String, Token>(); } /** * Returns NMToken, null if absent * @param nodeAddr * @return {@link Token} NMToken required for communicating with node * manager */ @Public @Evolving public Token getToken(String nodeAddr) { return nmTokens.get(nodeAddr); } /** * Sets the NMToken for node address * @param nodeAddr node address (host:port) * @param token NMToken */ @Public @Evolving public void setToken(String nodeAddr, Token token) { nmTokens.put(nodeAddr, token); } /** * Returns true if NMToken is present in cache. */ @Private @VisibleForTesting public boolean containsToken(String nodeAddr) { return nmTokens.containsKey(nodeAddr); } /** * Returns the number of NMTokens present in cache. */ @Private @VisibleForTesting public int numberOfTokensInCache() { return nmTokens.size(); } /** * Removes NMToken for specified node manager * @param nodeAddr node address (host:port) */ @Private @VisibleForTesting public void removeToken(String nodeAddr) { nmTokens.remove(nodeAddr); } /** * It will remove all the nm tokens from its cache */ @Private @VisibleForTesting public void clearCache() { nmTokens.clear(); } }
6,978
31.013761
106
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/InvalidContainerRequestException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.client.api;

import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

/**
 * Thrown when arguments are combined to construct an
 * <code>AMRMClient.ContainerRequest</code> in an invalid way.
 */
public class InvalidContainerRequestException extends YarnRuntimeException {

  // Exceptions are Serializable; declare an explicit serialVersionUID so
  // serialization does not depend on a compiler-generated value.
  private static final long serialVersionUID = 1L;

  public InvalidContainerRequestException(Throwable cause) {
    super(cause);
  }

  public InvalidContainerRequestException(String message) {
    super(message);
  }

  public InvalidContainerRequestException(String message, Throwable cause) {
    super(message, cause);
  }
}
1,398
34.871795
76
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClientApplication.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; @InterfaceAudience.Public @InterfaceStability.Stable /** * Holder for the {@link GetNewApplicationResponse} and {@link * ApplicationSubmissionContext} objects created via {@link org.apache.hadoop * .yarn.client.api.YarnClient#createApplication()} */ public class YarnClientApplication { private final GetNewApplicationResponse newAppResponse; private final ApplicationSubmissionContext appSubmissionContext; public YarnClientApplication(GetNewApplicationResponse newAppResponse, ApplicationSubmissionContext appContext) { this.newAppResponse = newAppResponse; this.appSubmissionContext = appContext; } public GetNewApplicationResponse getNewApplicationResponse() { return newAppResponse; } public ApplicationSubmissionContext getApplicationSubmissionContext() { return appSubmissionContext; } }
1,989
36.54717
77
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api; import java.io.IOException; import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.io.Text; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import 
org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.ReservationDefinition; import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.client.api.impl.YarnClientImpl; import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; import org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; @InterfaceAudience.Public @InterfaceStability.Stable public abstract class YarnClient extends AbstractService { /** * Create a new instance of YarnClient. 
*/ @Public public static YarnClient createYarnClient() { YarnClient client = new YarnClientImpl(); return client; } @Private protected YarnClient(String name) { super(name); } /** * <p> * Obtain a {@link YarnClientApplication} for a new application, * which in turn contains the {@link ApplicationSubmissionContext} and * {@link org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse} * objects. * </p> * * @return {@link YarnClientApplication} built for a new application * @throws YarnException * @throws IOException */ public abstract YarnClientApplication createApplication() throws YarnException, IOException; /** * <p> * Submit a new application to <code>YARN.</code> It is a blocking call - it * will not return {@link ApplicationId} until the submitted application is * submitted successfully and accepted by the ResourceManager. * </p> * * <p> * Users should provide an {@link ApplicationId} as part of the parameter * {@link ApplicationSubmissionContext} when submitting a new application, * otherwise it will throw the {@link ApplicationIdNotProvidedException}. * </p> * * <p>This internally calls {@link ApplicationClientProtocol#submitApplication * (SubmitApplicationRequest)}, and after that, it internally invokes * {@link ApplicationClientProtocol#getApplicationReport * (GetApplicationReportRequest)} and waits till it can make sure that the * application gets properly submitted. If RM fails over or RM restart * happens before ResourceManager saves the application's state, * {@link ApplicationClientProtocol * #getApplicationReport(GetApplicationReportRequest)} will throw * the {@link ApplicationNotFoundException}. 
This API automatically resubmits * the application with the same {@link ApplicationSubmissionContext} when it * catches the {@link ApplicationNotFoundException}</p> * * @param appContext * {@link ApplicationSubmissionContext} containing all the details * needed to submit a new application * @return {@link ApplicationId} of the accepted application * @throws YarnException * @throws IOException * @see #createApplication() */ public abstract ApplicationId submitApplication( ApplicationSubmissionContext appContext) throws YarnException, IOException; /** * <p> * Kill an application identified by given ID. * </p> * * @param applicationId * {@link ApplicationId} of the application that needs to be killed * @throws YarnException * in case of errors or if YARN rejects the request due to * access-control restrictions. * @throws IOException * @see #getQueueAclsInfo() */ public abstract void killApplication(ApplicationId applicationId) throws YarnException, IOException; /** * <p> * Get a report of the given Application. * </p> * * <p> * In secure mode, <code>YARN</code> verifies access to the application, queue * etc. before accepting the request. * </p> * * <p> * If the user does not have <code>VIEW_APP</code> access then the following * fields in the report will be set to stubbed values: * <ul> * <li>host - set to "N/A"</li> * <li>RPC port - set to -1</li> * <li>client token - set to "N/A"</li> * <li>diagnostics - set to "N/A"</li> * <li>tracking URL - set to "N/A"</li> * <li>original tracking URL - set to "N/A"</li> * <li>resource usage report - all values are -1</li> * </ul> * * @param appId * {@link ApplicationId} of the application that needs a report * @return application report * @throws YarnException * @throws IOException */ public abstract ApplicationReport getApplicationReport(ApplicationId appId) throws YarnException, IOException; /** * Get the AMRM token of the application. * <p> * The AMRM token is required for AM to RM scheduling operations. 
For * managed Application Masters Yarn takes care of injecting it. For unmanaged * Applications Masters, the token must be obtained via this method and set * in the {@link org.apache.hadoop.security.UserGroupInformation} of the * current user. * <p> * The AMRM token will be returned only if all the following conditions are * met: * <ul> * <li>the requester is the owner of the ApplicationMaster</li> * <li>the application master is an unmanaged ApplicationMaster</li> * <li>the application master is in ACCEPTED state</li> * </ul> * Else this method returns NULL. * * @param appId {@link ApplicationId} of the application to get the AMRM token * @return the AMRM token if available * @throws YarnException * @throws IOException */ public abstract org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> getAMRMToken(ApplicationId appId) throws YarnException, IOException; /** * <p> * Get a report (ApplicationReport) of all Applications in the cluster. * </p> * * <p> * If the user does not have <code>VIEW_APP</code> access for an application * then the corresponding report will be filtered as described in * {@link #getApplicationReport(ApplicationId)}. * </p> * * @return a list of reports of all running applications * @throws YarnException * @throws IOException */ public abstract List<ApplicationReport> getApplications() throws YarnException, IOException; /** * <p> * Get a report (ApplicationReport) of Applications * matching the given application types in the cluster. * </p> * * <p> * If the user does not have <code>VIEW_APP</code> access for an application * then the corresponding report will be filtered as described in * {@link #getApplicationReport(ApplicationId)}. 
* </p> * * @param applicationTypes set of application types you are interested in * @return a list of reports of applications * @throws YarnException * @throws IOException */ public abstract List<ApplicationReport> getApplications( Set<String> applicationTypes) throws YarnException, IOException; /** * <p> * Get a report (ApplicationReport) of Applications matching the given * application states in the cluster. * </p> * * <p> * If the user does not have <code>VIEW_APP</code> access for an application * then the corresponding report will be filtered as described in * {@link #getApplicationReport(ApplicationId)}. * </p> * * @param applicationStates set of application states you are interested in * @return a list of reports of applications * @throws YarnException * @throws IOException */ public abstract List<ApplicationReport> getApplications(EnumSet<YarnApplicationState> applicationStates) throws YarnException, IOException; /** * <p> * Get a report (ApplicationReport) of Applications matching the given * application types and application states in the cluster. * </p> * * <p> * If the user does not have <code>VIEW_APP</code> access for an application * then the corresponding report will be filtered as described in * {@link #getApplicationReport(ApplicationId)}. * </p> * * @param applicationTypes set of application types you are interested in * @param applicationStates set of application states you are interested in * @return a list of reports of applications * @throws YarnException * @throws IOException */ public abstract List<ApplicationReport> getApplications( Set<String> applicationTypes, EnumSet<YarnApplicationState> applicationStates) throws YarnException, IOException; /** * <p> * Get a report (ApplicationReport) of Applications matching the given users, * queues, application types and application states in the cluster. If any of * the params is set to null, it is not used when filtering. 
* </p> * * <p> * If the user does not have <code>VIEW_APP</code> access for an application * then the corresponding report will be filtered as described in * {@link #getApplicationReport(ApplicationId)}. * </p> * * @param queues set of queues you are interested in * @param users set of users you are interested in * @param applicationTypes set of application types you are interested in * @param applicationStates set of application states you are interested in * @return a list of reports of applications * @throws YarnException * @throws IOException */ public abstract List<ApplicationReport> getApplications(Set<String> queues, Set<String> users, Set<String> applicationTypes, EnumSet<YarnApplicationState> applicationStates) throws YarnException, IOException; /** * <p> * Get metrics ({@link YarnClusterMetrics}) about the cluster. * </p> * * @return cluster metrics * @throws YarnException * @throws IOException */ public abstract YarnClusterMetrics getYarnClusterMetrics() throws YarnException, IOException; /** * <p> * Get a report of nodes ({@link NodeReport}) in the cluster. * </p> * * @param states The {@link NodeState}s to filter on. If no filter states are * given, nodes in all states will be returned. * @return A list of node reports * @throws YarnException * @throws IOException */ public abstract List<NodeReport> getNodeReports(NodeState... states) throws YarnException, IOException; /** * <p> * Get a delegation token so as to be able to talk to YARN using those tokens. * * @param renewer * Address of the renewer who can renew these tokens when needed by * securely talking to YARN. * @return a delegation token ({@link Token}) that can be used to * talk to YARN * @throws YarnException * @throws IOException */ public abstract Token getRMDelegationToken(Text renewer) throws YarnException, IOException; /** * <p> * Get information ({@link QueueInfo}) about a given <em>queue</em>. 
* </p> * * @param queueName * Name of the queue whose information is needed * @return queue information * @throws YarnException * in case of errors or if YARN rejects the request due to * access-control restrictions. * @throws IOException */ public abstract QueueInfo getQueueInfo(String queueName) throws YarnException, IOException; /** * <p> * Get information ({@link QueueInfo}) about all queues, recursively if there * is a hierarchy * </p> * * @return a list of queue-information for all queues * @throws YarnException * @throws IOException */ public abstract List<QueueInfo> getAllQueues() throws YarnException, IOException; /** * <p> * Get information ({@link QueueInfo}) about top level queues. * </p> * * @return a list of queue-information for all the top-level queues * @throws YarnException * @throws IOException */ public abstract List<QueueInfo> getRootQueueInfos() throws YarnException, IOException; /** * <p> * Get information ({@link QueueInfo}) about all the immediate children queues * of the given queue * </p> * * @param parent * Name of the queue whose child-queues' information is needed * @return a list of queue-information for all queues who are direct children * of the given parent queue. * @throws YarnException * @throws IOException */ public abstract List<QueueInfo> getChildQueueInfos(String parent) throws YarnException, IOException; /** * <p> * Get information about <em>acls</em> for <em>current user</em> on all the * existing queues. * </p> * * @return a list of queue acls ({@link QueueUserACLInfo}) for * <em>current user</em> * @throws YarnException * @throws IOException */ public abstract List<QueueUserACLInfo> getQueueAclsInfo() throws YarnException, IOException; /** * <p> * Get a report of the given ApplicationAttempt. * </p> * * <p> * In secure mode, <code>YARN</code> verifies access to the application, queue * etc. before accepting the request. 
* </p> * * @param applicationAttemptId * {@link ApplicationAttemptId} of the application attempt that needs * a report * @return application attempt report * @throws YarnException * @throws ApplicationAttemptNotFoundException if application attempt * not found * @throws IOException */ public abstract ApplicationAttemptReport getApplicationAttemptReport( ApplicationAttemptId applicationAttemptId) throws YarnException, IOException; /** * <p> * Get a report of all (ApplicationAttempts) of Application in the cluster. * </p> * * @param applicationId application id of the app * @return a list of reports for all application attempts for specified * application. * @throws YarnException * @throws IOException */ public abstract List<ApplicationAttemptReport> getApplicationAttempts( ApplicationId applicationId) throws YarnException, IOException; /** * <p> * Get a report of the given Container. * </p> * * <p> * In secure mode, <code>YARN</code> verifies access to the application, queue * etc. before accepting the request. * </p> * * @param containerId * {@link ContainerId} of the container that needs a report * @return container report * @throws YarnException * @throws ContainerNotFoundException if container not found. * @throws IOException */ public abstract ContainerReport getContainerReport(ContainerId containerId) throws YarnException, IOException; /** * <p> * Get a report of all (Containers) of ApplicationAttempt in the cluster. * </p> * * @param applicationAttemptId application attempt id * @return a list of reports of all containers for specified application * attempts * @throws YarnException * @throws IOException */ public abstract List<ContainerReport> getContainers( ApplicationAttemptId applicationAttemptId) throws YarnException, IOException; /** * <p> * Attempts to move the given application to the given queue. * </p> * * @param appId * Application to move. * @param queue * Queue to place it in to. 
* @throws YarnException * @throws IOException */ public abstract void moveApplicationAcrossQueues(ApplicationId appId, String queue) throws YarnException, IOException; /** * <p> * The interface used by clients to submit a new reservation to the * {@code ResourceManager}. * </p> * * <p> * The client packages all details of its request in a * {@link ReservationSubmissionRequest} object. This contains information * about the amount of capacity, temporal constraints, and gang needs. * Furthermore, the reservation might be composed of multiple stages, with * ordering dependencies among them. * </p> * * <p> * In order to respond, a new admission control component in the * {@code ResourceManager} performs an analysis of the resources that have * been committed over the period of time the user is requesting, verify that * the user requests can be fulfilled, and that it respect a sharing policy * (e.g., {@code CapacityOverTimePolicy}). Once it has positively determined * that the ReservationRequest is satisfiable the {@code ResourceManager} * answers with a {@link ReservationSubmissionResponse} that includes a * {@link ReservationId}. Upon failure to find a valid allocation the response * is an exception with the message detailing the reason of failure. * </p> * * <p> * The semantics guarantees that the {@link ReservationId} returned, * corresponds to a valid reservation existing in the time-range request by * the user. The amount of capacity dedicated to such reservation can vary * overtime, depending of the allocation that has been determined. 
But it is * guaranteed to satisfy all the constraint expressed by the user in the * {@link ReservationDefinition} * </p> * * @param request request to submit a new Reservation * @return response contains the {@link ReservationId} on accepting the * submission * @throws YarnException if the reservation cannot be created successfully * @throws IOException * */ @Public @Unstable public abstract ReservationSubmissionResponse submitReservation( ReservationSubmissionRequest request) throws YarnException, IOException; /** * <p> * The interface used by clients to update an existing Reservation. This is * referred to as a re-negotiation process, in which a user that has * previously submitted a Reservation. * </p> * * <p> * The allocation is attempted by virtually substituting all previous * allocations related to this Reservation with new ones, that satisfy the new * {@link ReservationDefinition}. Upon success the previous allocation is * atomically substituted by the new one, and on failure (i.e., if the system * cannot find a valid allocation for the updated request), the previous * allocation remains valid. * </p> * * @param request to update an existing Reservation (the * {@link ReservationUpdateRequest} should refer to an existing valid * {@link ReservationId}) * @return response empty on successfully updating the existing reservation * @throws YarnException if the request is invalid or reservation cannot be * updated successfully * @throws IOException * */ @Public @Unstable public abstract ReservationUpdateResponse updateReservation( ReservationUpdateRequest request) throws YarnException, IOException; /** * <p> * The interface used by clients to remove an existing Reservation. 
* </p> * * @param request to remove an existing Reservation (the * {@link ReservationDeleteRequest} should refer to an existing valid * {@link ReservationId}) * @return response empty on successfully deleting the existing reservation * @throws YarnException if the request is invalid or reservation cannot be * deleted successfully * @throws IOException * */ @Public @Unstable public abstract ReservationDeleteResponse deleteReservation( ReservationDeleteRequest request) throws YarnException, IOException; /** * <p> * The interface used by client to get node to labels mappings in existing cluster * </p> * * @return node to labels mappings * @throws YarnException * @throws IOException */ @Public @Unstable public abstract Map<NodeId, Set<NodeLabel>> getNodeToLabels() throws YarnException, IOException; /** * <p> * The interface used by client to get labels to nodes mapping * in existing cluster * </p> * * @return node to labels mappings * @throws YarnException * @throws IOException */ @Public @Unstable public abstract Map<NodeLabel, Set<NodeId>> getLabelsToNodes() throws YarnException, IOException; /** * <p> * The interface used by client to get labels to nodes mapping * for specified labels in existing cluster * </p> * * @param labels labels for which labels to nodes mapping has to be retrieved * @return labels to nodes mappings for specific labels * @throws YarnException * @throws IOException */ @Public @Unstable public abstract Map<NodeLabel, Set<NodeId>> getLabelsToNodes( Set<String> labels) throws YarnException, IOException; /** * <p> * The interface used by client to get node labels in the cluster * </p> * * @return cluster node labels collection * @throws YarnException * @throws IOException */ @Public @Unstable public abstract List<NodeLabel> getClusterNodeLabels() throws YarnException, IOException; }
23,724
34.410448
89
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Public package org.apache.hadoop.yarn.client.api.async; import org.apache.hadoop.classification.InterfaceAudience;
941
41.818182
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.client.api.async;

import com.google.common.base.Preconditions;
import com.google.common.base.Supplier;

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.client.api.async.impl.AMRMClientAsyncImpl;
import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl;
import org.apache.hadoop.yarn.exceptions.YarnException;

import com.google.common.annotations.VisibleForTesting;

/**
 * <code>AMRMClientAsync</code> handles communication with the ResourceManager
 * and provides asynchronous updates on events such as container allocations
 * and completions. It contains a thread that sends periodic heartbeats to the
 * ResourceManager.
 *
 * It should be used by implementing a CallbackHandler:
 * <pre>
 * {@code
 * class MyCallbackHandler implements AMRMClientAsync.CallbackHandler {
 *   public void onContainersAllocated(List<Container> containers) {
 *     [run tasks on the containers]
 *   }
 *
 *   public void onContainersCompleted(List<ContainerStatus> statuses) {
 *     [update progress, check whether app is done]
 *   }
 *
 *   public void onNodesUpdated(List<NodeReport> updated) {}
 *
 *   public void onReboot() {}
 * }
 * }
 * </pre>
 *
 * The client's lifecycle should be managed similarly to the following:
 *
 * <pre>
 * {@code
 * AMRMClientAsync asyncClient =
 *     createAMRMClientAsync(appAttId, 1000, new MyCallbackhandler());
 * asyncClient.init(conf);
 * asyncClient.start();
 * RegisterApplicationMasterResponse response = asyncClient
 *     .registerApplicationMaster(appMasterHostname, appMasterRpcPort,
 *        appMasterTrackingUrl);
 * asyncClient.addContainerRequest(containerRequest);
 * [... wait for application to complete]
 * asyncClient.unregisterApplicationMaster(status, appMsg, trackingUrl);
 * asyncClient.stop();
 * }
 * </pre>
 */
@Public
@Stable
public abstract class AMRMClientAsync<T extends ContainerRequest>
    extends AbstractService {
  private static final Log LOG = LogFactory.getLog(AMRMClientAsync.class);

  /** Underlying synchronous client that performs the RM communication. */
  protected final AMRMClient<T> client;
  /** Application-supplied handler notified of heartbeat events. */
  protected final CallbackHandler handler;
  /** Heartbeat period in milliseconds; adjustable at runtime. */
  protected final AtomicInteger heartbeatIntervalMs = new AtomicInteger();

  /**
   * Create an async client backed by the default {@link AMRMClient}
   * implementation.
   *
   * @param intervalMs heartbeat interval in milliseconds
   * @param callbackHandler handler notified of allocations/completions
   */
  public static <T extends ContainerRequest> AMRMClientAsync<T>
      createAMRMClientAsync(int intervalMs, CallbackHandler callbackHandler) {
    return new AMRMClientAsyncImpl<T>(intervalMs, callbackHandler);
  }

  /**
   * Create an async client wrapping the given synchronous client.
   *
   * @param client synchronous client to delegate to
   * @param intervalMs heartbeat interval in milliseconds
   * @param callbackHandler handler notified of allocations/completions
   */
  public static <T extends ContainerRequest> AMRMClientAsync<T>
      createAMRMClientAsync(AMRMClient<T> client, int intervalMs,
          CallbackHandler callbackHandler) {
    return new AMRMClientAsyncImpl<T>(client, intervalMs, callbackHandler);
  }

  protected AMRMClientAsync(int intervalMs, CallbackHandler callbackHandler) {
    this(new AMRMClientImpl<T>(), intervalMs, callbackHandler);
  }

  @Private
  @VisibleForTesting
  protected AMRMClientAsync(AMRMClient<T> client, int intervalMs,
      CallbackHandler callbackHandler) {
    super(AMRMClientAsync.class.getName());
    this.client = client;
    this.heartbeatIntervalMs.set(intervalMs);
    this.handler = callbackHandler;
  }

  /** Change the heartbeat interval; takes effect on the next heartbeat. */
  public void setHeartbeatInterval(int interval) {
    heartbeatIntervalMs.set(interval);
  }

  public abstract List<? extends Collection<T>> getMatchingRequests(
      Priority priority, String resourceName, Resource capability);

  /**
   * Registers this application master with the resource manager. On
   * successful registration, starts the heartbeating thread.
   * @throws YarnException
   * @throws IOException
   */
  public abstract RegisterApplicationMasterResponse registerApplicationMaster(
      String appHostName, int appHostPort, String appTrackingUrl)
      throws YarnException, IOException;

  /**
   * Unregister the application master. This must be called in the end.
   * @param appStatus Success/Failure status of the master
   * @param appMessage Diagnostics message on failure
   * @param appTrackingUrl New URL to get master info
   * @throws YarnException
   * @throws IOException
   */
  public abstract void unregisterApplicationMaster(
      FinalApplicationStatus appStatus, String appMessage,
      String appTrackingUrl) throws YarnException, IOException;

  /**
   * Request containers for resources before calling <code>allocate</code>
   * @param req Resource request
   */
  public abstract void addContainerRequest(T req);

  /**
   * Remove previous container request. The previous container request may
   * have already been sent to the ResourceManager. So even after the remove
   * request the app must be prepared to receive an allocation for the
   * previous request even after the remove request
   * @param req Resource request
   */
  public abstract void removeContainerRequest(T req);

  /**
   * Release containers assigned by the Resource Manager. If the app cannot
   * use the container or wants to give up the container then it can release
   * them. The app needs to make new requests for the released resource
   * capability if it still needs it. eg. it released non-local resources
   * @param containerId
   */
  public abstract void releaseAssignedContainer(ContainerId containerId);

  /**
   * Get the currently available resources in the cluster.
   * A valid value is available after a call to allocate has been made
   * @return Currently available resources
   */
  public abstract Resource getAvailableResources();

  /**
   * Get the current number of nodes in the cluster.
   * A valid values is available after a call to allocate has been made
   * @return Current number of nodes in the cluster
   */
  public abstract int getClusterNodeCount();

  /**
   * Update application's blacklist with addition or removal resources.
   *
   * @param blacklistAdditions list of resources which should be added to the
   *        application blacklist
   * @param blacklistRemovals list of resources which should be removed from
   *        the application blacklist
   */
  public abstract void updateBlacklist(List<String> blacklistAdditions,
      List<String> blacklistRemovals);

  /**
   * Wait for <code>check</code> to return true for each 1000 ms.
   * See also {@link #waitFor(com.google.common.base.Supplier, int)}
   * and {@link #waitFor(com.google.common.base.Supplier, int, int)}
   * @param check
   */
  public void waitFor(Supplier<Boolean> check) throws InterruptedException {
    waitFor(check, 1000);
  }

  /**
   * Wait for <code>check</code> to return true for each
   * <code>checkEveryMillis</code> ms.
   * See also {@link #waitFor(com.google.common.base.Supplier, int, int)}
   * @param check user defined checker
   * @param checkEveryMillis interval to call <code>check</code>
   */
  public void waitFor(Supplier<Boolean> check, int checkEveryMillis)
      throws InterruptedException {
    waitFor(check, checkEveryMillis, 1);
  }

  /**
   * Wait for <code>check</code> to return true for each
   * <code>checkEveryMillis</code> ms. In the main loop, this method will log
   * the message "waiting in main loop" for each <code>logInterval</code>
   * times iteration to confirm the thread is alive.
   * @param check user defined checker
   * @param checkEveryMillis interval to call <code>check</code>
   * @param logInterval interval to log for each
   */
  public void waitFor(Supplier<Boolean> check, int checkEveryMillis,
      int logInterval) throws InterruptedException {
    Preconditions.checkNotNull(check, "check should not be null");
    // Messages must match the checks: zero is accepted by both arguments
    // (a zero checkEveryMillis means poll with no sleep between checks).
    Preconditions.checkArgument(checkEveryMillis >= 0,
        "checkEveryMillis should be a non-negative value");
    Preconditions.checkArgument(logInterval >= 0,
        "logInterval should be a non-negative value");

    int loggingCounter = logInterval;
    do {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Check the condition for main loop.");
      }

      boolean result = check.get();
      if (result) {
        LOG.info("Exits the main loop.");
        return;
      }
      if (--loggingCounter <= 0) {
        LOG.info("Waiting in main loop.");
        loggingCounter = logInterval;
      }

      Thread.sleep(checkEveryMillis);
    } while (true);
  }

  public interface CallbackHandler {

    /**
     * Called when the ResourceManager responds to a heartbeat with completed
     * containers. If the response contains both completed containers and
     * allocated containers, this will be called before containersAllocated.
     */
    public void onContainersCompleted(List<ContainerStatus> statuses);

    /**
     * Called when the ResourceManager responds to a heartbeat with allocated
     * containers. If the response contains both completed containers and
     * allocated containers, this will be called after containersCompleted.
     */
    public void onContainersAllocated(List<Container> containers);

    /**
     * Called when the ResourceManager wants the ApplicationMaster to shutdown
     * for being out of sync etc. The ApplicationMaster should not unregister
     * with the RM unless the ApplicationMaster wants to be the last attempt.
     */
    public void onShutdownRequest();

    /**
     * Called when nodes tracked by the ResourceManager have changed in
     * health, availability etc.
     */
    public void onNodesUpdated(List<NodeReport> updatedNodes);

    public float getProgress();

    /**
     * Called when error comes from RM communications as well as from errors
     * in the callback itself from the app. Calling
     * stop() is the recommended action.
     *
     * @param e
     */
    public void onError(Throwable e);
  }
}
11,580
36.600649
84
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/NMClientAsync.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.client.api.async;

import java.nio.ByteBuffer;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.client.api.NMClient;
import org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl;
import org.apache.hadoop.yarn.client.api.impl.NMClientImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

import com.google.common.annotations.VisibleForTesting;

/**
 * <code>NMClientAsync</code> handles communication with all the NodeManagers
 * and provides asynchronous updates on getting responses from them. It
 * maintains a thread pool to communicate with individual NMs where a number
 * of worker threads process requests to NMs by using {@link NMClientImpl}.
 * The max size of the thread pool is configurable through
 * {@link YarnConfiguration#NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE}.
 *
 * It should be used in conjunction with a CallbackHandler. For example
 *
 * <pre>
 * {@code
 * class MyCallbackHandler implements NMClientAsync.CallbackHandler {
 *   public void onContainerStarted(ContainerId containerId,
 *       Map<String, ByteBuffer> allServiceResponse) {
 *     [post process after the container is started, process the response]
 *   }
 *
 *   public void onContainerStatusReceived(ContainerId containerId,
 *       ContainerStatus containerStatus) {
 *     [make use of the status of the container]
 *   }
 *
 *   public void onContainerStopped(ContainerId containerId) {
 *     [post process after the container is stopped]
 *   }
 *
 *   public void onStartContainerError(
 *       ContainerId containerId, Throwable t) {
 *     [handle the raised exception]
 *   }
 *
 *   public void onGetContainerStatusError(
 *       ContainerId containerId, Throwable t) {
 *     [handle the raised exception]
 *   }
 *
 *   public void onStopContainerError(
 *       ContainerId containerId, Throwable t) {
 *     [handle the raised exception]
 *   }
 * }
 * }
 * </pre>
 *
 * The client's life-cycle should be managed like the following:
 *
 * <pre>
 * {@code
 * NMClientAsync asyncClient =
 *     NMClientAsync.createNMClientAsync(new MyCallbackhandler());
 * asyncClient.init(conf);
 * asyncClient.start();
 * asyncClient.startContainer(container, containerLaunchContext);
 * [... wait for container being started]
 * asyncClient.getContainerStatus(container.getId(), container.getNodeId(),
 *     container.getContainerToken());
 * [... handle the status in the callback instance]
 * asyncClient.stopContainer(container.getId(), container.getNodeId(),
 *     container.getContainerToken());
 * [... wait for container being stopped]
 * asyncClient.stop();
 * }
 * </pre>
 */
@Public
@Stable
public abstract class NMClientAsync extends AbstractService {

  /** Underlying synchronous client used by the worker threads. */
  protected NMClient client;
  /** Application-supplied handler notified of NM responses. */
  protected CallbackHandler callbackHandler;

  /**
   * Create an async client backed by the default {@link NMClient}
   * implementation.
   *
   * @param callbackHandler handler notified of NodeManager responses
   */
  public static NMClientAsync createNMClientAsync(
      CallbackHandler callbackHandler) {
    return new NMClientAsyncImpl(callbackHandler);
  }

  protected NMClientAsync(CallbackHandler callbackHandler) {
    this(NMClientAsync.class.getName(), callbackHandler);
  }

  protected NMClientAsync(String name, CallbackHandler callbackHandler) {
    this(name, new NMClientImpl(), callbackHandler);
  }

  @Private
  @VisibleForTesting
  protected NMClientAsync(String name, NMClient client,
      CallbackHandler callbackHandler) {
    super(name);
    this.setClient(client);
    this.setCallbackHandler(callbackHandler);
  }

  public abstract void startContainerAsync(
      Container container, ContainerLaunchContext containerLaunchContext);

  public abstract void stopContainerAsync(
      ContainerId containerId, NodeId nodeId);

  public abstract void getContainerStatusAsync(
      ContainerId containerId, NodeId nodeId);

  public NMClient getClient() {
    return client;
  }

  public void setClient(NMClient client) {
    this.client = client;
  }

  public CallbackHandler getCallbackHandler() {
    return callbackHandler;
  }

  public void setCallbackHandler(CallbackHandler callbackHandler) {
    this.callbackHandler = callbackHandler;
  }

  /**
   * <p>
   * The callback interface needs to be implemented by {@link NMClientAsync}
   * users. The APIs are called when responses from <code>NodeManager</code>
   * are available.
   * </p>
   *
   * <p>
   * Once a callback happens, the users can chose to act on it in blocking or
   * non-blocking manner. If the action on callback is done in a blocking
   * manner, some of the threads performing requests on NodeManagers may get
   * blocked depending on how many threads in the pool are busy.
   * </p>
   *
   * <p>
   * The implementation of the callback function should not throw the
   * unexpected exception. Otherwise, {@link NMClientAsync} will just
   * catch, log and then ignore it.
   * </p>
   */
  public static interface CallbackHandler {
    /**
     * The API is called when <code>NodeManager</code> responds to indicate
     * its acceptance of the starting container request
     * @param containerId the Id of the container
     * @param allServiceResponse a Map between the auxiliary service names and
     *                           their outputs
     */
    void onContainerStarted(ContainerId containerId,
        Map<String, ByteBuffer> allServiceResponse);

    /**
     * The API is called when <code>NodeManager</code> responds with the
     * status of the container
     * @param containerId the Id of the container
     * @param containerStatus the status of the container
     */
    void onContainerStatusReceived(ContainerId containerId,
        ContainerStatus containerStatus);

    /**
     * The API is called when <code>NodeManager</code> responds to indicate
     * the container is stopped.
     * @param containerId the Id of the container
     */
    void onContainerStopped(ContainerId containerId);

    /**
     * The API is called when an exception is raised in the process of
     * starting a container
     *
     * @param containerId the Id of the container
     * @param t the raised exception
     */
    void onStartContainerError(ContainerId containerId, Throwable t);

    /**
     * The API is called when an exception is raised in the process of
     * querying the status of a container
     *
     * @param containerId the Id of the container
     * @param t the raised exception
     */
    void onGetContainerStatusError(ContainerId containerId, Throwable t);

    /**
     * The API is called when an exception is raised in the process of
     * stopping a container
     *
     * @param containerId the Id of the container
     * @param t the raised exception
     */
    void onStopContainerError(ContainerId containerId, Throwable t);
  }
}
8,110
33.079832
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Public package org.apache.hadoop.yarn.client.api.async.impl; import org.apache.hadoop.classification.InterfaceAudience;
946
42.045455
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.client.api.async.impl;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.client.api.NMClient;
import org.apache.hadoop.yarn.client.api.async.NMClientAsync;
import org.apache.hadoop.yarn.client.api.impl.NMClientImpl;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AbstractEvent;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.state.InvalidStateTransitionException;
import org.apache.hadoop.yarn.state.MultipleArcTransition;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

/**
 * Asynchronous implementation of {@link NMClientAsync}. Requests
 * (start / stop / query-status) are turned into {@link ContainerEvent}s and
 * put on a queue; a single dispatcher thread drains the queue and hands each
 * event to a dynamically-sized {@link ThreadPoolExecutor}, whose workers
 * perform the blocking RPCs through the wrapped synchronous {@link NMClient}
 * and invoke the user's {@link CallbackHandler}. Per-container event ordering
 * is enforced by a small state machine ({@link StatefulContainer}).
 */
@Private
@Unstable
public class NMClientAsyncImpl extends NMClientAsync {

  private static final Log LOG = LogFactory.getLog(NMClientAsyncImpl.class);

  // Initial core-pool size; also used as a growth buffer when resizing.
  protected static final int INITIAL_THREAD_POOL_SIZE = 10;

  protected ThreadPoolExecutor threadPool;
  // Upper bound for the pool, read from configuration in serviceInit().
  protected int maxThreadPoolSize;
  // Single thread that moves events from the queue into the pool.
  protected Thread eventDispatcherThread;
  // Set once in serviceStop(); checked by the dispatcher loop.
  protected AtomicBoolean stopped = new AtomicBoolean(false);
  protected BlockingQueue<ContainerEvent> events =
      new LinkedBlockingQueue<ContainerEvent>();

  // Tracks one state machine per container that has been scheduled to start.
  protected ConcurrentMap<ContainerId, StatefulContainer> containers =
      new ConcurrentHashMap<ContainerId, StatefulContainer>();

  public NMClientAsyncImpl(CallbackHandler callbackHandler) {
    this(NMClientAsync.class.getName(), callbackHandler);
  }

  public NMClientAsyncImpl(String name, CallbackHandler callbackHandler) {
    this(name, new NMClientImpl(), callbackHandler);
  }

  @Private
  @VisibleForTesting
  protected NMClientAsyncImpl(String name, NMClient client,
      CallbackHandler callbackHandler) {
    super(name, client, callbackHandler);
    // NOTE(review): these assignments duplicate what the super constructor
    // already does — presumably kept for clarity; confirm before removing.
    this.client = client;
    this.callbackHandler = callbackHandler;
  }

  /**
   * Reads the maximum thread-pool size from configuration and initializes the
   * wrapped synchronous client.
   */
  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    this.maxThreadPoolSize = conf.getInt(
        YarnConfiguration.NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE,
        YarnConfiguration.DEFAULT_NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE);
    LOG.info("Upper bound of the thread pool size is " + maxThreadPoolSize);
    client.init(conf);
    super.serviceInit(conf);
  }

  /**
   * Starts the wrapped client, the worker pool, and the dispatcher thread.
   * The pool's core size grows with the number of distinct nodes seen so far
   * (capped at {@code maxThreadPoolSize}).
   */
  @Override
  protected void serviceStart() throws Exception {
    client.start();

    ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(
        this.getClass().getName() + " #%d").setDaemon(true).build();

    // Start with a default core-pool size and change it dynamically.
    int initSize = Math.min(INITIAL_THREAD_POOL_SIZE, maxThreadPoolSize);
    threadPool = new ThreadPoolExecutor(initSize, Integer.MAX_VALUE, 1,
        TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
    eventDispatcherThread = new Thread() {
      @Override
      public void run() {
        ContainerEvent event = null;
        // Distinct nodes observed so far; drives the pool-resize heuristic.
        Set<String> allNodes = new HashSet<String>();

        while (!stopped.get() && !Thread.currentThread().isInterrupted()) {
          try {
            event = events.take();
          } catch (InterruptedException e) {
            // Interrupted by serviceStop(); only log if it was unexpected.
            if (!stopped.get()) {
              LOG.error("Returning, thread interrupted", e);
            }
            return;
          }

          allNodes.add(event.getNodeId().toString());

          int threadPoolSize = threadPool.getCorePoolSize();

          // We can increase the pool size only if haven't reached the maximum
          // limit yet.
          if (threadPoolSize != maxThreadPoolSize) {

            // nodes where containers will run at *this* point of time. This is
            // *not* the cluster size and doesn't need to be.
            int nodeNum = allNodes.size();
            int idealThreadPoolSize = Math.min(maxThreadPoolSize, nodeNum);

            if (threadPoolSize < idealThreadPoolSize) {
              // Bump up the pool size to idealThreadPoolSize +
              // INITIAL_POOL_SIZE, the later is just a buffer so we are not
              // always increasing the pool-size
              int newThreadPoolSize = Math.min(maxThreadPoolSize,
                  idealThreadPoolSize + INITIAL_THREAD_POOL_SIZE);
              LOG.info("Set NMClientAsync thread pool size to " +
                  newThreadPoolSize + " as the number of nodes to talk to is "
                  + nodeNum);
              threadPool.setCorePoolSize(newThreadPoolSize);
            }
          }

          // the events from the queue are handled in parallel with a thread
          // pool
          threadPool.execute(getContainerEventProcessor(event));

          // TODO: Group launching of multiple containers to a single
          // NodeManager into a single connection
        }
      }
    };
    eventDispatcherThread.setName("Container Event Dispatcher");
    eventDispatcherThread.setDaemon(false);
    eventDispatcherThread.start();
    super.serviceStart();
  }

  /**
   * Stops the dispatcher, the pool, and the wrapped client. Idempotent: a
   * second call returns immediately.
   */
  @Override
  protected void serviceStop() throws Exception {
    if (stopped.getAndSet(true)) {
      // return if already stopped
      return;
    }
    if (eventDispatcherThread != null) {
      eventDispatcherThread.interrupt();
      try {
        eventDispatcherThread.join();
      } catch (InterruptedException e) {
        // NOTE(review): interrupt status is not restored here — confirm
        // whether Thread.currentThread().interrupt() should be called.
        LOG.error("The thread of " + eventDispatcherThread.getName() +
                  " didn't finish normally.", e);
      }
    }
    if (threadPool != null) {
      threadPool.shutdownNow();
    }
    if (client != null) {
      // If NMClientImpl doesn't stop running containers, the states doesn't
      // need to be cleared.
      if (!(client instanceof NMClientImpl) ||
          ((NMClientImpl) client).getCleanupRunningContainers().get()) {
        if (containers != null) {
          containers.clear();
        }
      }
      client.stop();
    }
    super.serviceStop();
  }

  /**
   * Registers a state machine for the container and enqueues a
   * START_CONTAINER event. Reports onStartContainerError if the container was
   * already registered or the enqueue is interrupted.
   */
  public void startContainerAsync(
      Container container, ContainerLaunchContext containerLaunchContext) {
    if (containers.putIfAbsent(container.getId(),
        new StatefulContainer(this, container.getId())) != null) {
      // NOTE(review): after reporting the duplicate-start error, execution
      // still falls through and enqueues the start event; the DONE/FAILED
      // self-transitions of the state machine absorb it — confirm intended.
      callbackHandler.onStartContainerError(container.getId(),
          RPCUtil.getRemoteException("Container " + container.getId() +
              " is already started or scheduled to start"));
    }
    try {
      events.put(new StartContainerEvent(container, containerLaunchContext));
    } catch (InterruptedException e) {
      LOG.warn("Exception when scheduling the event of starting Container " +
          container.getId());
      callbackHandler.onStartContainerError(container.getId(), e);
    }
  }

  /**
   * Enqueues a STOP_CONTAINER event. Reports onStopContainerError if the
   * container was never scheduled to start or the enqueue is interrupted.
   */
  public void stopContainerAsync(ContainerId containerId, NodeId nodeId) {
    if (containers.get(containerId) == null) {
      callbackHandler.onStopContainerError(containerId,
          RPCUtil.getRemoteException("Container " + containerId +
              " is neither started nor scheduled to start"));
    }
    try {
      events.put(new ContainerEvent(containerId, nodeId, null,
          ContainerEventType.STOP_CONTAINER));
    } catch (InterruptedException e) {
      LOG.warn("Exception when scheduling the event of stopping Container " +
          containerId);
      callbackHandler.onStopContainerError(containerId, e);
    }
  }

  /**
   * Enqueues a QUERY_CONTAINER event; status queries bypass the per-container
   * state machine (see ContainerEventProcessor.run()).
   */
  public void getContainerStatusAsync(ContainerId containerId, NodeId nodeId) {
    try {
      events.put(new ContainerEvent(containerId, nodeId, null,
          ContainerEventType.QUERY_CONTAINER));
    } catch (InterruptedException e) {
      LOG.warn("Exception when scheduling the event of querying the status" +
          " of Container " + containerId);
      callbackHandler.onGetContainerStatusError(containerId, e);
    }
  }

  // Lifecycle states of a tracked container as seen by this client.
  protected static enum ContainerState {
    PREP, FAILED, RUNNING, DONE,
  }

  // A container in DONE or FAILED needs no further tracking and is removed
  // from the map by the event processor.
  protected boolean isCompletelyDone(StatefulContainer container) {
    return container.getState() == ContainerState.DONE ||
        container.getState() == ContainerState.FAILED;
  }

  // Factory hook; overridable in tests to intercept event processing.
  protected ContainerEventProcessor getContainerEventProcessor(
      ContainerEvent event) {
    return new ContainerEventProcessor(event);
  }

  /**
   * The type of the event of interacting with a container
   */
  protected static enum ContainerEventType {
    START_CONTAINER,
    STOP_CONTAINER,
    QUERY_CONTAINER
  }

  /** Base event: identifies the container, its node, and (optionally) token. */
  protected static class ContainerEvent
      extends AbstractEvent<ContainerEventType>{

    private ContainerId containerId;
    private NodeId nodeId;
    private Token containerToken;

    public ContainerEvent(ContainerId containerId, NodeId nodeId,
        Token containerToken, ContainerEventType type) {
      super(type);
      this.containerId = containerId;
      this.nodeId = nodeId;
      this.containerToken = containerToken;
    }

    public ContainerId getContainerId() {
      return containerId;
    }

    public NodeId getNodeId() {
      return nodeId;
    }

    public Token getContainerToken() {
      return containerToken;
    }
  }

  /** START_CONTAINER event carrying the container and its launch context. */
  protected static class StartContainerEvent extends ContainerEvent {
    private Container container;
    private ContainerLaunchContext containerLaunchContext;

    public StartContainerEvent(Container container,
        ContainerLaunchContext containerLaunchContext) {
      super(container.getId(), container.getNodeId(),
          container.getContainerToken(), ContainerEventType.START_CONTAINER);
      this.container = container;
      this.containerLaunchContext = containerLaunchContext;
    }

    public Container getContainer() {
      return container;
    }

    public ContainerLaunchContext getContainerLaunchContext() {
      return containerLaunchContext;
    }
  }

  /**
   * Per-container state machine guarding the ordering of start/stop events.
   * Transitions perform the actual blocking RPCs and invoke the user callback,
   * swallowing (logging) any unchecked exception the callback throws.
   */
  protected static class StatefulContainer
      implements EventHandler<ContainerEvent> {

    protected final static StateMachineFactory<StatefulContainer,
        ContainerState, ContainerEventType, ContainerEvent> stateMachineFactory
        = new StateMachineFactory<StatefulContainer, ContainerState,
            ContainerEventType, ContainerEvent>(ContainerState.PREP)

            // Transitions from PREP state
            .addTransition(ContainerState.PREP,
                EnumSet.of(ContainerState.RUNNING, ContainerState.FAILED),
                ContainerEventType.START_CONTAINER,
                new StartContainerTransition())
            .addTransition(ContainerState.PREP, ContainerState.DONE,
                ContainerEventType.STOP_CONTAINER, new OutOfOrderTransition())

            // Transitions from RUNNING state
            // RUNNING -> RUNNING should be the invalid transition
            .addTransition(ContainerState.RUNNING,
                EnumSet.of(ContainerState.DONE, ContainerState.FAILED),
                ContainerEventType.STOP_CONTAINER,
                new StopContainerTransition())

            // Transition from DONE state
            .addTransition(ContainerState.DONE, ContainerState.DONE,
                EnumSet.of(ContainerEventType.START_CONTAINER,
                    ContainerEventType.STOP_CONTAINER))

            // Transition from FAILED state
            .addTransition(ContainerState.FAILED, ContainerState.FAILED,
                EnumSet.of(ContainerEventType.START_CONTAINER,
                    ContainerEventType.STOP_CONTAINER));

    // PREP -> RUNNING (start succeeded) or FAILED (start threw).
    protected static class StartContainerTransition implements
        MultipleArcTransition<StatefulContainer, ContainerEvent,
            ContainerState> {

      @Override
      public ContainerState transition(
          StatefulContainer container, ContainerEvent event) {
        ContainerId containerId = event.getContainerId();
        try {
          StartContainerEvent scEvent = null;
          if (event instanceof StartContainerEvent) {
            scEvent = (StartContainerEvent) event;
          }
          assert scEvent != null;
          Map<String, ByteBuffer> allServiceResponse =
              container.nmClientAsync.getClient().startContainer(
                  scEvent.getContainer(), scEvent.getContainerLaunchContext());
          try {
            container.nmClientAsync.getCallbackHandler().onContainerStarted(
                containerId, allServiceResponse);
          } catch (Throwable thr) {
            // Don't process user created unchecked exception
            LOG.info("Unchecked exception is thrown from onContainerStarted for "
                + "Container " + containerId, thr);
          }
          return ContainerState.RUNNING;
        } catch (YarnException e) {
          return onExceptionRaised(container, event, e);
        } catch (IOException e) {
          return onExceptionRaised(container, event, e);
        } catch (Throwable t) {
          return onExceptionRaised(container, event, t);
        }
      }

      private ContainerState onExceptionRaised(StatefulContainer container,
          ContainerEvent event, Throwable t) {
        try {
          container.nmClientAsync.getCallbackHandler().onStartContainerError(
              event.getContainerId(), t);
        } catch (Throwable thr) {
          // Don't process user created unchecked exception
          LOG.info(
              "Unchecked exception is thrown from onStartContainerError for "
                  + "Container " + event.getContainerId(), thr);
        }
        return ContainerState.FAILED;
      }
    }

    // RUNNING -> DONE (stop succeeded) or FAILED (stop threw).
    protected static class StopContainerTransition implements
        MultipleArcTransition<StatefulContainer, ContainerEvent,
            ContainerState> {

      @Override
      public ContainerState transition(
          StatefulContainer container, ContainerEvent event) {
        ContainerId containerId = event.getContainerId();
        try {
          container.nmClientAsync.getClient().stopContainer(
              containerId, event.getNodeId());
          try {
            container.nmClientAsync.getCallbackHandler().onContainerStopped(
                event.getContainerId());
          } catch (Throwable thr) {
            // Don't process user created unchecked exception
            LOG.info("Unchecked exception is thrown from onContainerStopped for "
                + "Container " + event.getContainerId(), thr);
          }
          return ContainerState.DONE;
        } catch (YarnException e) {
          return onExceptionRaised(container, event, e);
        } catch (IOException e) {
          return onExceptionRaised(container, event, e);
        } catch (Throwable t) {
          return onExceptionRaised(container, event, t);
        }
      }

      private ContainerState onExceptionRaised(StatefulContainer container,
          ContainerEvent event, Throwable t) {
        try {
          container.nmClientAsync.getCallbackHandler().onStopContainerError(
              event.getContainerId(), t);
        } catch (Throwable thr) {
          // Don't process user created unchecked exception
          LOG.info("Unchecked exception is thrown from onStopContainerError for "
              + "Container " + event.getContainerId(), thr);
        }
        return ContainerState.FAILED;
      }
    }

    // PREP -> DONE: a stop arrived before the start was processed; report it
    // as a start error without issuing any RPC.
    protected static class OutOfOrderTransition implements
        SingleArcTransition<StatefulContainer, ContainerEvent> {

      protected static final String STOP_BEFORE_START_ERROR_MSG =
          "Container was killed before it was launched";

      @Override
      public void transition(StatefulContainer container,
          ContainerEvent event) {
        try {
          container.nmClientAsync.getCallbackHandler().onStartContainerError(
              event.getContainerId(),
              RPCUtil.getRemoteException(STOP_BEFORE_START_ERROR_MSG));
        } catch (Throwable thr) {
          // Don't process user created unchecked exception
          LOG.info(
              "Unchecked exception is thrown from onStartContainerError for "
                  + "Container " + event.getContainerId(), thr);
        }
      }
    }

    private final NMClientAsync nmClientAsync;
    private final ContainerId containerId;
    private final StateMachine<ContainerState, ContainerEventType,
        ContainerEvent> stateMachine;
    // Read/write lock pair: handle() mutates state, getState() only reads.
    private final ReadLock readLock;
    private final WriteLock writeLock;

    public StatefulContainer(NMClientAsync client, ContainerId containerId) {
      this.nmClientAsync = client;
      this.containerId = containerId;
      stateMachine = stateMachineFactory.make(this);
      ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
      readLock = lock.readLock();
      writeLock = lock.writeLock();
    }

    @Override
    public void handle(ContainerEvent event) {
      writeLock.lock();
      try {
        try {
          this.stateMachine.doTransition(event.getType(), event);
        } catch (InvalidStateTransitionException e) {
          LOG.error("Can't handle this event at current state", e);
        }
      } finally {
        writeLock.unlock();
      }
    }

    public ContainerId getContainerId() {
      return containerId;
    }

    public ContainerState getState() {
      readLock.lock();
      try {
        return stateMachine.getCurrentState();
      } finally {
        readLock.unlock();
      }
    }
  }

  /**
   * Worker-pool task: status queries are answered directly against the
   * synchronous client; start/stop events are routed through the container's
   * state machine, and fully-finished containers are dropped from the map.
   */
  protected class ContainerEventProcessor implements Runnable {
    protected ContainerEvent event;

    public ContainerEventProcessor(ContainerEvent event) {
      this.event = event;
    }

    @Override
    public void run() {
      ContainerId containerId = event.getContainerId();
      LOG.info("Processing Event " + event + " for Container " + containerId);
      if (event.getType() == ContainerEventType.QUERY_CONTAINER) {
        try {
          ContainerStatus containerStatus = client.getContainerStatus(
              containerId, event.getNodeId());
          try {
            callbackHandler.onContainerStatusReceived(
                containerId, containerStatus);
          } catch (Throwable thr) {
            // Don't process user created unchecked exception
            LOG.info(
                "Unchecked exception is thrown from onContainerStatusReceived"
                    + " for Container " + event.getContainerId(), thr);
          }
        } catch (YarnException e) {
          onExceptionRaised(containerId, e);
        } catch (IOException e) {
          onExceptionRaised(containerId, e);
        } catch (Throwable t) {
          onExceptionRaised(containerId, t);
        }
      } else {
        StatefulContainer container = containers.get(containerId);
        if (container == null) {
          LOG.info("Container " + containerId + " is already stopped or failed");
        } else {
          container.handle(event);
          if (isCompletelyDone(container)) {
            containers.remove(containerId);
          }
        }
      }
    }

    private void onExceptionRaised(ContainerId containerId, Throwable t) {
      try {
        callbackHandler.onGetContainerStatusError(containerId, t);
      } catch (Throwable thr) {
        // Don't process user created unchecked exception
        LOG.info("Unchecked exception is thrown from onGetContainerStatusError"
            + " for Container " + containerId, thr);
      }
    }
  }
}
21,315
35.942808
81
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/AMRMClientAsyncImpl.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.client.api.async.impl;

import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
import org.apache.hadoop.yarn.client.api.impl.AMRMClientImpl;
import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

import com.google.common.annotations.VisibleForTesting;

/**
 * Asynchronous implementation of {@link AMRMClientAsync}. A heartbeat thread
 * periodically calls {@code allocate} on the wrapped synchronous
 * {@link AMRMClient} and puts each response on a queue; a separate handler
 * thread drains the queue and invokes the user's {@code CallbackHandler}.
 * Exceptions raised on the heartbeat are handed to the handler thread via
 * {@code savedException}.
 */
@Private
@Unstable
public class AMRMClientAsyncImpl<T extends ContainerRequest>
    extends AMRMClientAsync<T> {

  private static final Log LOG = LogFactory.getLog(AMRMClientAsyncImpl.class);

  private final HeartbeatThread heartbeatThread;
  private final CallbackHandlerThread handlerThread;

  // Hand-off channel from the heartbeat thread to the handler thread.
  private final BlockingQueue<AllocateResponse> responseQueue;

  // Guards allocate() vs. unregisterApplicationMaster(): no heartbeat may be
  // sent after unregistering.
  private final Object unregisterHeartbeatLock = new Object();

  // Cleared on stop/unregister; both threads poll it to terminate.
  private volatile boolean keepRunning;
  // Latest progress reported by the callback handler, sent on each heartbeat.
  private volatile float progress;

  // Heartbeat failure stashed here for the handler thread to surface.
  private volatile Throwable savedException;

  public AMRMClientAsyncImpl(int intervalMs, CallbackHandler callbackHandler) {
    this(new AMRMClientImpl<T>(), intervalMs, callbackHandler);
  }

  @Private
  @VisibleForTesting
  public AMRMClientAsyncImpl(AMRMClient<T> client, int intervalMs,
      CallbackHandler callbackHandler) {
    super(client, intervalMs, callbackHandler);
    heartbeatThread = new HeartbeatThread();
    handlerThread = new CallbackHandlerThread();
    responseQueue = new LinkedBlockingQueue<AllocateResponse>();
    keepRunning = true;
    savedException = null;
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    super.serviceInit(conf);
    client.init(conf);
  }

  // Note: only the handler thread starts here; the heartbeat thread is
  // started by registerApplicationMaster() after successful registration.
  @Override
  protected void serviceStart() throws Exception {
    handlerThread.setDaemon(true);
    handlerThread.start();
    client.start();
    super.serviceStart();
  }

  /**
   * Tells the heartbeat and handler threads to stop and waits for them to
   * terminate.
   */
  @Override
  protected void serviceStop() throws Exception {
    keepRunning = false;
    heartbeatThread.interrupt();
    try {
      heartbeatThread.join();
    } catch (InterruptedException ex) {
      // NOTE(review): interrupt status is not restored here — confirm whether
      // Thread.currentThread().interrupt() should be called.
      LOG.error("Error joining with heartbeat thread", ex);
    }
    client.stop();
    handlerThread.interrupt();
    super.serviceStop();
  }

  public void setHeartbeatInterval(int interval) {
    heartbeatIntervalMs.set(interval);
  }

  public List<? extends Collection<T>> getMatchingRequests(
      Priority priority, String resourceName, Resource capability) {
    return client.getMatchingRequests(priority, resourceName, capability);
  }

  /**
   * Registers this application master with the resource manager. On successful
   * registration, starts the heartbeating thread.
   * @throws YarnException
   * @throws IOException
   */
  public RegisterApplicationMasterResponse registerApplicationMaster(
      String appHostName, int appHostPort, String appTrackingUrl)
      throws YarnException, IOException {
    RegisterApplicationMasterResponse response = client
        .registerApplicationMaster(appHostName, appHostPort, appTrackingUrl);
    heartbeatThread.start();
    return response;
  }

  /**
   * Unregister the application master. This must be called in the end.
   * @param appStatus Success/Failure status of the master
   * @param appMessage Diagnostics message on failure
   * @param appTrackingUrl New URL to get master info
   * @throws YarnException
   * @throws IOException
   */
  public void unregisterApplicationMaster(FinalApplicationStatus appStatus,
      String appMessage, String appTrackingUrl) throws YarnException,
      IOException {
    synchronized (unregisterHeartbeatLock) {
      keepRunning = false;
      client.unregisterApplicationMaster(appStatus, appMessage, appTrackingUrl);
    }
  }

  /**
   * Request containers for resources before calling <code>allocate</code>
   * @param req Resource request
   */
  public void addContainerRequest(T req) {
    client.addContainerRequest(req);
  }

  /**
   * Remove previous container request. The previous container request may have
   * already been sent to the ResourceManager, so even after the remove request
   * the app must be prepared to receive an allocation for the previous request.
   * @param req Resource request
   */
  public void removeContainerRequest(T req) {
    client.removeContainerRequest(req);
  }

  /**
   * Release containers assigned by the Resource Manager. If the app cannot use
   * the container or wants to give up the container then it can release them.
   * The app needs to make new requests for the released resource capability if
   * it still needs it. eg. it released non-local resources
   * @param containerId
   */
  public void releaseAssignedContainer(ContainerId containerId) {
    client.releaseAssignedContainer(containerId);
  }

  /**
   * Get the currently available resources in the cluster.
   * A valid value is available after a call to allocate has been made
   * @return Currently available resources
   */
  public Resource getAvailableResources() {
    return client.getAvailableResources();
  }

  /**
   * Get the current number of nodes in the cluster.
   * A valid value is available after a call to allocate has been made
   * @return Current number of nodes in the cluster
   */
  public int getClusterNodeCount() {
    return client.getClusterNodeCount();
  }

  /**
   * Update application's blacklist with addition or removal resources.
   *
   * @param blacklistAdditions list of resources which should be added to the
   *        application blacklist
   * @param blacklistRemovals list of resources which should be removed from the
   *        application blacklist
   */
  public void updateBlacklist(List<String> blacklistAdditions,
                              List<String> blacklistRemovals) {
    client.updateBlacklist(blacklistAdditions, blacklistRemovals);
  }

  /**
   * Periodically calls allocate() and queues each response for the handler
   * thread. Terminates on shutdown request, on unregister, or after stashing
   * an unexpected exception in {@code savedException}.
   */
  private class HeartbeatThread extends Thread {
    public HeartbeatThread() {
      super("AMRM Heartbeater thread");
    }

    public void run() {
      while (true) {
        AllocateResponse response = null;
        // synchronization ensures we don't send heartbeats after unregistering
        synchronized (unregisterHeartbeatLock) {
          if (!keepRunning) {
            return;
          }

          try {
            response = client.allocate(progress);
          } catch (ApplicationAttemptNotFoundException e) {
            handler.onShutdownRequest();
            LOG.info("Shutdown requested. Stopping callback.");
            return;
          } catch (Throwable ex) {
            LOG.error("Exception on heartbeat", ex);
            savedException = ex;
            // interrupt handler thread in case it waiting on the queue
            handlerThread.interrupt();
            return;
          }
          if (response != null) {
            // Retry the put until it succeeds; an interrupt alone must not
            // drop an allocate response.
            while (true) {
              try {
                responseQueue.put(response);
                break;
              } catch (InterruptedException ex) {
                LOG.debug("Interrupted while waiting to put on response queue", ex);
              }
            }
          }
        }
        try {
          Thread.sleep(heartbeatIntervalMs.get());
        } catch (InterruptedException ex) {
          LOG.debug("Heartbeater interrupted", ex);
        }
      }
    }
  }

  /**
   * Drains the response queue and fans each response out to the user's
   * callback handler (node updates, completed containers, allocations), then
   * refreshes {@code progress} for the next heartbeat. A callback exception
   * is reported via onError and terminates the thread.
   */
  private class CallbackHandlerThread extends Thread {
    public CallbackHandlerThread() {
      super("AMRM Callback Handler Thread");
    }

    public void run() {
      while (true) {
        if (!keepRunning) {
          return;
        }
        try {
          AllocateResponse response;
          if(savedException != null) {
            LOG.error("Stopping callback due to: ", savedException);
            handler.onError(savedException);
            return;
          }
          try {
            response = responseQueue.take();
          } catch (InterruptedException ex) {
            // Re-check keepRunning/savedException at the top of the loop.
            LOG.info("Interrupted while waiting for queue", ex);
            continue;
          }

          List<NodeReport> updatedNodes = response.getUpdatedNodes();
          if (!updatedNodes.isEmpty()) {
            handler.onNodesUpdated(updatedNodes);
          }

          List<ContainerStatus> completed =
              response.getCompletedContainersStatuses();
          if (!completed.isEmpty()) {
            handler.onContainersCompleted(completed);
          }

          List<Container> allocated = response.getAllocatedContainers();
          if (!allocated.isEmpty()) {
            handler.onContainersAllocated(allocated);
          }

          progress = handler.getProgress();
        } catch (Throwable ex) {
          handler.onError(ex);
          // re-throw exception to end the thread
          throw new YarnRuntimeException(ex);
        }
      }
    }
  }
}
10,947
33.427673
84
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.impl; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NMToken; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.client.api.AMRMClient; import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest; import org.apache.hadoop.yarn.client.api.InvalidContainerRequestException; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.util.RackResolver; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; @Private @Unstable public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> { private static final Log LOG = LogFactory.getLog(AMRMClientImpl.class); private static final List<String> ANY_LIST = Collections.singletonList(ResourceRequest.ANY); private int lastResponseId = 0; protected String appHostName; protected int appHostPort; protected String appTrackingUrl; protected ApplicationMasterProtocol rmClient; protected Resource clusterAvailableResources; protected int clusterNodeCount; // blacklistedNodes is required for keeping history of 
blacklisted nodes that // are sent to RM. On RESYNC command from RM, blacklistedNodes are used to get // current blacklisted nodes and send back to RM. protected final Set<String> blacklistedNodes = new HashSet<String>(); protected final Set<String> blacklistAdditions = new HashSet<String>(); protected final Set<String> blacklistRemovals = new HashSet<String>(); class ResourceRequestInfo { ResourceRequest remoteRequest; LinkedHashSet<T> containerRequests; ResourceRequestInfo(Priority priority, String resourceName, Resource capability, boolean relaxLocality) { remoteRequest = ResourceRequest.newInstance(priority, resourceName, capability, 0); remoteRequest.setRelaxLocality(relaxLocality); containerRequests = new LinkedHashSet<T>(); } } /** * Class compares Resource by memory then cpu in reverse order */ class ResourceReverseMemoryThenCpuComparator implements Comparator<Resource> { @Override public int compare(Resource arg0, Resource arg1) { int mem0 = arg0.getMemory(); int mem1 = arg1.getMemory(); int cpu0 = arg0.getVirtualCores(); int cpu1 = arg1.getVirtualCores(); if(mem0 == mem1) { if(cpu0 == cpu1) { return 0; } if(cpu0 < cpu1) { return 1; } return -1; } if(mem0 < mem1) { return 1; } return -1; } } static boolean canFit(Resource arg0, Resource arg1) { int mem0 = arg0.getMemory(); int mem1 = arg1.getMemory(); int cpu0 = arg0.getVirtualCores(); int cpu1 = arg1.getVirtualCores(); if(mem0 <= mem1 && cpu0 <= cpu1) { return true; } return false; } //Key -> Priority //Value -> Map //Key->ResourceName (e.g., nodename, rackname, *) //Value->Map //Key->Resource Capability //Value->ResourceRequest protected final Map<Priority, Map<String, TreeMap<Resource, ResourceRequestInfo>>> remoteRequestsTable = new TreeMap<Priority, Map<String, TreeMap<Resource, ResourceRequestInfo>>>(); protected final Set<ResourceRequest> ask = new TreeSet<ResourceRequest>( new org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestComparator()); protected final Set<ContainerId> release = 
new TreeSet<ContainerId>(); // pendingRelease holds history or release requests.request is removed only if // RM sends completedContainer. // How it different from release? --> release is for per allocate() request. protected Set<ContainerId> pendingRelease = new TreeSet<ContainerId>(); public AMRMClientImpl() { super(AMRMClientImpl.class.getName()); } @Override protected void serviceInit(Configuration conf) throws Exception { RackResolver.init(conf); super.serviceInit(conf); } @Override protected void serviceStart() throws Exception { final YarnConfiguration conf = new YarnConfiguration(getConfig()); try { rmClient = ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class); } catch (IOException e) { throw new YarnRuntimeException(e); } super.serviceStart(); } @Override protected void serviceStop() throws Exception { if (this.rmClient != null) { RPC.stopProxy(this.rmClient); } super.serviceStop(); } @Override public RegisterApplicationMasterResponse registerApplicationMaster( String appHostName, int appHostPort, String appTrackingUrl) throws YarnException, IOException { this.appHostName = appHostName; this.appHostPort = appHostPort; this.appTrackingUrl = appTrackingUrl; Preconditions.checkArgument(appHostName != null, "The host name should not be null"); Preconditions.checkArgument(appHostPort >= -1, "Port number of the host" + " should be any integers larger than or equal to -1"); return registerApplicationMaster(); } private RegisterApplicationMasterResponse registerApplicationMaster() throws YarnException, IOException { RegisterApplicationMasterRequest request = RegisterApplicationMasterRequest.newInstance(this.appHostName, this.appHostPort, this.appTrackingUrl); RegisterApplicationMasterResponse response = rmClient.registerApplicationMaster(request); synchronized (this) { lastResponseId = 0; if (!response.getNMTokensFromPreviousAttempts().isEmpty()) { populateNMTokens(response.getNMTokensFromPreviousAttempts()); } } return response; } @Override public 
AllocateResponse allocate(float progressIndicator) throws YarnException, IOException { Preconditions.checkArgument(progressIndicator >= 0, "Progress indicator should not be negative"); AllocateResponse allocateResponse = null; List<ResourceRequest> askList = null; List<ContainerId> releaseList = null; AllocateRequest allocateRequest = null; List<String> blacklistToAdd = new ArrayList<String>(); List<String> blacklistToRemove = new ArrayList<String>(); try { synchronized (this) { askList = new ArrayList<ResourceRequest>(ask.size()); for(ResourceRequest r : ask) { // create a copy of ResourceRequest as we might change it while the // RPC layer is using it to send info across askList.add(ResourceRequest.newInstance(r.getPriority(), r.getResourceName(), r.getCapability(), r.getNumContainers(), r.getRelaxLocality(), r.getNodeLabelExpression())); } releaseList = new ArrayList<ContainerId>(release); // optimistically clear this collection assuming no RPC failure ask.clear(); release.clear(); blacklistToAdd.addAll(blacklistAdditions); blacklistToRemove.addAll(blacklistRemovals); ResourceBlacklistRequest blacklistRequest = ResourceBlacklistRequest.newInstance(blacklistToAdd, blacklistToRemove); allocateRequest = AllocateRequest.newInstance(lastResponseId, progressIndicator, askList, releaseList, blacklistRequest); // clear blacklistAdditions and blacklistRemovals before // unsynchronized part blacklistAdditions.clear(); blacklistRemovals.clear(); } try { allocateResponse = rmClient.allocate(allocateRequest); } catch (ApplicationMasterNotRegisteredException e) { LOG.warn("ApplicationMaster is out of sync with ResourceManager," + " hence resyncing."); synchronized (this) { release.addAll(this.pendingRelease); blacklistAdditions.addAll(this.blacklistedNodes); for (Map<String, TreeMap<Resource, ResourceRequestInfo>> rr : remoteRequestsTable .values()) { for (Map<Resource, ResourceRequestInfo> capabalities : rr.values()) { for (ResourceRequestInfo request : 
capabalities.values()) { addResourceRequestToAsk(request.remoteRequest); } } } } // re register with RM registerApplicationMaster(); allocateResponse = allocate(progressIndicator); return allocateResponse; } synchronized (this) { // update these on successful RPC clusterNodeCount = allocateResponse.getNumClusterNodes(); lastResponseId = allocateResponse.getResponseId(); clusterAvailableResources = allocateResponse.getAvailableResources(); if (!allocateResponse.getNMTokens().isEmpty()) { populateNMTokens(allocateResponse.getNMTokens()); } if (allocateResponse.getAMRMToken() != null) { updateAMRMToken(allocateResponse.getAMRMToken()); } if (!pendingRelease.isEmpty() && !allocateResponse.getCompletedContainersStatuses().isEmpty()) { removePendingReleaseRequests(allocateResponse .getCompletedContainersStatuses()); } } } finally { // TODO how to differentiate remote yarn exception vs error in rpc if(allocateResponse == null) { // we hit an exception in allocate() // preserve ask and release for next call to allocate() synchronized (this) { release.addAll(releaseList); // requests could have been added or deleted during call to allocate // If requests were added/removed then there is nothing to do since // the ResourceRequest object in ask would have the actual new value. // If ask does not have this ResourceRequest then it was unchanged and // so we can add the value back safely. // This assumes that there will no concurrent calls to allocate() and // so we dont have to worry about ask being changed in the // synchronized block at the beginning of this method. 
for(ResourceRequest oldAsk : askList) { if(!ask.contains(oldAsk)) { ask.add(oldAsk); } } blacklistAdditions.addAll(blacklistToAdd); blacklistRemovals.addAll(blacklistToRemove); } } } return allocateResponse; } protected void removePendingReleaseRequests( List<ContainerStatus> completedContainersStatuses) { for (ContainerStatus containerStatus : completedContainersStatuses) { pendingRelease.remove(containerStatus.getContainerId()); } } @Private @VisibleForTesting protected void populateNMTokens(List<NMToken> nmTokens) { for (NMToken token : nmTokens) { String nodeId = token.getNodeId().toString(); if (getNMTokenCache().containsToken(nodeId)) { LOG.info("Replacing token for : " + nodeId); } else { LOG.info("Received new token for : " + nodeId); } getNMTokenCache().setToken(nodeId, token.getToken()); } } @Override public void unregisterApplicationMaster(FinalApplicationStatus appStatus, String appMessage, String appTrackingUrl) throws YarnException, IOException { Preconditions.checkArgument(appStatus != null, "AppStatus should not be null."); FinishApplicationMasterRequest request = FinishApplicationMasterRequest.newInstance(appStatus, appMessage, appTrackingUrl); try { while (true) { FinishApplicationMasterResponse response = rmClient.finishApplicationMaster(request); if (response.getIsUnregistered()) { break; } LOG.info("Waiting for application to be successfully unregistered."); Thread.sleep(100); } } catch (InterruptedException e) { LOG.info("Interrupted while waiting for application" + " to be removed from RMStateStore"); } catch (ApplicationMasterNotRegisteredException e) { LOG.warn("ApplicationMaster is out of sync with ResourceManager," + " hence resyncing."); // re register with RM registerApplicationMaster(); unregisterApplicationMaster(appStatus, appMessage, appTrackingUrl); } } @Override public synchronized void addContainerRequest(T req) { Preconditions.checkArgument(req != null, "Resource request can not be null."); Set<String> dedupedRacks = new 
HashSet<String>(); if (req.getRacks() != null) { dedupedRacks.addAll(req.getRacks()); if(req.getRacks().size() != dedupedRacks.size()) { Joiner joiner = Joiner.on(','); LOG.warn("ContainerRequest has duplicate racks: " + joiner.join(req.getRacks())); } } Set<String> inferredRacks = resolveRacks(req.getNodes()); inferredRacks.removeAll(dedupedRacks); // check that specific and non-specific requests cannot be mixed within a // priority checkLocalityRelaxationConflict(req.getPriority(), ANY_LIST, req.getRelaxLocality()); // check that specific rack cannot be mixed with specific node within a // priority. If node and its rack are both specified then they must be // in the same request. // For explicitly requested racks, we set locality relaxation to true checkLocalityRelaxationConflict(req.getPriority(), dedupedRacks, true); checkLocalityRelaxationConflict(req.getPriority(), inferredRacks, req.getRelaxLocality()); // check if the node label expression specified is valid checkNodeLabelExpression(req); if (req.getNodes() != null) { HashSet<String> dedupedNodes = new HashSet<String>(req.getNodes()); if(dedupedNodes.size() != req.getNodes().size()) { Joiner joiner = Joiner.on(','); LOG.warn("ContainerRequest has duplicate nodes: " + joiner.join(req.getNodes())); } for (String node : dedupedNodes) { addResourceRequest(req.getPriority(), node, req.getCapability(), req, true, req.getNodeLabelExpression()); } } for (String rack : dedupedRacks) { addResourceRequest(req.getPriority(), rack, req.getCapability(), req, true, req.getNodeLabelExpression()); } // Ensure node requests are accompanied by requests for // corresponding rack for (String rack : inferredRacks) { addResourceRequest(req.getPriority(), rack, req.getCapability(), req, req.getRelaxLocality(), req.getNodeLabelExpression()); } // Off-switch addResourceRequest(req.getPriority(), ResourceRequest.ANY, req.getCapability(), req, req.getRelaxLocality(), req.getNodeLabelExpression()); } @Override public synchronized void 
removeContainerRequest(T req) { Preconditions.checkArgument(req != null, "Resource request can not be null."); Set<String> allRacks = new HashSet<String>(); if (req.getRacks() != null) { allRacks.addAll(req.getRacks()); } allRacks.addAll(resolveRacks(req.getNodes())); // Update resource requests if (req.getNodes() != null) { for (String node : new HashSet<String>(req.getNodes())) { decResourceRequest(req.getPriority(), node, req.getCapability(), req); } } for (String rack : allRacks) { decResourceRequest(req.getPriority(), rack, req.getCapability(), req); } decResourceRequest(req.getPriority(), ResourceRequest.ANY, req.getCapability(), req); } @Override public synchronized void releaseAssignedContainer(ContainerId containerId) { Preconditions.checkArgument(containerId != null, "ContainerId can not be null."); pendingRelease.add(containerId); release.add(containerId); } @Override public synchronized Resource getAvailableResources() { return clusterAvailableResources; } @Override public synchronized int getClusterNodeCount() { return clusterNodeCount; } @Override public synchronized List<? 
extends Collection<T>> getMatchingRequests( Priority priority, String resourceName, Resource capability) { Preconditions.checkArgument(capability != null, "The Resource to be requested should not be null "); Preconditions.checkArgument(priority != null, "The priority at which to request containers should not be null "); List<LinkedHashSet<T>> list = new LinkedList<LinkedHashSet<T>>(); Map<String, TreeMap<Resource, ResourceRequestInfo>> remoteRequests = this.remoteRequestsTable.get(priority); if (remoteRequests == null) { return list; } TreeMap<Resource, ResourceRequestInfo> reqMap = remoteRequests .get(resourceName); if (reqMap == null) { return list; } ResourceRequestInfo resourceRequestInfo = reqMap.get(capability); if (resourceRequestInfo != null && !resourceRequestInfo.containerRequests.isEmpty()) { list.add(resourceRequestInfo.containerRequests); return list; } // no exact match. Container may be larger than what was requested. // get all resources <= capability. map is reverse sorted. SortedMap<Resource, ResourceRequestInfo> tailMap = reqMap.tailMap(capability); for(Map.Entry<Resource, ResourceRequestInfo> entry : tailMap.entrySet()) { if (canFit(entry.getKey(), capability) && !entry.getValue().containerRequests.isEmpty()) { // match found that fits in the larger resource list.add(entry.getValue().containerRequests); } } // no match found return list; } private Set<String> resolveRacks(List<String> nodes) { Set<String> racks = new HashSet<String>(); if (nodes != null) { for (String node : nodes) { // Ensure node requests are accompanied by requests for // corresponding rack String rack = RackResolver.resolve(node).getNetworkLocation(); if (rack == null) { LOG.warn("Failed to resolve rack for node " + node + "."); } else { racks.add(rack); } } } return racks; } /** * ContainerRequests with locality relaxation cannot be made at the same * priority as ContainerRequests without locality relaxation. 
*/ private void checkLocalityRelaxationConflict(Priority priority, Collection<String> locations, boolean relaxLocality) { Map<String, TreeMap<Resource, ResourceRequestInfo>> remoteRequests = this.remoteRequestsTable.get(priority); if (remoteRequests == null) { return; } // Locality relaxation will be set to relaxLocality for all implicitly // requested racks. Make sure that existing rack requests match this. for (String location : locations) { TreeMap<Resource, ResourceRequestInfo> reqs = remoteRequests.get(location); if (reqs != null && !reqs.isEmpty()) { boolean existingRelaxLocality = reqs.values().iterator().next().remoteRequest.getRelaxLocality(); if (relaxLocality != existingRelaxLocality) { throw new InvalidContainerRequestException("Cannot submit a " + "ContainerRequest asking for location " + location + " with locality relaxation " + relaxLocality + " when it has " + "already been requested with locality relaxation " + existingRelaxLocality); } } } } /** * Valid if a node label expression specified on container request is valid or * not * * @param containerRequest */ private void checkNodeLabelExpression(T containerRequest) { String exp = containerRequest.getNodeLabelExpression(); if (null == exp || exp.isEmpty()) { return; } // Don't support specifying >= 2 node labels in a node label expression now if (exp.contains("&&") || exp.contains("||")) { throw new InvalidContainerRequestException( "Cannot specify more than two node labels" + " in a single node label expression"); } // Don't allow specify node label against ANY request if ((containerRequest.getRacks() != null && (!containerRequest.getRacks().isEmpty())) || (containerRequest.getNodes() != null && (!containerRequest.getNodes().isEmpty()))) { throw new InvalidContainerRequestException( "Cannot specify node label with rack and node"); } } private void addResourceRequestToAsk(ResourceRequest remoteRequest) { // This code looks weird but is needed because of the following scenario. 
// A ResourceRequest is removed from the remoteRequestTable. A 0 container // request is added to 'ask' to notify the RM about not needing it any more. // Before the call to allocate, the user now requests more containers. If // the locations of the 0 size request and the new request are the same // (with the difference being only container count), then the set comparator // will consider both to be the same and not add the new request to ask. So // we need to check for the "same" request being present and remove it and // then add it back. The comparator is container count agnostic. // This should happen only rarely but we do need to guard against it. if(ask.contains(remoteRequest)) { ask.remove(remoteRequest); } ask.add(remoteRequest); } private void addResourceRequest(Priority priority, String resourceName, Resource capability, T req, boolean relaxLocality, String labelExpression) { Map<String, TreeMap<Resource, ResourceRequestInfo>> remoteRequests = this.remoteRequestsTable.get(priority); if (remoteRequests == null) { remoteRequests = new HashMap<String, TreeMap<Resource, ResourceRequestInfo>>(); this.remoteRequestsTable.put(priority, remoteRequests); if (LOG.isDebugEnabled()) { LOG.debug("Added priority=" + priority); } } TreeMap<Resource, ResourceRequestInfo> reqMap = remoteRequests.get(resourceName); if (reqMap == null) { // capabilities are stored in reverse sorted order. smallest last. 
reqMap = new TreeMap<Resource, ResourceRequestInfo>( new ResourceReverseMemoryThenCpuComparator()); remoteRequests.put(resourceName, reqMap); } ResourceRequestInfo resourceRequestInfo = reqMap.get(capability); if (resourceRequestInfo == null) { resourceRequestInfo = new ResourceRequestInfo(priority, resourceName, capability, relaxLocality); reqMap.put(capability, resourceRequestInfo); } resourceRequestInfo.remoteRequest.setNumContainers( resourceRequestInfo.remoteRequest.getNumContainers() + 1); if (relaxLocality) { resourceRequestInfo.containerRequests.add(req); } if (ResourceRequest.ANY.equals(resourceName)) { resourceRequestInfo.remoteRequest.setNodeLabelExpression(labelExpression); } // Note this down for next interaction with ResourceManager addResourceRequestToAsk(resourceRequestInfo.remoteRequest); if (LOG.isDebugEnabled()) { LOG.debug("addResourceRequest:" + " applicationId=" + " priority=" + priority.getPriority() + " resourceName=" + resourceName + " numContainers=" + resourceRequestInfo.remoteRequest.getNumContainers() + " #asks=" + ask.size()); } } private void decResourceRequest(Priority priority, String resourceName, Resource capability, T req) { Map<String, TreeMap<Resource, ResourceRequestInfo>> remoteRequests = this.remoteRequestsTable.get(priority); if(remoteRequests == null) { if (LOG.isDebugEnabled()) { LOG.debug("Not decrementing resource as priority " + priority + " is not present in request table"); } return; } Map<Resource, ResourceRequestInfo> reqMap = remoteRequests.get(resourceName); if (reqMap == null) { if (LOG.isDebugEnabled()) { LOG.debug("Not decrementing resource as " + resourceName + " is not present in request table"); } return; } ResourceRequestInfo resourceRequestInfo = reqMap.get(capability); if (LOG.isDebugEnabled()) { LOG.debug("BEFORE decResourceRequest:" + " applicationId=" + " priority=" + priority.getPriority() + " resourceName=" + resourceName + " numContainers=" + resourceRequestInfo.remoteRequest.getNumContainers() + " 
#asks=" + ask.size()); } resourceRequestInfo.remoteRequest.setNumContainers( resourceRequestInfo.remoteRequest.getNumContainers() - 1); resourceRequestInfo.containerRequests.remove(req); if(resourceRequestInfo.remoteRequest.getNumContainers() < 0) { // guard against spurious removals resourceRequestInfo.remoteRequest.setNumContainers(0); } // send the ResourceRequest to RM even if is 0 because it needs to override // a previously sent value. If ResourceRequest was not sent previously then // sending 0 aught to be a no-op on RM addResourceRequestToAsk(resourceRequestInfo.remoteRequest); // delete entries from map if no longer needed if (resourceRequestInfo.remoteRequest.getNumContainers() == 0) { reqMap.remove(capability); if (reqMap.size() == 0) { remoteRequests.remove(resourceName); } if (remoteRequests.size() == 0) { remoteRequestsTable.remove(priority); } } if (LOG.isDebugEnabled()) { LOG.info("AFTER decResourceRequest:" + " applicationId=" + " priority=" + priority.getPriority() + " resourceName=" + resourceName + " numContainers=" + resourceRequestInfo.remoteRequest.getNumContainers() + " #asks=" + ask.size()); } } @Override public synchronized void updateBlacklist(List<String> blacklistAdditions, List<String> blacklistRemovals) { if (blacklistAdditions != null) { this.blacklistAdditions.addAll(blacklistAdditions); this.blacklistedNodes.addAll(blacklistAdditions); // if some resources are also in blacklistRemovals updated before, we // should remove them here. this.blacklistRemovals.removeAll(blacklistAdditions); } if (blacklistRemovals != null) { this.blacklistRemovals.addAll(blacklistRemovals); this.blacklistedNodes.removeAll(blacklistRemovals); // if some resources are in blacklistAdditions before, we should remove // them here. 
this.blacklistAdditions.removeAll(blacklistRemovals); } if (blacklistAdditions != null && blacklistRemovals != null && blacklistAdditions.removeAll(blacklistRemovals)) { // we allow resources to appear in addition list and removal list in the // same invocation of updateBlacklist(), but should get a warn here. LOG.warn("The same resources appear in both blacklistAdditions and " + "blacklistRemovals in updateBlacklist."); } } private void updateAMRMToken(Token token) throws IOException { org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken = new org.apache.hadoop.security.token.Token<AMRMTokenIdentifier>(token .getIdentifier().array(), token.getPassword().array(), new Text( token.getKind()), new Text(token.getService())); // Preserve the token service sent by the RM when adding the token // to ensure we replace the previous token setup by the RM. // Afterwards we can update the service address for the RPC layer. UserGroupInformation currentUGI = UserGroupInformation.getCurrentUser(); currentUGI.addToken(amrmToken); amrmToken.setService(ClientRMProxy.getAMRMTokenService(getConfig())); } }
31,113
37.843945
94
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Public package org.apache.hadoop.yarn.client.api.impl; import org.apache.hadoop.classification.InterfaceAudience;
940
41.772727
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.impl; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.client.NMProxy; import org.apache.hadoop.yarn.client.api.NMTokenCache; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.security.NMTokenIdentifier; import 
org.apache.hadoop.yarn.util.ConverterUtils; import com.google.common.annotations.VisibleForTesting; /** * Helper class to manage container manager proxies */ @LimitedPrivate({ "MapReduce", "YARN" }) public class ContainerManagementProtocolProxy { static final Log LOG = LogFactory.getLog(ContainerManagementProtocolProxy.class); private final int maxConnectedNMs; private final Map<String, ContainerManagementProtocolProxyData> cmProxy; private final Configuration conf; private final YarnRPC rpc; private NMTokenCache nmTokenCache; public ContainerManagementProtocolProxy(Configuration conf) { this(conf, NMTokenCache.getSingleton()); } public ContainerManagementProtocolProxy(Configuration conf, NMTokenCache nmTokenCache) { this.conf = new Configuration(conf); this.nmTokenCache = nmTokenCache; maxConnectedNMs = conf.getInt(YarnConfiguration.NM_CLIENT_MAX_NM_PROXIES, YarnConfiguration.DEFAULT_NM_CLIENT_MAX_NM_PROXIES); if (maxConnectedNMs < 0) { throw new YarnRuntimeException( YarnConfiguration.NM_CLIENT_MAX_NM_PROXIES + " (" + maxConnectedNMs + ") can not be less than 0."); } LOG.info(YarnConfiguration.NM_CLIENT_MAX_NM_PROXIES + " : " + maxConnectedNMs); if (maxConnectedNMs > 0) { cmProxy = new LinkedHashMap<String, ContainerManagementProtocolProxyData>(); } else { cmProxy = Collections.emptyMap(); // Connections are not being cached so ensure connections close quickly // to avoid creating thousands of RPC client threads on large clusters. this.conf.setInt( CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0); } rpc = YarnRPC.create(conf); } public synchronized ContainerManagementProtocolProxyData getProxy( String containerManagerBindAddr, ContainerId containerId) throws InvalidToken { // This get call will update the map which is working as LRU cache. 
ContainerManagementProtocolProxyData proxy = cmProxy.get(containerManagerBindAddr); while (proxy != null && !proxy.token.getIdentifier().equals( nmTokenCache.getToken(containerManagerBindAddr).getIdentifier())) { LOG.info("Refreshing proxy as NMToken got updated for node : " + containerManagerBindAddr); // Token is updated. check if anyone has already tried closing it. if (!proxy.scheduledForClose) { // try closing the proxy. Here if someone is already using it // then we might not close it. In which case we will wait. removeProxy(proxy); } else { try { this.wait(); } catch (InterruptedException e) { e.printStackTrace(); } } if (proxy.activeCallers < 0) { proxy = cmProxy.get(containerManagerBindAddr); } } if (proxy == null) { proxy = new ContainerManagementProtocolProxyData(rpc, containerManagerBindAddr, containerId, nmTokenCache.getToken(containerManagerBindAddr)); if (maxConnectedNMs > 0) { addProxyToCache(containerManagerBindAddr, proxy); } } // This is to track active users of this proxy. 
proxy.activeCallers++; updateLRUCache(containerManagerBindAddr); return proxy; } private void addProxyToCache(String containerManagerBindAddr, ContainerManagementProtocolProxyData proxy) { while (cmProxy.size() >= maxConnectedNMs) { if (LOG.isDebugEnabled()) { LOG.debug("Cleaning up the proxy cache, size=" + cmProxy.size() + " max=" + maxConnectedNMs); } boolean removedProxy = false; for (ContainerManagementProtocolProxyData otherProxy : cmProxy.values()) { removedProxy = removeProxy(otherProxy); if (removedProxy) { break; } } if (!removedProxy) { // all of the proxies are currently in use and already scheduled // for removal, so we need to wait until at least one of them closes try { this.wait(); } catch (InterruptedException e) { e.printStackTrace(); } } } if (maxConnectedNMs > 0) { cmProxy.put(containerManagerBindAddr, proxy); } } private void updateLRUCache(String containerManagerBindAddr) { if (maxConnectedNMs > 0) { ContainerManagementProtocolProxyData proxy = cmProxy.remove(containerManagerBindAddr); cmProxy.put(containerManagerBindAddr, proxy); } } public synchronized void mayBeCloseProxy( ContainerManagementProtocolProxyData proxy) { tryCloseProxy(proxy); } private boolean tryCloseProxy( ContainerManagementProtocolProxyData proxy) { proxy.activeCallers--; if (proxy.scheduledForClose && proxy.activeCallers < 0) { LOG.info("Closing proxy : " + proxy.containerManagerBindAddr); cmProxy.remove(proxy.containerManagerBindAddr); try { rpc.stopProxy(proxy.getContainerManagementProtocol(), conf); } finally { this.notifyAll(); } return true; } return false; } private synchronized boolean removeProxy( ContainerManagementProtocolProxyData proxy) { if (!proxy.scheduledForClose) { proxy.scheduledForClose = true; return tryCloseProxy(proxy); } return false; } public synchronized void stopAllProxies() { List<String> nodeIds = new ArrayList<String>(); nodeIds.addAll(this.cmProxy.keySet()); for (String nodeId : nodeIds) { ContainerManagementProtocolProxyData proxy = 
cmProxy.get(nodeId); // Explicitly reducing the proxy count to allow stopping proxy. proxy.activeCallers = 0; try { removeProxy(proxy); } catch (Throwable t) { LOG.error("Error closing connection", t); } } cmProxy.clear(); } public class ContainerManagementProtocolProxyData { private final String containerManagerBindAddr; private final ContainerManagementProtocol proxy; private int activeCallers; private boolean scheduledForClose; private final Token token; @Private @VisibleForTesting public ContainerManagementProtocolProxyData(YarnRPC rpc, String containerManagerBindAddr, ContainerId containerId, Token token) throws InvalidToken { this.containerManagerBindAddr = containerManagerBindAddr; ; this.activeCallers = 0; this.scheduledForClose = false; this.token = token; this.proxy = newProxy(rpc, containerManagerBindAddr, containerId, token); } @Private @VisibleForTesting protected ContainerManagementProtocol newProxy(final YarnRPC rpc, String containerManagerBindAddr, ContainerId containerId, Token token) throws InvalidToken { if (token == null) { throw new InvalidToken("No NMToken sent for " + containerManagerBindAddr); } final InetSocketAddress cmAddr = NetUtils.createSocketAddr(containerManagerBindAddr); LOG.info("Opening proxy : " + containerManagerBindAddr); // the user in createRemoteUser in this context has to be ContainerID UserGroupInformation user = UserGroupInformation.createRemoteUser(containerId .getApplicationAttemptId().toString()); org.apache.hadoop.security.token.Token<NMTokenIdentifier> nmToken = ConverterUtils.convertFromYarn(token, cmAddr); user.addToken(nmToken); return NMProxy.createNMProxy(conf, ContainerManagementProtocol.class, user, rpc, cmAddr); } public ContainerManagementProtocol getContainerManagementProtocol() { return proxy; } } }
9,784
33.946429
83
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/NMClientImpl.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.client.api.impl;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.client.api.NMClient;
import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.RPCUtil;

/**
 * <p>
 * This class implements {@link NMClient}. All the APIs are blocking.
 * </p>
 *
 * <p>
 * By default, this client stops all the running containers that are started by
 * it when it stops. It can be disabled via
 * {@link #cleanupRunningContainersOnStop}, in which case containers will
 * continue to run even after this client is stopped and till the application
 * runs at which point ResourceManager will forcefully kill them.
 * </p>
 *
 * <p>
 * Note that the blocking APIs ensure the RPC calls to <code>NodeManager</code>
 * are executed immediately, and the responses are received before these APIs
 * return. However, when {@link #startContainer} or {@link #stopContainer}
 * returns, <code>NodeManager</code> may still need some time to either start
 * or stop the container because of its asynchronous implementation. Therefore,
 * {@link #getContainerStatus} is likely to return a transit container status
 * if it is executed immediately after {@link #startContainer} or
 * {@link #stopContainer}.
 * </p>
 */
@Private
@Unstable
public class NMClientImpl extends NMClient {

  private static final Log LOG = LogFactory.getLog(NMClientImpl.class);

  // The logically coherent operations on startedContainers is synchronized to
  // ensure they are atomic
  protected ConcurrentMap<ContainerId, StartedContainer> startedContainers =
      new ConcurrentHashMap<ContainerId, StartedContainer>();

  //enabled by default
  private final AtomicBoolean cleanupRunningContainers = new AtomicBoolean(true);

  private ContainerManagementProtocolProxy cmProxy;

  public NMClientImpl() {
    super(NMClientImpl.class.getName());
  }

  public NMClientImpl(String name) {
    super(name);
  }

  @Override
  protected void serviceStop() throws Exception {
    // Usually, started-containers are stopped when this client stops. Unless
    // the flag cleanupRunningContainers is set to false.
    if (getCleanupRunningContainers().get()) {
      cleanupRunningContainers();
    }
    cmProxy.stopAllProxies();
    super.serviceStop();
  }

  /**
   * Best-effort stop of every container this client has started. Failures are
   * logged (with the cause) and do not abort the loop, so one bad node does
   * not prevent cleanup of the others.
   */
  protected synchronized void cleanupRunningContainers() {
    for (StartedContainer startedContainer : startedContainers.values()) {
      try {
        stopContainer(startedContainer.getContainerId(),
            startedContainer.getNodeId());
      } catch (YarnException e) {
        // FIX: added the missing space before "when" and attached the caught
        // exception so the failure cause is not lost from the log.
        LOG.error("Failed to stop Container "
            + startedContainer.getContainerId()
            + " when stopping NMClientImpl", e);
      } catch (IOException e) {
        LOG.error("Failed to stop Container "
            + startedContainer.getContainerId()
            + " when stopping NMClientImpl", e);
      }
    }
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    super.serviceInit(conf);
    // The NMTokenCache must be injected before the service is initialized;
    // the proxy layer uses it to authenticate with each NodeManager.
    if (getNMTokenCache() == null) {
      throw new IllegalStateException("NMTokenCache has not been set");
    }
    cmProxy = new ContainerManagementProtocolProxy(conf, getNMTokenCache());
  }

  @Override
  public void cleanupRunningContainersOnStop(boolean enabled) {
    getCleanupRunningContainers().set(enabled);
  }

  /**
   * Book-keeping record for a container this client has started: its id, the
   * node it runs on, and the client-side view of its lifecycle state.
   * Instances are also used as the per-container lock object that serializes
   * startContainer/stopContainer for the same container.
   */
  protected static class StartedContainer {
    private ContainerId containerId;
    private NodeId nodeId;
    private ContainerState state;

    // NOTE(review): containerToken is accepted but currently unused; kept for
    // signature compatibility with existing callers.
    public StartedContainer(ContainerId containerId, NodeId nodeId,
        Token containerToken) {
      this.containerId = containerId;
      this.nodeId = nodeId;
      state = ContainerState.NEW;
    }

    public ContainerId getContainerId() {
      return containerId;
    }

    public NodeId getNodeId() {
      return nodeId;
    }
  }

  /**
   * Atomically registers a starting container, rejecting duplicates.
   *
   * @throws YarnException if a container with the same id was already started
   */
  private void addStartingContainer(StartedContainer startedContainer)
      throws YarnException {
    if (startedContainers.putIfAbsent(startedContainer.containerId,
        startedContainer) != null) {
      throw RPCUtil.getRemoteException("Container "
          + startedContainer.containerId.toString() + " is already started");
    }
    // FIX: removed the redundant put() that followed — putIfAbsent() has
    // already inserted the entry atomically when it returns null.
  }

  @Override
  public Map<String, ByteBuffer> startContainer(
      Container container, ContainerLaunchContext containerLaunchContext)
          throws YarnException, IOException {
    // Do synchronization on StartedContainer to prevent race condition
    // between startContainer and stopContainer only when startContainer is
    // in progress for a given container.
    StartedContainer startingContainer = createStartedContainer(container);
    synchronized (startingContainer) {
      addStartingContainer(startingContainer);

      Map<String, ByteBuffer> allServiceResponse;
      ContainerManagementProtocolProxyData proxy = null;
      try {
        proxy = cmProxy.getProxy(container.getNodeId().toString(),
            container.getId());
        StartContainerRequest scRequest =
            StartContainerRequest.newInstance(containerLaunchContext,
                container.getContainerToken());
        List<StartContainerRequest> list =
            new ArrayList<StartContainerRequest>();
        list.add(scRequest);
        StartContainersRequest allRequests =
            StartContainersRequest.newInstance(list);
        StartContainersResponse response =
            proxy.getContainerManagementProtocol().startContainers(allRequests);
        // The bulk API reports per-container failures in the response; surface
        // ours as the exception type it was serialized from.
        if (response.getFailedRequests() != null
            && response.getFailedRequests().containsKey(container.getId())) {
          Throwable t = response.getFailedRequests().get(container.getId())
              .deSerialize();
          parseAndThrowException(t);
        }
        allServiceResponse = response.getAllServicesMetaData();
        startingContainer.state = ContainerState.RUNNING;
      } catch (YarnException e) {
        startingContainer.state = ContainerState.COMPLETE;
        // Remove the started container if it failed to start
        removeStartedContainer(startingContainer);
        throw e;
      } catch (IOException e) {
        startingContainer.state = ContainerState.COMPLETE;
        removeStartedContainer(startingContainer);
        throw e;
      } catch (Throwable t) {
        startingContainer.state = ContainerState.COMPLETE;
        removeStartedContainer(startingContainer);
        throw RPCUtil.getRemoteException(t);
      } finally {
        // Always release the proxy ref-count, even on failure paths.
        if (proxy != null) {
          cmProxy.mayBeCloseProxy(proxy);
        }
      }
      return allServiceResponse;
    }
  }

  @Override
  public void stopContainer(ContainerId containerId, NodeId nodeId)
      throws YarnException, IOException {
    StartedContainer startedContainer = getStartedContainer(containerId);

    // Only allow one request of stopping the container to move forward
    // When entering the block, check whether the precursor has already stopped
    // the container
    if (startedContainer != null) {
      synchronized (startedContainer) {
        if (startedContainer.state != ContainerState.RUNNING) {
          return;
        }
        stopContainerInternal(containerId, nodeId);
        // Only after successful
        startedContainer.state = ContainerState.COMPLETE;
        removeStartedContainer(startedContainer);
      }
    } else {
      // Not tracked by this client (e.g. started elsewhere); still issue the
      // stop request.
      stopContainerInternal(containerId, nodeId);
    }
  }

  @Override
  public ContainerStatus getContainerStatus(ContainerId containerId,
      NodeId nodeId) throws YarnException, IOException {
    ContainerManagementProtocolProxyData proxy = null;
    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(containerId);
    try {
      proxy = cmProxy.getProxy(nodeId.toString(), containerId);
      GetContainerStatusesResponse response =
          proxy.getContainerManagementProtocol().getContainerStatuses(
              GetContainerStatusesRequest.newInstance(containerIds));
      if (response.getFailedRequests() != null
          && response.getFailedRequests().containsKey(containerId)) {
        Throwable t =
            response.getFailedRequests().get(containerId).deSerialize();
        parseAndThrowException(t);
      }
      ContainerStatus containerStatus =
          response.getContainerStatuses().get(0);
      return containerStatus;
    } finally {
      if (proxy != null) {
        cmProxy.mayBeCloseProxy(proxy);
      }
    }
  }

  /**
   * Issues the actual stopContainers RPC for a single container and rethrows
   * any per-container failure reported in the bulk response.
   */
  private void stopContainerInternal(ContainerId containerId, NodeId nodeId)
      throws IOException, YarnException {
    ContainerManagementProtocolProxyData proxy = null;
    List<ContainerId> containerIds = new ArrayList<ContainerId>();
    containerIds.add(containerId);
    try {
      proxy = cmProxy.getProxy(nodeId.toString(), containerId);
      StopContainersResponse response =
          proxy.getContainerManagementProtocol().stopContainers(
              StopContainersRequest.newInstance(containerIds));
      if (response.getFailedRequests() != null
          && response.getFailedRequests().containsKey(containerId)) {
        Throwable t = response.getFailedRequests().get(containerId)
            .deSerialize();
        parseAndThrowException(t);
      }
    } finally {
      if (proxy != null) {
        cmProxy.mayBeCloseProxy(proxy);
      }
    }
  }

  protected synchronized StartedContainer createStartedContainer(
      Container container) throws YarnException, IOException {
    StartedContainer startedContainer = new StartedContainer(container.getId(),
        container.getNodeId(), container.getContainerToken());
    return startedContainer;
  }

  protected synchronized void removeStartedContainer(
      StartedContainer container) {
    startedContainers.remove(container.containerId);
  }

  protected synchronized StartedContainer getStartedContainer(
      ContainerId containerId) {
    return startedContainers.get(containerId);
  }

  public AtomicBoolean getCleanupRunningContainers() {
    return cleanupRunningContainers;
  }

  /**
   * Rethrows a deserialized remote failure as the most specific of the
   * checked types this client's API declares.
   */
  private void parseAndThrowException(Throwable t) throws YarnException,
      IOException {
    if (t instanceof YarnException) {
      throw (YarnException) t;
    } else if (t instanceof InvalidToken) {
      throw (InvalidToken) t;
    } else {
      throw (IOException) t;
    }
  }
}
12,784
36.602941
116
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.client.api.impl;

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.api.ClientSCMProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest;
import org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceRequest;
import org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.SharedCacheClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.sharedcache.SharedCacheChecksum;
import org.apache.hadoop.yarn.sharedcache.SharedCacheChecksumFactory;
import org.apache.hadoop.yarn.util.Records;

import com.google.common.annotations.VisibleForTesting;

/**
 * An implementation of the SharedCacheClient API.
 *
 * Talks to the Shared Cache Manager over the ClientSCMProtocol RPC to claim
 * ({@code use}) and give back ({@code release}) cached resources, and computes
 * the checksum used as the cache key for a candidate file.
 */
@Private
@Unstable
public class SharedCacheClientImpl extends SharedCacheClient {
  private static final Log LOG = LogFactory
      .getLog(SharedCacheClientImpl.class);

  private ClientSCMProtocol scmClient;
  private InetSocketAddress scmAddress;
  private Configuration conf;
  private SharedCacheChecksum checksum;

  public SharedCacheClientImpl() {
    super(SharedCacheClientImpl.class.getName());
  }

  /** Reads the SCM client-server endpoint from the configuration. */
  private static InetSocketAddress getScmAddress(Configuration conf) {
    return conf.getSocketAddr(YarnConfiguration.SCM_CLIENT_SERVER_ADDRESS,
        YarnConfiguration.DEFAULT_SCM_CLIENT_SERVER_ADDRESS,
        YarnConfiguration.DEFAULT_SCM_CLIENT_SERVER_PORT);
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    // A test may have injected an address already; only resolve from the
    // configuration when none was set.
    if (this.scmAddress == null) {
      this.scmAddress = getScmAddress(conf);
    }
    this.conf = conf;
    this.checksum = SharedCacheChecksumFactory.getChecksum(conf);
    super.serviceInit(conf);
  }

  @Override
  protected void serviceStart() throws Exception {
    this.scmClient = createClientProxy();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Connecting to Shared Cache Manager at " + this.scmAddress);
    }
    super.serviceStart();
  }

  @Override
  protected void serviceStop() throws Exception {
    stopClientProxy();
    super.serviceStop();
  }

  /** Builds the RPC proxy to the SCM; overridable for tests. */
  @VisibleForTesting
  protected ClientSCMProtocol createClientProxy() {
    YarnRPC rpc = YarnRPC.create(getConfig());
    return (ClientSCMProtocol) rpc.getProxy(ClientSCMProtocol.class,
        this.scmAddress, getConfig());
  }

  /** Tears down the RPC proxy, if one is active; overridable for tests. */
  @VisibleForTesting
  protected void stopClientProxy() {
    if (this.scmClient != null) {
      RPC.stopProxy(this.scmClient);
      this.scmClient = null;
    }
  }

  @Override
  public Path use(ApplicationId applicationId, String resourceKey)
      throws YarnException {
    UseSharedCacheResourceRequest req = Records.newRecord(
        UseSharedCacheResourceRequest.class);
    req.setAppId(applicationId);
    req.setResourceKey(resourceKey);

    // Null result means the resource is not in the cache (or the SCM gave no
    // usable answer).
    Path cachedPath = null;
    try {
      UseSharedCacheResourceResponse resp = this.scmClient.use(req);
      if (resp != null && resp.getPath() != null) {
        cachedPath = new Path(resp.getPath());
      }
    } catch (Exception e) {
      // Just catching IOException isn't enough.
      // RPC call can throw ConnectionException.
      // We don't handle different exceptions separately at this point.
      throw new YarnException(e);
    }
    return cachedPath;
  }

  @Override
  public void release(ApplicationId applicationId, String resourceKey)
      throws YarnException {
    ReleaseSharedCacheResourceRequest req = Records.newRecord(
        ReleaseSharedCacheResourceRequest.class);
    req.setAppId(applicationId);
    req.setResourceKey(resourceKey);
    try {
      // We do not care about the response because it is empty.
      this.scmClient.release(req);
    } catch (Exception e) {
      // Just catching IOException isn't enough.
      // RPC call can throw ConnectionException.
      throw new YarnException(e);
    }
  }

  @Override
  public String getFileChecksum(Path sourceFile)
      throws IOException {
    FileSystem fileSystem = sourceFile.getFileSystem(this.conf);
    FSDataInputStream input = null;
    try {
      input = fileSystem.open(sourceFile);
      return this.checksum.computeChecksum(input);
    } finally {
      // Close the stream even when computeChecksum throws.
      if (input != null) {
        input.close();
      }
    }
  }
}
5,686
33.053892
84
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.client.api.impl; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.EnumSet; import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataInputByteBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse; import 
org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest; import 
org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.client.api.AHSClient; import org.apache.hadoop.yarn.client.api.TimelineClient; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.client.api.YarnClientApplication; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException; import 
org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.timeline.TimelineUtils; import com.google.common.annotations.VisibleForTesting; @Private @Unstable public class YarnClientImpl extends YarnClient { private static final Log LOG = LogFactory.getLog(YarnClientImpl.class); protected ApplicationClientProtocol rmClient; protected long submitPollIntervalMillis; private long asyncApiPollIntervalMillis; private long asyncApiPollTimeoutMillis; protected AHSClient historyClient; private boolean historyServiceEnabled; protected TimelineClient timelineClient; @VisibleForTesting Text timelineService; @VisibleForTesting String timelineDTRenewer; protected boolean timelineServiceEnabled; protected boolean timelineServiceBestEffort; private static final String ROOT = "root"; public YarnClientImpl() { super(YarnClientImpl.class.getName()); } @SuppressWarnings("deprecation") @Override protected void serviceInit(Configuration conf) throws Exception { asyncApiPollIntervalMillis = conf.getLong(YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS, YarnConfiguration.DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS); asyncApiPollTimeoutMillis = conf.getLong(YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS, YarnConfiguration.DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS); submitPollIntervalMillis = asyncApiPollIntervalMillis; if (conf.get(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS) != null) { submitPollIntervalMillis = 
conf.getLong( YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS, YarnConfiguration.DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS); } if (conf.getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED, YarnConfiguration.DEFAULT_APPLICATION_HISTORY_ENABLED)) { historyServiceEnabled = true; historyClient = AHSClient.createAHSClient(); historyClient.init(conf); } if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) { timelineServiceEnabled = true; timelineClient = createTimelineClient(); timelineClient.init(conf); timelineDTRenewer = getTimelineDelegationTokenRenewer(conf); timelineService = TimelineUtils.buildTimelineTokenService(conf); } timelineServiceBestEffort = conf.getBoolean( YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, YarnConfiguration.DEFAULT_TIMELINE_SERVICE_CLIENT_BEST_EFFORT); super.serviceInit(conf); } TimelineClient createTimelineClient() throws IOException, YarnException { return TimelineClient.createTimelineClient(); } @Override protected void serviceStart() throws Exception { try { rmClient = ClientRMProxy.createRMProxy(getConfig(), ApplicationClientProtocol.class); if (historyServiceEnabled) { historyClient.start(); } if (timelineServiceEnabled) { timelineClient.start(); } } catch (IOException e) { throw new YarnRuntimeException(e); } super.serviceStart(); } @Override protected void serviceStop() throws Exception { if (this.rmClient != null) { RPC.stopProxy(this.rmClient); } if (historyServiceEnabled) { historyClient.stop(); } if (timelineServiceEnabled) { timelineClient.stop(); } super.serviceStop(); } private GetNewApplicationResponse getNewApplication() throws YarnException, IOException { GetNewApplicationRequest request = Records.newRecord(GetNewApplicationRequest.class); return rmClient.getNewApplication(request); } @Override public YarnClientApplication createApplication() throws YarnException, IOException { ApplicationSubmissionContext 
context = Records.newRecord (ApplicationSubmissionContext.class); GetNewApplicationResponse newApp = getNewApplication(); ApplicationId appId = newApp.getApplicationId(); context.setApplicationId(appId); return new YarnClientApplication(newApp, context); } @Override public ApplicationId submitApplication(ApplicationSubmissionContext appContext) throws YarnException, IOException { ApplicationId applicationId = appContext.getApplicationId(); if (applicationId == null) { throw new ApplicationIdNotProvidedException( "ApplicationId is not provided in ApplicationSubmissionContext"); } SubmitApplicationRequest request = Records.newRecord(SubmitApplicationRequest.class); request.setApplicationSubmissionContext(appContext); // Automatically add the timeline DT into the CLC // Only when the security and the timeline service are both enabled if (isSecurityEnabled() && timelineServiceEnabled) { addTimelineDelegationToken(appContext.getAMContainerSpec()); } //TODO: YARN-1763:Handle RM failovers during the submitApplication call. 
rmClient.submitApplication(request); int pollCount = 0; long startTime = System.currentTimeMillis(); EnumSet<YarnApplicationState> waitingStates = EnumSet.of(YarnApplicationState.NEW, YarnApplicationState.NEW_SAVING, YarnApplicationState.SUBMITTED); EnumSet<YarnApplicationState> failToSubmitStates = EnumSet.of(YarnApplicationState.FAILED, YarnApplicationState.KILLED); while (true) { try { ApplicationReport appReport = getApplicationReport(applicationId); YarnApplicationState state = appReport.getYarnApplicationState(); if (!waitingStates.contains(state)) { if(failToSubmitStates.contains(state)) { throw new YarnException("Failed to submit " + applicationId + " to YARN : " + appReport.getDiagnostics()); } LOG.info("Submitted application " + applicationId); break; } long elapsedMillis = System.currentTimeMillis() - startTime; if (enforceAsyncAPITimeout() && elapsedMillis >= asyncApiPollTimeoutMillis) { throw new YarnException("Timed out while waiting for application " + applicationId + " to be submitted successfully"); } // Notify the client through the log every 10 poll, in case the client // is blocked here too long. 
if (++pollCount % 10 == 0) { LOG.info("Application submission is not finished, " + "submitted application " + applicationId + " is still in " + state); } try { Thread.sleep(submitPollIntervalMillis); } catch (InterruptedException ie) { LOG.error("Interrupted while waiting for application " + applicationId + " to be successfully submitted."); } } catch (ApplicationNotFoundException ex) { // FailOver or RM restart happens before RMStateStore saves // ApplicationState LOG.info("Re-submit application " + applicationId + "with the " + "same ApplicationSubmissionContext"); rmClient.submitApplication(request); } } return applicationId; } private void addTimelineDelegationToken( ContainerLaunchContext clc) throws YarnException, IOException { Credentials credentials = new Credentials(); DataInputByteBuffer dibb = new DataInputByteBuffer(); ByteBuffer tokens = clc.getTokens(); if (tokens != null) { dibb.reset(tokens); credentials.readTokenStorageStream(dibb); tokens.rewind(); } // If the timeline delegation token is already in the CLC, no need to add // one more for (org.apache.hadoop.security.token.Token<? 
extends TokenIdentifier> token : credentials .getAllTokens()) { if (token.getKind().equals(TimelineDelegationTokenIdentifier.KIND_NAME)) { return; } } org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier> timelineDelegationToken = getTimelineDelegationToken(); if (timelineDelegationToken == null) { return; } credentials.addToken(timelineService, timelineDelegationToken); if (LOG.isDebugEnabled()) { LOG.debug("Add timline delegation token into credentials: " + timelineDelegationToken); } DataOutputBuffer dob = new DataOutputBuffer(); credentials.writeTokenStorageToStream(dob); tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); clc.setTokens(tokens); } @VisibleForTesting org.apache.hadoop.security.token.Token<TimelineDelegationTokenIdentifier> getTimelineDelegationToken() throws IOException, YarnException { try { return timelineClient.getDelegationToken(timelineDTRenewer); } catch (Exception e ) { if (timelineServiceBestEffort) { LOG.warn("Failed to get delegation token from the timeline server: " + e.getMessage()); return null; } throw e; } } private static String getTimelineDelegationTokenRenewer(Configuration conf) throws IOException, YarnException { // Parse the RM daemon user if it exists in the config String rmPrincipal = conf.get(YarnConfiguration.RM_PRINCIPAL); String renewer = null; if (rmPrincipal != null && rmPrincipal.length() > 0) { String rmHost = conf.getSocketAddr( YarnConfiguration.RM_ADDRESS, YarnConfiguration.DEFAULT_RM_ADDRESS, YarnConfiguration.DEFAULT_RM_PORT).getHostName(); renewer = SecurityUtil.getServerPrincipal(rmPrincipal, rmHost); } return renewer; } @Private @VisibleForTesting protected boolean isSecurityEnabled() { return UserGroupInformation.isSecurityEnabled(); } @Override public void killApplication(ApplicationId applicationId) throws YarnException, IOException { KillApplicationRequest request = Records.newRecord(KillApplicationRequest.class); request.setApplicationId(applicationId); try { int pollCount 
= 0; long startTime = System.currentTimeMillis(); while (true) { KillApplicationResponse response = rmClient.forceKillApplication(request); if (response.getIsKillCompleted()) { LOG.info("Killed application " + applicationId); break; } long elapsedMillis = System.currentTimeMillis() - startTime; if (enforceAsyncAPITimeout() && elapsedMillis >= this.asyncApiPollTimeoutMillis) { throw new YarnException("Timed out while waiting for application " + applicationId + " to be killed."); } if (++pollCount % 10 == 0) { LOG.info("Waiting for application " + applicationId + " to be killed."); } Thread.sleep(asyncApiPollIntervalMillis); } } catch (InterruptedException e) { LOG.error("Interrupted while waiting for application " + applicationId + " to be killed."); } } @VisibleForTesting boolean enforceAsyncAPITimeout() { return asyncApiPollTimeoutMillis >= 0; } @Override public ApplicationReport getApplicationReport(ApplicationId appId) throws YarnException, IOException { GetApplicationReportResponse response = null; try { GetApplicationReportRequest request = Records .newRecord(GetApplicationReportRequest.class); request.setApplicationId(appId); response = rmClient.getApplicationReport(request); } catch (YarnException e) { if (!historyServiceEnabled) { // Just throw it as usual if historyService is not enabled. 
throw e; } // Even if history-service is enabled, treat all exceptions still the same // except the following if (!(e.getClass() == ApplicationNotFoundException.class)) { throw e; } return historyClient.getApplicationReport(appId); } return response.getApplicationReport(); } public org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> getAMRMToken(ApplicationId appId) throws YarnException, IOException { Token token = getApplicationReport(appId).getAMRMToken(); org.apache.hadoop.security.token.Token<AMRMTokenIdentifier> amrmToken = null; if (token != null) { amrmToken = ConverterUtils.convertFromYarn(token, (Text) null); } return amrmToken; } @Override public List<ApplicationReport> getApplications() throws YarnException, IOException { return getApplications(null, null); } @Override public List<ApplicationReport> getApplications(Set<String> applicationTypes) throws YarnException, IOException { return getApplications(applicationTypes, null); } @Override public List<ApplicationReport> getApplications( EnumSet<YarnApplicationState> applicationStates) throws YarnException, IOException { return getApplications(null, applicationStates); } @Override public List<ApplicationReport> getApplications(Set<String> applicationTypes, EnumSet<YarnApplicationState> applicationStates) throws YarnException, IOException { GetApplicationsRequest request = GetApplicationsRequest.newInstance(applicationTypes, applicationStates); GetApplicationsResponse response = rmClient.getApplications(request); return response.getApplicationList(); } @Override public List<ApplicationReport> getApplications(Set<String> queues, Set<String> users, Set<String> applicationTypes, EnumSet<YarnApplicationState> applicationStates) throws YarnException, IOException { GetApplicationsRequest request = GetApplicationsRequest.newInstance(applicationTypes, applicationStates); request.setQueues(queues); request.setUsers(users); GetApplicationsResponse response = rmClient.getApplications(request); return 
response.getApplicationList(); } @Override public YarnClusterMetrics getYarnClusterMetrics() throws YarnException, IOException { GetClusterMetricsRequest request = Records.newRecord(GetClusterMetricsRequest.class); GetClusterMetricsResponse response = rmClient.getClusterMetrics(request); return response.getClusterMetrics(); } @Override public List<NodeReport> getNodeReports(NodeState... states) throws YarnException, IOException { EnumSet<NodeState> statesSet = (states.length == 0) ? EnumSet.allOf(NodeState.class) : EnumSet.noneOf(NodeState.class); for (NodeState state : states) { statesSet.add(state); } GetClusterNodesRequest request = GetClusterNodesRequest .newInstance(statesSet); GetClusterNodesResponse response = rmClient.getClusterNodes(request); return response.getNodeReports(); } @Override public Token getRMDelegationToken(Text renewer) throws YarnException, IOException { /* get the token from RM */ GetDelegationTokenRequest rmDTRequest = Records.newRecord(GetDelegationTokenRequest.class); rmDTRequest.setRenewer(renewer.toString()); GetDelegationTokenResponse response = rmClient.getDelegationToken(rmDTRequest); return response.getRMDelegationToken(); } private GetQueueInfoRequest getQueueInfoRequest(String queueName, boolean includeApplications, boolean includeChildQueues, boolean recursive) { GetQueueInfoRequest request = Records.newRecord(GetQueueInfoRequest.class); request.setQueueName(queueName); request.setIncludeApplications(includeApplications); request.setIncludeChildQueues(includeChildQueues); request.setRecursive(recursive); return request; } @Override public QueueInfo getQueueInfo(String queueName) throws YarnException, IOException { GetQueueInfoRequest request = getQueueInfoRequest(queueName, true, false, false); Records.newRecord(GetQueueInfoRequest.class); return rmClient.getQueueInfo(request).getQueueInfo(); } @Override public List<QueueUserACLInfo> getQueueAclsInfo() throws YarnException, IOException { GetQueueUserAclsInfoRequest request = 
Records.newRecord(GetQueueUserAclsInfoRequest.class); return rmClient.getQueueUserAcls(request).getUserAclsInfoList(); } @Override public List<QueueInfo> getAllQueues() throws YarnException, IOException { List<QueueInfo> queues = new ArrayList<QueueInfo>(); QueueInfo rootQueue = rmClient.getQueueInfo(getQueueInfoRequest(ROOT, false, true, true)) .getQueueInfo(); getChildQueues(rootQueue, queues, true); return queues; } @Override public List<QueueInfo> getRootQueueInfos() throws YarnException, IOException { List<QueueInfo> queues = new ArrayList<QueueInfo>(); QueueInfo rootQueue = rmClient.getQueueInfo(getQueueInfoRequest(ROOT, false, true, true)) .getQueueInfo(); getChildQueues(rootQueue, queues, false); return queues; } @Override public List<QueueInfo> getChildQueueInfos(String parent) throws YarnException, IOException { List<QueueInfo> queues = new ArrayList<QueueInfo>(); QueueInfo parentQueue = rmClient.getQueueInfo(getQueueInfoRequest(parent, false, true, false)) .getQueueInfo(); getChildQueues(parentQueue, queues, true); return queues; } private void getChildQueues(QueueInfo parent, List<QueueInfo> queues, boolean recursive) { List<QueueInfo> childQueues = parent.getChildQueues(); for (QueueInfo child : childQueues) { queues.add(child); if (recursive) { getChildQueues(child, queues, recursive); } } } @Private @VisibleForTesting public void setRMClient(ApplicationClientProtocol rmClient) { this.rmClient = rmClient; } @Override public ApplicationAttemptReport getApplicationAttemptReport( ApplicationAttemptId appAttemptId) throws YarnException, IOException { try { GetApplicationAttemptReportRequest request = Records .newRecord(GetApplicationAttemptReportRequest.class); request.setApplicationAttemptId(appAttemptId); GetApplicationAttemptReportResponse response = rmClient .getApplicationAttemptReport(request); return response.getApplicationAttemptReport(); } catch (YarnException e) { if (!historyServiceEnabled) { // Just throw it as usual if historyService is not 
enabled. throw e; } // Even if history-service is enabled, treat all exceptions still the same // except the following if (e.getClass() != ApplicationNotFoundException.class) { throw e; } return historyClient.getApplicationAttemptReport(appAttemptId); } } @Override public List<ApplicationAttemptReport> getApplicationAttempts( ApplicationId appId) throws YarnException, IOException { try { GetApplicationAttemptsRequest request = Records .newRecord(GetApplicationAttemptsRequest.class); request.setApplicationId(appId); GetApplicationAttemptsResponse response = rmClient .getApplicationAttempts(request); return response.getApplicationAttemptList(); } catch (YarnException e) { if (!historyServiceEnabled) { // Just throw it as usual if historyService is not enabled. throw e; } // Even if history-service is enabled, treat all exceptions still the same // except the following if (e.getClass() != ApplicationNotFoundException.class) { throw e; } return historyClient.getApplicationAttempts(appId); } } @Override public ContainerReport getContainerReport(ContainerId containerId) throws YarnException, IOException { try { GetContainerReportRequest request = Records .newRecord(GetContainerReportRequest.class); request.setContainerId(containerId); GetContainerReportResponse response = rmClient .getContainerReport(request); return response.getContainerReport(); } catch (YarnException e) { if (!historyServiceEnabled) { // Just throw it as usual if historyService is not enabled. 
throw e; } // Even if history-service is enabled, treat all exceptions still the same // except the following if (e.getClass() != ApplicationNotFoundException.class && e.getClass() != ContainerNotFoundException.class) { throw e; } return historyClient.getContainerReport(containerId); } } @Override public List<ContainerReport> getContainers( ApplicationAttemptId applicationAttemptId) throws YarnException, IOException { List<ContainerReport> containersForAttempt = new ArrayList<ContainerReport>(); boolean appNotFoundInRM = false; try { GetContainersRequest request = Records.newRecord(GetContainersRequest.class); request.setApplicationAttemptId(applicationAttemptId); GetContainersResponse response = rmClient.getContainers(request); containersForAttempt.addAll(response.getContainerList()); } catch (YarnException e) { if (e.getClass() != ApplicationNotFoundException.class || !historyServiceEnabled) { // If Application is not in RM and history service is enabled then we // need to check with history service else throw exception. throw e; } appNotFoundInRM = true; } if (historyServiceEnabled) { // Check with AHS even if found in RM because to capture info of finished // containers also List<ContainerReport> containersListFromAHS = null; try { containersListFromAHS = historyClient.getContainers(applicationAttemptId); } catch (IOException e) { // History service access might be enabled but system metrics publisher // is disabled hence app not found exception is possible if (appNotFoundInRM) { // app not found in bothM and RM then propagate the exception. 
throw e; } } if (null != containersListFromAHS && containersListFromAHS.size() > 0) { // remove duplicates Set<ContainerId> containerIdsToBeKeptFromAHS = new HashSet<ContainerId>(); Iterator<ContainerReport> tmpItr = containersListFromAHS.iterator(); while (tmpItr.hasNext()) { containerIdsToBeKeptFromAHS.add(tmpItr.next().getContainerId()); } Iterator<ContainerReport> rmContainers = containersForAttempt.iterator(); while (rmContainers.hasNext()) { ContainerReport tmp = rmContainers.next(); containerIdsToBeKeptFromAHS.remove(tmp.getContainerId()); // Remove containers from AHS as container from RM will have latest // information } if (containerIdsToBeKeptFromAHS.size() > 0 && containersListFromAHS.size() != containerIdsToBeKeptFromAHS .size()) { Iterator<ContainerReport> containersFromHS = containersListFromAHS.iterator(); while (containersFromHS.hasNext()) { ContainerReport containerReport = containersFromHS.next(); if (containerIdsToBeKeptFromAHS.contains(containerReport .getContainerId())) { containersForAttempt.add(containerReport); } } } else if (containersListFromAHS.size() == containerIdsToBeKeptFromAHS .size()) { containersForAttempt.addAll(containersListFromAHS); } } } return containersForAttempt; } @Override public void moveApplicationAcrossQueues(ApplicationId appId, String queue) throws YarnException, IOException { MoveApplicationAcrossQueuesRequest request = MoveApplicationAcrossQueuesRequest.newInstance(appId, queue); rmClient.moveApplicationAcrossQueues(request); } @Override public ReservationSubmissionResponse submitReservation( ReservationSubmissionRequest request) throws YarnException, IOException { return rmClient.submitReservation(request); } @Override public ReservationUpdateResponse updateReservation( ReservationUpdateRequest request) throws YarnException, IOException { return rmClient.updateReservation(request); } @Override public ReservationDeleteResponse deleteReservation( ReservationDeleteRequest request) throws YarnException, IOException { 
return rmClient.deleteReservation(request); } @Override public Map<NodeId, Set<NodeLabel>> getNodeToLabels() throws YarnException, IOException { return rmClient.getNodeToLabels(GetNodesToLabelsRequest.newInstance()) .getNodeToLabels(); } @Override public Map<NodeLabel, Set<NodeId>> getLabelsToNodes() throws YarnException, IOException { return rmClient.getLabelsToNodes(GetLabelsToNodesRequest.newInstance()) .getLabelsToNodes(); } @Override public Map<NodeLabel, Set<NodeId>> getLabelsToNodes(Set<String> labels) throws YarnException, IOException { return rmClient.getLabelsToNodes( GetLabelsToNodesRequest.newInstance(labels)).getLabelsToNodes(); } @Override public List<NodeLabel> getClusterNodeLabels() throws YarnException, IOException { return rmClient.getClusterNodeLabels( GetClusterNodeLabelsRequest.newInstance()).getNodeLabels(); } }
32,074
37.925971
96
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AHSClientImpl.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.client.api.impl;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.client.AHSProxy;
import org.apache.hadoop.yarn.client.api.AHSClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

/**
 * Client for reading application, application-attempt and container reports
 * from the Application History Server. Every read method is a thin RPC
 * delegate: it builds the corresponding protocol-record request, forwards it
 * over {@link ApplicationHistoryProtocol}, and unwraps the response.
 *
 * <p>The server address is resolved from the timeline-service keys in
 * {@link YarnConfiguration} during {@code serviceInit}; the RPC proxy is
 * created in {@code serviceStart} and stopped in {@code serviceStop}.
 */
@Private
@Unstable
public class AHSClientImpl extends AHSClient {

  // RPC proxy to the history server; created in serviceStart, may be null
  // before start / after stop.
  protected ApplicationHistoryProtocol ahsClient;
  // Resolved timeline-service address; set in serviceInit.
  protected InetSocketAddress ahsAddress;

  public AHSClientImpl() {
    super(AHSClientImpl.class.getName());
  }

  /** Resolves the history server address from the timeline-service config. */
  private static InetSocketAddress getAHSAddress(Configuration conf) {
    return conf.getSocketAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT);
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    this.ahsAddress = getAHSAddress(conf);
    super.serviceInit(conf);
  }

  @Override
  protected void serviceStart() throws Exception {
    try {
      ahsClient = AHSProxy.createAHSProxy(getConfig(),
          ApplicationHistoryProtocol.class, this.ahsAddress);
    } catch (IOException e) {
      // Service lifecycle methods surface startup failures as unchecked
      // YarnRuntimeException.
      throw new YarnRuntimeException(e);
    }
    super.serviceStart();
  }

  @Override
  protected void serviceStop() throws Exception {
    if (this.ahsClient != null) {
      RPC.stopProxy(this.ahsClient);
    }
    super.serviceStop();
  }

  /**
   * Fetches the report of a single application from the history server.
   *
   * @param appId application to look up
   * @return the application report returned by the server
   * @throws YarnException on server-side errors (e.g. unknown application)
   * @throws IOException on RPC/transport errors
   */
  @Override
  public ApplicationReport getApplicationReport(ApplicationId appId)
      throws YarnException, IOException {
    GetApplicationReportRequest request = GetApplicationReportRequest
        .newInstance(appId);
    GetApplicationReportResponse response = ahsClient
        .getApplicationReport(request);
    return response.getApplicationReport();
  }

  /**
   * Lists all applications known to the history server
   * (no type/state filtering — both filters are passed as null).
   */
  @Override
  public List<ApplicationReport> getApplications() throws YarnException,
      IOException {
    GetApplicationsRequest request = GetApplicationsRequest.newInstance(null,
        null);
    GetApplicationsResponse response = ahsClient.getApplications(request);
    return response.getApplicationList();
  }

  /**
   * Fetches the report of a single application attempt.
   *
   * @param applicationAttemptId attempt to look up
   */
  @Override
  public ApplicationAttemptReport getApplicationAttemptReport(
      ApplicationAttemptId applicationAttemptId) throws YarnException,
      IOException {
    GetApplicationAttemptReportRequest request = GetApplicationAttemptReportRequest
        .newInstance(applicationAttemptId);
    GetApplicationAttemptReportResponse response = ahsClient
        .getApplicationAttemptReport(request);
    return response.getApplicationAttemptReport();
  }

  /** Lists all attempts of the given application. */
  @Override
  public List<ApplicationAttemptReport> getApplicationAttempts(
      ApplicationId appId) throws YarnException, IOException {
    GetApplicationAttemptsRequest request = GetApplicationAttemptsRequest
        .newInstance(appId);
    GetApplicationAttemptsResponse response = ahsClient
        .getApplicationAttempts(request);
    return response.getApplicationAttemptList();
  }

  /** Fetches the report of a single container. */
  @Override
  public ContainerReport getContainerReport(ContainerId containerId)
      throws YarnException, IOException {
    GetContainerReportRequest request = GetContainerReportRequest
        .newInstance(containerId);
    GetContainerReportResponse response = ahsClient.getContainerReport(request);
    return response.getContainerReport();
  }

  /** Lists all containers of the given application attempt. */
  @Override
  public List<ContainerReport> getContainers(
      ApplicationAttemptId applicationAttemptId) throws YarnException,
      IOException {
    GetContainersRequest request = GetContainersRequest
        .newInstance(applicationAttemptId);
    GetContainersResponse response = ahsClient.getContainers(request);
    return response.getContainerList();
  }
}
6,184
38.647436
86
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/APIClient.java
package com.riftcat.vridge.api.client.java;

import com.riftcat.vridge.api.client.java.control.ControlRequestCode;
import com.riftcat.vridge.api.client.java.control.ControlResponseCode;
import com.riftcat.vridge.api.client.java.control.requests.ControlRequestHeader;
import com.riftcat.vridge.api.client.java.control.requests.RequestEndpoint;
import com.riftcat.vridge.api.client.java.control.responses.APIStatus;
import com.riftcat.vridge.api.client.java.control.responses.EndpointCreated;
import com.riftcat.vridge.api.client.java.proxy.BroadcastProxy;
import com.riftcat.vridge.api.client.java.proxy.ClientProxyBase;
import com.riftcat.vridge.api.client.java.proxy.ControllerProxy;
import com.riftcat.vridge.api.client.java.proxy.HeadTrackingProxy;
import com.riftcat.vridge.api.client.java.proxy.VRidgeApiProxy;
import com.riftcat.vridge.api.client.java.utils.ILog;
import com.riftcat.vridge.api.client.java.utils.SocketHelpers;

import org.zeromq.ZContext;
import org.zeromq.ZMQ;

import java.util.HashMap;
import java.util.LinkedList;
import java.util.concurrent.TimeoutException;

/**
 * Entry point for talking to a VRidge server: negotiates endpoints over a
 * short-lived ZeroMQ REQ "control" socket (port 38219) and hands out cached
 * per-endpoint proxies (head tracking, controllers, broadcasts).
 */
public class APIClient {

    // Proxy type ids accepted by getProxy / disconnectProxy.
    public final static int HEADTRACKING = 0;
    public final static int CONTROLLER = 1;
    public final static int BROADCASTS = 2;

    // Shared ZeroMQ context; (re)assigned by every constructor call.
    public static ZContext ZContext;

    // Cache of created proxies, keyed by proxy type; a null value means
    // "previously disconnected".
    private HashMap<Integer, VRidgeApiProxy> proxies;

    // Host (or IP) of the VRidge server; the "tcp://" scheme and port are
    // appended where addresses are built.
    // FIX: was "tcp://localhost", which produced malformed addresses like
    // "tcp://tcp://localhost:38219" when concatenated below.
    private String serverAddress = "localhost";

    // Connections with same app name will not result in "endpoint in use" response
    private String appName = "";

    public APIClient(String appName){
        ZContext = new ZContext(4);
        proxies = new HashMap<Integer, VRidgeApiProxy>();
        this.appName = appName;
    }

    public APIClient(String ip, String appName){
        this(appName);
        serverAddress = ip;
    }

    /**
     * Sends a control request to see what APIs are available.
     * May return null if the control connection dies (automatic reconnect will follow).
     *
     * @return current API status as reported by the server
     * @throws Exception if the status response cannot be read
     */
    public APIStatus GetStatus() throws Exception {
        ZMQ.Socket controlSocket = createControlSocket();

        if (controlSocket == null) {
            return null;
        }

        // FIX: destroy the socket on every path; previously it leaked when
        // ReceiveByJson threw or when the status was null.
        try {
            SocketHelpers.SendAsJson(controlSocket,
                    new ControlRequestHeader(ControlRequestCode.RequestStatus));
            APIStatus status = SocketHelpers.ReceiveByJson(controlSocket, APIStatus.class);

            if (status != null) {
                return status;
            }
            throw new Exception("Could not read API status.");
        } finally {
            APIClient.ZContext.destroySocket(controlSocket);
        }
    }

    /**
     * Returns the proxy for the given type, creating it (and requesting the
     * endpoint from the server) on first use.
     *
     * @param proxyType one of HEADTRACKING, CONTROLLER, BROADCASTS
     * @throws IllegalArgumentException on an unknown proxy type
     * @throws TimeoutException if the server does not answer the endpoint request
     * @throws Exception if the endpoint is already in use by another client
     */
    @SuppressWarnings("unchecked") // map values always match the type keyed by proxyType
    public <T extends VRidgeApiProxy> T getProxy(int proxyType) throws Exception {
        VRidgeApiProxy proxy = proxies.get(proxyType);

        if (proxy == null) {
            String endpointName;
            switch (proxyType) {
                case HEADTRACKING:
                    endpointName = EndpointNames.HeadTracking;
                    break;
                case CONTROLLER:
                    endpointName = EndpointNames.Controller;
                    break;
                case BROADCASTS:
                    endpointName = EndpointNames.Broadcast;
                    break;
                default:
                    throw new IllegalArgumentException("Invalid proxy type was requested.");
            }

            // FIX: the control socket is now created only when actually needed
            // (it used to be opened even when a cached proxy existed) and is
            // destroyed on every path, including the exception paths.
            ZMQ.Socket controlSocket = createControlSocket();
            try {
                SocketHelpers.SendAsJson(controlSocket, new RequestEndpoint(endpointName, appName));
                EndpointCreated response =
                        SocketHelpers.ReceiveByJson(controlSocket, EndpointCreated.class);

                if (response == null) {
                    throw new TimeoutException("API server timeout");
                }

                if (response.Code == ControlResponseCode.InUse) {
                    throw new Exception("API endpoint in use.");
                }

                String address = "tcp://" + serverAddress + ":" + response.Port;
                switch (proxyType) {
                    case HEADTRACKING:
                        proxies.put(proxyType, new HeadTrackingProxy(address, true));
                        break;
                    case CONTROLLER:
                        proxies.put(proxyType, new ControllerProxy(address));
                        break;
                    case BROADCASTS:
                        proxies.put(proxyType, new BroadcastProxy(address));
                        break;
                }
            } finally {
                APIClient.ZContext.destroySocket(controlSocket);
            }
        }

        return (T) proxies.get(proxyType);
    }

    /** Disconnects and drops the cached proxy of the given type (no-op if absent). */
    public void disconnectProxy(int proxyType) {
        VRidgeApiProxy proxy = proxies.get(proxyType);
        if (proxy == null) return;

        proxy.disconnect();
        // Keep the key with a null value (instead of remove) so that
        // disconnectAll's iteration over keySet() is not a structural
        // modification — remove() here would throw ConcurrentModificationException.
        proxies.put(proxyType, null);
    }

    /** Disconnects every cached proxy. */
    public void disconnectAll() {
        for (int proxyId : proxies.keySet()) {
            disconnectProxy(proxyId);
        }
    }

    /** Address of the fixed control endpoint (port 38219) on the server. */
    private String getEndpointAddress() {
        return "tcp://" + serverAddress + ":38219";
    }

    /** Creates a REQ socket connected to the control endpoint with 1 s send/recv timeouts. */
    private ZMQ.Socket createControlSocket(){
        String ctrlAddress = getEndpointAddress();

        ZMQ.Socket controlSocket = ZContext.createSocket(ZMQ.REQ);
        controlSocket.connect(ctrlAddress);
        controlSocket.setSendTimeOut(1000);
        controlSocket.setReceiveTimeOut(1000);

        return controlSocket;
    }
}
5,338
33.006369
120
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/Capabilities.java
package com.riftcat.vridge.api.client.java; public enum Capabilities { HeadTracking(1), Controllers(2); private final int value; Capabilities(int value) { this.value = value; } }
210
15.230769
43
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/EndpointNames.java
package com.riftcat.vridge.api.client.java; public class EndpointNames { public static final String HeadTracking = "HeadTracking"; public static final String Controller = "Controller"; public static final String Broadcast = "Broadcast"; }
257
31.25
61
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/codes/ControllerStateRequestCodes.java
package com.riftcat.vridge.api.client.java.codes; public class ControllerStateRequestCodes { public static int Disconnect = 255; public static int SendFullState = 1; public static int RecenterHead = 2; public static int Origin_Zero = 0; }
258
22.545455
49
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/codes/HeadTrackingRequestCodes.java
package com.riftcat.vridge.api.client.java.codes; public class HeadTrackingRequestCodes { public static int Disconnect = 255; public static int ChangeState = 254; public static int Recenter = 50; public static int SendPoseMatrixRotationOnly = 0; public static int SendPoseMatrixFull = 6; public static int SendRotationMatrix = 1; public static int SendRadRotationAndPosition = 3; public static int SendQuatRotationAndPosition = 4; public static int SendPositionOnly = 5; public static int RequestSyncOffset = 100; public static int RequestReadOnlyPose = 199; public static int RequestReadOnlyPhonePose = 200; public static int SetYawOffset = 201; public static int ResetYawOffset = 21; }
750
30.291667
54
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/codes/HeadTrackingResponseCodes.java
package com.riftcat.vridge.api.client.java.codes; public class HeadTrackingResponseCodes { public static int AcceptedYourData = 0; public static int SendingCurrentTrackedPose = 2; public static int PhoneDataTimeout = 253; public static int BadRequest = 254; public static int Disconnecting = 255; }
324
22.214286
52
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/codes/TrackedDeviceStatus.java
package com.riftcat.vridge.api.client.java.codes; public class TrackedDeviceStatus { public static final byte Active = 0; public static final byte TempUnavailable = 1; public static final byte Disabled = 2; }
222
26.875
49
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/control/BaseControlMessage.java
package com.riftcat.vridge.api.client.java.control; public class BaseControlMessage { public int ProtocolVersion = 3; public int Code; }
148
15.555556
51
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/control/ControlRequestCode.java
package com.riftcat.vridge.api.client.java.control; public class ControlRequestCode { public static int RequestEndpoint = 1; public static int RequestStatus = 2; }
175
18.555556
51
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/control/ControlResponseCode.java
package com.riftcat.vridge.api.client.java.control; public class ControlResponseCode { /// <summary> /// API awaits connection at given endpoint. /// </summary> public static int OK = 0; /// <summary> /// API is not available because of undefined reason. /// </summary> public static int NotAvailable = 1; /// <summary> /// API is in use by another client /// </summary> public static int InUse = 2; /// <summary> /// Client is trying to use something that requires API client to be updated to more recent version /// </summary> public static int ClientOutdated = 3; /// <summary> /// VRidge needs to be updated or client is not following protocol /// </summary> public static int ServerOutdated = 4; }
790
24.516129
103
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/control/requests/ControlRequestHeader.java
package com.riftcat.vridge.api.client.java.control.requests; import com.riftcat.vridge.api.client.java.control.BaseControlMessage; import com.riftcat.vridge.api.client.java.control.ControlRequestCode; public class ControlRequestHeader extends BaseControlMessage { public String RequestingAppName; public ControlRequestHeader(String requestingAppName){ RequestingAppName = requestingAppName; } public ControlRequestHeader(int reqCode){ Code = reqCode; } }
495
28.176471
69
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/control/requests/RequestEndpoint.java
package com.riftcat.vridge.api.client.java.control.requests; import com.riftcat.vridge.api.client.java.control.ControlRequestCode; public class RequestEndpoint extends ControlRequestHeader{ public String RequestedEndpointName; public RequestEndpoint(String name, String appName){ super(appName); Code = ControlRequestCode.RequestEndpoint; RequestedEndpointName = name; } }
414
24.9375
69
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/control/responses/APIStatus.java
package com.riftcat.vridge.api.client.java.control.responses;

import com.riftcat.vridge.api.client.java.EndpointNames;

import java.util.List;

/**
 * Aggregated status of all API endpoints as reported by the control channel.
 */
public class APIStatus {

    /** Per-endpoint status entries; populated from the server response. */
    public List<EndpointStatus> Endpoints;

    @Override
    public String toString() {
        // Null-safe: an unpopulated status renders as an empty string
        // instead of throwing NPE.
        if (Endpoints == null) {
            return "";
        }
        // StringBuilder avoids O(n^2) string concatenation in the loop.
        StringBuilder strStatus = new StringBuilder();
        for (EndpointStatus status : Endpoints) {
            strStatus.append(status.Name)
                     .append("(")
                     .append(status.codeString())
                     .append(") | ");
        }
        return strStatus.toString();
    }

    /**
     * Finds an endpoint status by name.
     *
     * @param name endpoint name to look up (see {@link EndpointNames})
     * @return matching status, or {@code null} if absent or the list is
     *         not yet populated
     */
    public EndpointStatus findEndpoint(String name) {
        if (Endpoints == null) {
            return null;
        }
        for (EndpointStatus endpoint : Endpoints) {
            if (endpoint.Name.equals(name)) {
                return endpoint;
            }
        }
        return null;
    }
}
693
20.030303
74
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/control/responses/ControlResponseHeader.java
package com.riftcat.vridge.api.client.java.control.responses; import com.riftcat.vridge.api.client.java.control.BaseControlMessage; import com.riftcat.vridge.api.client.java.control.ControlResponseCode; public class ControlResponseHeader extends BaseControlMessage { // Predefined responses public static ControlResponseHeader ResponseInUse; public static ControlResponseHeader ResponseClientOutdated; public static ControlResponseHeader ResponseNotAvailable; static{ // Predefined responses ResponseNotAvailable = new ControlResponseHeader(); ResponseNotAvailable.Code = ControlResponseCode.NotAvailable; ResponseClientOutdated = new ControlResponseHeader(); ResponseClientOutdated.Code = ControlResponseCode.ClientOutdated; ResponseInUse = new ControlResponseHeader(); ResponseInUse.Code = ControlResponseCode.InUse; } }
913
34.153846
73
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/control/responses/EndpointCreated.java
package com.riftcat.vridge.api.client.java.control.responses;

/**
 * Control-channel response confirming that a requested API endpoint was
 * created and telling the client where/how to reach it.
 */
public class EndpointCreated extends ControlResponseHeader {

    // Timeout in seconds — presumably inactivity timeout after which the
    // endpoint is reclaimed by the server; TODO confirm against server docs.
    public int TimeoutSec;

    // Port the newly created endpoint listens on — presumably used by the
    // client to build the connection address; verify against caller.
    public int Port;
}
175
24.142857
61
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/control/responses/EndpointStatus.java
package com.riftcat.vridge.api.client.java.control.responses;

import com.riftcat.vridge.api.client.java.control.ControlResponseCode;

/**
 * Availability status of a single API endpoint, including who currently
 * holds it when it is busy.
 */
public class EndpointStatus extends ControlResponseHeader {

    /** Endpoint name. */
    public String Name;

    /** Name of the app currently using the endpoint (when in use). */
    public String InUseBy;

    public EndpointStatus(String endpointName) {
        this.Code = ControlResponseCode.OK;
        this.Name = endpointName;
    }

    /** Human-readable rendering of {@code Code} for logs/status lines. */
    public String codeString() {
        if (Code == ControlResponseCode.OK) {
            return "Ready";
        } else if (Code == ControlResponseCode.InUse) {
            return "In use by " + InUseBy;
        } else if (Code == ControlResponseCode.NotAvailable) {
            return "n/a";
        }
        return "unknown";
    }
}
721
23.066667
70
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/proxy/BroadcastProxy.java
package com.riftcat.vridge.api.client.java.proxy;

import com.google.protobuf.InvalidProtocolBufferException;
import com.riftcat.vridge.api.client.java.APIClient;
import com.riftcat.vridge.api.client.java.proto.HapticPulse;
import com.riftcat.vridge.api.client.java.utils.APILogger;
import org.zeromq.ZMQ;

import java.nio.charset.Charset;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

/**
 * Subscribes to the VRidge broadcast (PUB/SUB) endpoint on a background
 * thread and forwards haptic pulse events to registered listeners.
 */
public class BroadcastProxy implements VRidgeApiProxy {

    private final String endpointAddr;

    private ZMQ.Socket socket;

    // CopyOnWriteArrayList: listeners are added/removed from caller threads
    // while the polling thread iterates; a plain LinkedList risked
    // ConcurrentModificationException.
    private List<IBroadcastListener> listeners = new CopyOnWriteArrayList<IBroadcastListener>();

    private Thread threadPolling;

    /**
     * @param endpointAddr ZeroMQ address of the broadcast endpoint
     */
    public BroadcastProxy(String endpointAddr) {
        this.endpointAddr = endpointAddr;
    }

    /**
     * Starts (or restarts) the background thread that polls the broadcast
     * socket for haptic pulse messages and dispatches them to listeners.
     */
    public void startPolling() {
        if (threadPolling != null) threadPolling.interrupt();

        threadPolling = new Thread(new Runnable() {
            @Override
            public void run() {
                socket = APIClient.ZContext.createSocket(ZMQ.SUB);
                socket.connect(endpointAddr);
                // Topic filtering happens at socket level.
                socket.subscribe("haptic".getBytes(Charset.forName("UTF-8")));
                socket.setLinger(1000);

                ZMQ.Poller poller = APIClient.ZContext.createPoller(1);
                poller.register(socket, ZMQ.Poller.POLLIN);

                while (!Thread.currentThread().isInterrupted()) {
                    int result = poller.poll(1000);

                    if (result > 0) {
                        // we can ignore topic here, it's filtered at socket level
                        // but we need to consume it from socket to continue
                        socket.recvStr();
                        byte[] bufMsg = socket.recv();

                        try {
                            // Deserialize
                            HapticPulse pulse = HapticPulse.parseFrom(bufMsg);

                            // Notify listeners
                            for (IBroadcastListener listener : listeners) {
                                listener.onHapticPulse(pulse);
                            }
                        } catch (InvalidProtocolBufferException e) {
                            // Invalid data - could not be deserialized
                        }
                    }
                }

                poller.close();
            }
        });

        threadPolling.start();
    }

    /**
     * Stops polling and releases the socket. Safe against interruption:
     * the socket is closed and listeners cleared even if join() is
     * interrupted (the original skipped cleanup in that case).
     */
    public void disconnect() {
        try {
            if (threadPolling != null) {
                threadPolling.interrupt();
                threadPolling.join();
            }
        } catch (InterruptedException e) {
            APILogger.error("Can't close Broadcast endpoint.");
            // Restore interrupt status instead of swallowing it.
            Thread.currentThread().interrupt();
        } finally {
            if (socket != null) {
                socket.close();
            }
            listeners.clear();
        }
    }

    /** Registers a listener for broadcast events. */
    public void addListener(IBroadcastListener listener) {
        listeners.add(listener);
    }

    /** Unregisters a previously added listener. */
    public void removeListener(IBroadcastListener listener) {
        listeners.remove(listener);
    }
}
3,100
28.533333
86
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/proxy/ClientProxyBase.java
package com.riftcat.vridge.api.client.java.proxy;

import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.MessageLite;
import com.google.protobuf.Parser;
import com.riftcat.vridge.api.client.java.APIClient;
import com.riftcat.vridge.api.client.java.utils.APILogger;
import org.zeromq.ZContext;
import org.zeromq.ZMQ;

import java.util.TimerTask;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/**
 * Base class for REQ/REP endpoint proxies. Owns the ZeroMQ socket and an
 * optional periodic keep-alive ping.
 */
public abstract class ClientProxyBase implements VRidgeApiProxy {

    private final byte[] keepAlivePacket = { 0 };

    // Stored so CloseSocket() can shut the scheduler down; the original
    // never kept the executor and leaked its (non-daemon) thread.
    private ScheduledExecutorService keepAliveExecutor;
    private Runnable keepAlivePing;

    /** Protocol version stamped onto requests by subclasses. */
    int CurrentVersion = 3;

    ZMQ.Socket socket;

    /**
     * @param endpointAddress ZeroMQ address of the REQ/REP endpoint
     * @param keepAlive       when true, pings the server every 5 seconds
     */
    ClientProxyBase(String endpointAddress, boolean keepAlive) {
        if (APIClient.ZContext == null) APIClient.ZContext = new ZContext(4);

        socket = APIClient.ZContext.createSocket(ZMQ.REQ);
        socket.setLinger(1000);
        socket.setSendTimeOut(15000);
        socket.setReceiveTimeOut(15000);
        socket.connect(endpointAddress);
        socket.setHWM(1);

        if (!keepAlive) return;

        keepAlivePing = new Runnable() {
            @Override
            public void run() {
                sendKeepAlivePing();
            }
        };

        keepAliveExecutor = Executors.newScheduledThreadPool(1);
        keepAliveExecutor.scheduleAtFixedRate(keepAlivePing, 1, 5, TimeUnit.SECONDS);
    }

    /** Destroys the socket and stops the keep-alive scheduler, if any. */
    void CloseSocket() {
        // Stop the keep-alive thread so it does not outlive the socket.
        if (keepAliveExecutor != null) {
            keepAliveExecutor.shutdownNow();
            keepAliveExecutor = null;
        }
        APIClient.ZContext.destroySocket(socket);
    }

    /**
     * Sends a protobuf request and blocks for the reply.
     *
     * @param msg    request to serialize and send
     * @param parser parser for the expected reply type
     * @return parsed reply
     * @throws TimeoutException when no reply arrives within the socket
     *                          timeout or the reply cannot be parsed; the
     *                          socket is destroyed in that case
     */
    synchronized <T extends MessageLite> T SendMessage(MessageLite msg, Parser<T> parser) throws TimeoutException {
        APILogger.zmq("send begin");
        long timestamp = System.nanoTime();
        socket.send(msg.toByteArray());
        byte[] responseBytes = socket.recv();
        APILogger.zmq("recv end - " + (System.nanoTime() - timestamp) / 1000000.0);

        if (responseBytes != null) {
            try {
                return parser.parseFrom(responseBytes);
            } catch (InvalidProtocolBufferException e) {
                // Unparseable reply: fall through and treat it as a timeout.
            }
        }

        APILogger.zmq("timeout");
        APIClient.ZContext.destroySocket(socket);
        throw new TimeoutException();
    }

    public abstract void disconnect();

    /**
     * Sends a single-byte ping over the REQ socket.
     *
     * @return true when the ping round-trip succeeded
     */
    public synchronized boolean sendKeepAlivePing() {
        boolean error = false;
        APILogger.zmq("ping begin: ");
        // Short-circuit: skip recv when the send already failed.
        error = error || !socket.send(keepAlivePacket);
        error = error || socket.recv() == null;
        APILogger.zmq("ping end - error: " + error);
        return !error;
    }
}
2,718
29.550562
115
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/proxy/ControllerProxy.java
package com.riftcat.vridge.api.client.java.proxy;

import com.riftcat.vridge.api.client.java.codes.ControllerStateRequestCodes;
import com.riftcat.vridge.api.client.java.proto.ControllerStateRequest;
import com.riftcat.vridge.api.client.java.proto.ControllerStateResponse;
import com.riftcat.vridge.api.client.java.proto.HandType;
import com.riftcat.vridge.api.client.java.proto.VRController;
import com.riftcat.vridge.api.client.java.proto.VRControllerAxis_t;
import com.riftcat.vridge.api.client.java.proto.VRControllerState_t;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.TimeoutException;

/**
 * Proxy for the VR controller endpoint. Feeds full controller states
 * (pose, buttons, analog axes) into VRidge.
 */
public class ControllerProxy extends ClientProxyBase {

    // Increasing packet counter carried in every OpenVR button-state update.
    private int packetNum = 0;

    public ControllerProxy(String endpointAddress) {
        super(endpointAddress, true);
    }

    /**
     * Sends a full, pre-built VR controller state to VR.
     */
    public synchronized void sendControllerState(VRController state) throws TimeoutException {
        sendMessage(ControllerStateRequest
                .newBuilder()
                .setTaskType(ControllerStateRequestCodes.SendFullState)
                .setControllerState(state)
                .build());
    }

    /**
     * Sends a full single VR controller state to VR, assembled from its
     * individual components (pose matrix, masks, axes, optional velocity).
     */
    public synchronized void sendControllerState(int controllerId,
                                                 long touchedMask,
                                                 long pressedMask,
                                                 List<Float> orientationMatrix,
                                                 float triggerValue,
                                                 float analogX,
                                                 float analogY,
                                                 float[] velocity,
                                                 HandType hand) throws TimeoutException {

        VRControllerState_t.Builder buttons = VRControllerState_t.newBuilder()
                .setRAxis0(VRControllerAxis_t.newBuilder()
                        .setX(analogX)
                        .setY(analogY))
                .setRAxis1(VRControllerAxis_t.newBuilder()
                        .setX(triggerValue))
                .setUlButtonPressed(pressedMask)
                .setUlButtonTouched(touchedMask)
                .setUnPacketNum(++packetNum);

        VRController.Builder pose = VRController.newBuilder()
                .setControllerId(controllerId)
                .addAllOrientationMatrix(orientationMatrix)
                .setStatus(0)
                .setSuggestedHand(hand)
                .setButtonState(buttons);

        if (velocity != null) {
            pose.addVelocity(velocity[0])
                .addVelocity(velocity[1])
                .addVelocity(velocity[2]);
        }

        sendMessage(ControllerStateRequest.newBuilder()
                .setTaskType(ControllerStateRequestCodes.SendFullState)
                .setControllerState(pose)
                .build());
    }

    /**
     * Recenter head tracking. Works the same as pressing the recenter hotkey
     * as configured in VRidge settings.
     */
    public void recenterHead() throws TimeoutException {
        sendMessage(ControllerStateRequest
                .newBuilder()
                .setVersion(CurrentVersion)
                .setTaskType(ControllerStateRequestCodes.RecenterHead)
                .build());
    }

    /**
     * Disconnects from the controller API and frees it for other clients.
     */
    public void disconnect() {
        ControllerStateRequest bye = ControllerStateRequest
                .newBuilder()
                .setVersion(CurrentVersion)
                .setTaskType(ControllerStateRequestCodes.Disconnect)
                .build();

        try {
            sendMessage(bye);
        } catch (Exception x) {
            // ignored
        }

        CloseSocket();
    }

    /** Sends a request and parses the controller-endpoint reply. */
    private ControllerStateResponse sendMessage(ControllerStateRequest req) throws TimeoutException {
        return SendMessage(req, ControllerStateResponse.parser());
    }
}
35.991228
111
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/proxy/HeadTrackingProxy.java
package com.riftcat.vridge.api.client.java.proxy;

import com.google.protobuf.ByteString;
import com.riftcat.vridge.api.client.java.codes.HeadTrackingRequestCodes;
import com.riftcat.vridge.api.client.java.codes.HeadTrackingResponseCodes;
import com.riftcat.vridge.api.client.java.codes.TrackedDeviceStatus;
import com.riftcat.vridge.api.client.java.proto.HeadTrackingRequest;
import com.riftcat.vridge.api.client.java.proto.HeadTrackingResponse;
import com.riftcat.vridge.api.client.java.proto.TrackedPose;
import com.riftcat.vridge.api.client.java.utils.APILogger;
import com.riftcat.vridge.api.client.java.utils.SerializationUtils;

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.concurrent.TimeoutException;

/**
 * Proxy for the head tracking endpoint.
 * This is only a partial implementation of the available API calls.
 */
public class HeadTrackingProxy extends ClientProxyBase {

    public HeadTrackingProxy(String endpointAddress, boolean shouldKeepAlive) {
        super(endpointAddress, shouldKeepAlive);
    }

    /**
     * Sets head position to a new location.
     *
     * @return true when the server accepted the data
     */
    public boolean setPosition(float x, float y, float z) throws TimeoutException {
        HeadTrackingResponse response = sendMessage(HeadTrackingRequest
                .newBuilder()
                .setVersion(CurrentVersion)
                .setTaskType(HeadTrackingRequestCodes.SendPositionOnly)
                .setData(SerializationUtils.byteStringFromFloats(x, y, z))
                .build());

        return response.getReplyCode() == HeadTrackingResponseCodes.AcceptedYourData;
    }

    /**
     * Sets position and Euler rotation; returns true if the value was
     * accepted. This won't work for headsets with reprojection enabled.
     */
    public boolean setRotationAndPosition(float yaw, float pitch, float roll,
                                          float x, float y, float z) throws TimeoutException {
        // Wire order is pitch, yaw, roll — must not be reordered.
        HeadTrackingResponse response = sendMessage(HeadTrackingRequest
                .newBuilder()
                .setVersion(CurrentVersion)
                .setTaskType(HeadTrackingRequestCodes.SendRadRotationAndPosition)
                .setData(SerializationUtils.byteStringFromFloats(pitch, yaw, roll, x, y, z))
                .build());

        return response.getReplyCode() == HeadTrackingResponseCodes.AcceptedYourData;
    }

    /**
     * Sets position and quaternion rotation; returns true if the value was
     * accepted. This won't work for headsets with reprojection enabled.
     */
    public boolean setRotationAndPosition(float quatQ, float quatY, float quatZ, float quatW,
                                          float posX, float posY, float posZ) throws TimeoutException {
        HeadTrackingResponse response = sendMessage(HeadTrackingRequest
                .newBuilder()
                .setVersion(CurrentVersion)
                .setTaskType(HeadTrackingRequestCodes.SendQuatRotationAndPosition)
                .setData(SerializationUtils.byteStringFromFloats(
                        quatQ, quatY, quatZ, quatW, posX, posY, posZ))
                .build());

        return response.getReplyCode() == HeadTrackingResponseCodes.AcceptedYourData;
    }

    /**
     * Reorients the tracking system, setting a new center at the current
     * head direction.
     */
    public boolean recenterView() throws TimeoutException {
        HeadTrackingResponse response = sendMessage(HeadTrackingRequest
                .newBuilder()
                .setVersion(CurrentVersion)
                .setTaskType(HeadTrackingRequestCodes.Recenter)
                .build());

        return response.getReplyCode() == HeadTrackingResponseCodes.AcceptedYourData;
    }

    /**
     * Gets the current head pose and related offsets, or null when the
     * server does not reply with a pose.
     */
    public TrackedPose getCurrentPose() throws TimeoutException {
        HeadTrackingResponse response = sendMessage(HeadTrackingRequest
                .newBuilder()
                .setVersion(CurrentVersion)
                .setTaskType(HeadTrackingRequestCodes.RequestReadOnlyPose)
                .build());

        boolean hasPose =
                response.getReplyCode() == HeadTrackingResponseCodes.SendingCurrentTrackedPose;
        return hasPose ? response.getTrackedPose() : null;
    }

    /** Sets the yaw offset applied to subsequent poses. */
    public void setYawOffset(float yaw) throws TimeoutException {
        sendMessage(HeadTrackingRequest
                .newBuilder()
                .setVersion(CurrentVersion)
                .setTaskType(HeadTrackingRequestCodes.SetYawOffset)
                .setData(SerializationUtils.byteStringFromFloats(yaw))
                .build());
    }

    /** Marks the headset as inside/outside of tracking range. */
    public void changeTrackingState(boolean isInTrackingRange) throws TimeoutException {
        byte[] state = {
                isInTrackingRange ? TrackedDeviceStatus.Active
                                  : TrackedDeviceStatus.TempUnavailable
        };

        sendMessage(HeadTrackingRequest
                .newBuilder()
                .setVersion(CurrentVersion)
                .setTaskType(HeadTrackingRequestCodes.ChangeState)
                .setData(ByteString.copyFrom(state))
                .build());
    }

    @Override
    public void disconnect() {
        HeadTrackingRequest bye = HeadTrackingRequest.newBuilder()
                .setVersion(CurrentVersion)
                .setTaskType(HeadTrackingRequestCodes.Disconnect)
                .build();

        try {
            sendMessage(bye);
        } catch (Exception x) {
            // ignored
        }

        CloseSocket();
    }

    /** Sends a request and parses the head-tracking reply. */
    private HeadTrackingResponse sendMessage(HeadTrackingRequest req) throws TimeoutException {
        return SendMessage(req, HeadTrackingResponse.parser());
    }
}
5,862
36.343949
155
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/proxy/IBroadcastListener.java
package com.riftcat.vridge.api.client.java.proxy;

import com.riftcat.vridge.api.client.java.proto.HapticPulse;

/**
 * Listener for events delivered over the VRidge broadcast endpoint.
 */
public interface IBroadcastListener{
    /** Called when a haptic feedback pulse is broadcast by the server. */
    void onHapticPulse(HapticPulse pulse);
}
195
23.5
60
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/proxy/VRidgeApiProxy.java
package com.riftcat.vridge.api.client.java.proxy;

/**
 * Common contract for all VRidge API endpoint proxies.
 */
public interface VRidgeApiProxy {
    /** Releases the underlying connection; the proxy is unusable afterwards. */
    void disconnect();
}
110
17.5
49
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/remotes/ControllerRemote.java
package com.riftcat.vridge.api.client.java.remotes;

import com.riftcat.vridge.api.client.java.proto.HandType;
import com.riftcat.vridge.api.client.java.proto.HeadRelation;
import com.riftcat.vridge.api.client.java.proto.VRController;
import com.riftcat.vridge.api.client.java.proto.VRControllerAxis_t;
import com.riftcat.vridge.api.client.java.proto.VRControllerState_t;
import com.riftcat.vridge.api.client.java.proto.VRControllerState_tOrBuilder;
import com.riftcat.vridge.api.client.java.proxy.ControllerProxy;
import com.riftcat.vridge.api.client.java.utils.ButtonMask;

import java.util.concurrent.TimeoutException;

/**
 * High-level, fire-and-forget remote for feeding VR controller state into
 * VRidge. Any communication failure disposes this remote.
 */
public class ControllerRemote extends RemoteBase {

    // NOTE(review): static — all ControllerRemote instances share one OpenVR
    // packet counter; presumably intended, confirm before changing.
    private static int packetNum = 0;

    private ControllerProxy proxy;

    ControllerRemote(ControllerProxy proxy) {
        super(proxy);
        this.proxy = proxy;
    }

    /**
     * Sets VR controller to a new state.
     *
     * @param controllerId      Unique ID of given controller
     * @param handType          Suggested hand for this controller
     * @param disableController When true the controller is reported with
     *                          status 2 (disabled) to the VR side
     * @param headRelation      How given pose relates to the head. Usually
     *                          unrelated is the best pick.
     * @param orientation       Orientation as XYZW float[4]
     * @param position          Position as XYZ float[3]; may be null
     * @param analogX           (-1,1) horizontal touchpad position
     * @param analogY           (-1,1) vertical touchpad position
     * @param analogTrigger     (0,1) trigger state (1 is fully pulled)
     * @param isMenuPressed     menu button state
     * @param isSystemPressed   system button state
     * @param isTriggerPressed  trigger button state
     * @param isGripPressed     grip button state
     * @param isTouchpadPressed touchpad click state
     * @param isTouchpadTouched touchpad touch state
     */
    public void setControllerState(
            // Controller ID
            int controllerId,
            HandType handType,
            boolean disableController,

            // Pose data
            HeadRelation headRelation,
            float[] orientation,
            float[] position,

            // Touchpad state [-1,1]
            double analogX,
            double analogY,

            // Trigger state
            double analogTrigger,

            // Button states
            boolean isMenuPressed,
            boolean isSystemPressed,
            boolean isTriggerPressed,
            boolean isGripPressed,
            boolean isTouchpadPressed,
            boolean isTouchpadTouched) {

        // See openvr.h in OpenVR SDK for mappings and masks
        // https://github.com/ValveSoftware/openvr/blob/master/headers/openvr.h
        long pressedMask = buildPressedMask(isMenuPressed, isSystemPressed,
                isTriggerPressed, isGripPressed, isTouchpadPressed);
        long touchedMask = buildTouchedMask(isTouchpadTouched, isTriggerPressed);

        // Status 2 marks the controller as disabled on the VR side.
        int controllerStatus = disableController ? 2 : 0;

        // Declared as the concrete Builder — the original used the OrBuilder
        // interface and then had to downcast at setButtonState().
        VRControllerState_t.Builder buttons = VRControllerState_t.newBuilder()
                .setRAxis0(VRControllerAxis_t.newBuilder()
                        .setX((float) analogX)
                        .setY((float) analogY))
                .setRAxis1(VRControllerAxis_t.newBuilder()
                        .setX((float) analogTrigger))
                .setUlButtonPressed(pressedMask)
                .setUlButtonTouched(touchedMask)
                .setUnPacketNum(++packetNum);

        VRController.Builder controllerState = VRController.newBuilder()
                .setControllerId(controllerId)
                .addOrientation(orientation[0])
                .addOrientation(orientation[1])
                .addOrientation(orientation[2])
                .addOrientation(orientation[3])
                .setStatus(controllerStatus)
                .setSuggestedHand(handType)
                .setHeadRelation(headRelation)
                .setButtonState(buttons);

        if (position != null) {
            controllerState.addPosition(position[0]);
            controllerState.addPosition(position[1]);
            controllerState.addPosition(position[2]);
        }

        try {
            proxy.sendControllerState(controllerState.build());
        } catch (Exception e) {
            // Fire-and-forget API: any failure invalidates this remote.
            dispose();
        }
    }

    /**
     * Recenter head tracking. Works the same as pressing the recenter hotkey
     * as configured in VRidge settings.
     */
    public void recenterHead() {
        try {
            proxy.recenterHead();
        } catch (Exception e) {
            dispose();
        }
    }

    // NOTE(review): second argument is isTriggerTouched, but the caller above
    // passes isTriggerPressed — presumably pressed implies touched; confirm.
    private long buildTouchedMask(boolean isTouchpadTouched, boolean isTriggerTouched) {
        long mask = 0;
        if (isTouchpadTouched) mask |= ButtonMask.Touchpad;
        if (isTriggerTouched) mask |= ButtonMask.Trigger;
        return mask;
    }

    /** ORs together the OpenVR button masks for all pressed buttons. */
    private long buildPressedMask(
            boolean isMenuPressed,
            boolean isSystemPressed,
            boolean isTriggerPressed,
            boolean isGripPressed,
            boolean isTouchpadPressed) {
        long mask = 0;
        if (isMenuPressed) mask |= ButtonMask.ApplicationMenu;
        if (isSystemPressed) mask |= ButtonMask.System;
        if (isTriggerPressed) mask |= ButtonMask.Trigger;
        if (isGripPressed) mask |= ButtonMask.Grip;
        if (isTouchpadPressed) mask |= ButtonMask.Touchpad;
        return mask;
    }
}
5,188
35.286713
128
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/remotes/DiscoveryClient.java
package com.riftcat.vridge.api.client.java.remotes;

import com.google.protobuf.InvalidProtocolBufferException;
import com.riftcat.vridge.api.client.java.proto.Beacon;
import com.riftcat.vridge.api.client.java.proto.BeaconOrigin;
import com.riftcat.vridge.api.client.java.remotes.beacons.VridgeServerBeacon;
import com.riftcat.vridge.api.client.java.remotes.beacons.VridgeServerBeaconList;
import org.zeromq.ZBeacon;

import java.net.InetAddress;
import java.util.List;

/**
 * Discovers VRidge API servers on the local network by exchanging UDP
 * beacons on port 38219. Restarts itself after uncaught beacon errors.
 */
class DiscoveryClient implements Thread.UncaughtExceptionHandler {

    private final byte[] identity;
    private VridgeServerBeaconList beaconList;
    private ZBeacon beaconClient;

    DiscoveryClient() {
        // Identification payload broadcast to servers; only Role matters here.
        identity = Beacon.newBuilder()
                .setRole(BeaconOrigin.Client)
                // We don't use information below
                .setMachineName("Android")
                .setHumanReadableVersion("Android")
                .setUserName("Android")
                .build()
                .toByteArray();

        beaconList = new VridgeServerBeaconList();
        reset();
    }

    /**
     * (Re)starts beacon broadcasting/listening. Synchronized: the original
     * raced against {@link #dispose()} on {@code beaconClient}.
     */
    public synchronized void reset() {
        dispose();

        beaconClient = new ZBeacon("255.255.255.255", 38219, identity, true, true);
        beaconClient.setBroadcastInterval(1000);
        beaconClient.setListener(new ZBeacon.Listener() {
            @Override
            public void onBeacon(InetAddress sender, byte[] buffer) {
                Beacon beacon;
                try {
                    beacon = Beacon.parser().parseFrom(buffer);
                } catch (InvalidProtocolBufferException e) {
                    // Ignore stray packets
                    return;
                }

                if (beacon.getRole() != BeaconOrigin.Server) {
                    // Skip other clients
                    return;
                }

                beaconList.add(beacon, sender);
            }
        });

        beaconClient.setUncaughtExceptionHandlers(this, this);
        beaconClient.start();
    }

    /** Returns servers whose beacons were received recently. */
    public List<VridgeServerBeacon> getFreshServers() {
        return beaconList.getFreshServers();
    }

    /** Stops beaconing; safe to call repeatedly. */
    public synchronized void dispose() {
        try {
            if (beaconClient != null) {
                beaconClient.stop();
                beaconClient = null;
            }
        } catch (InterruptedException e) {
            // Restore interrupt status instead of swallowing it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }

    @Override
    public void uncaughtException(Thread t, Throwable e) {
        // Back off briefly, then restart the beacon.
        try {
            Thread.sleep(3000);
        } catch (InterruptedException interrupted) {
            // We are being shut down: restore interrupt status, do not restart.
            Thread.currentThread().interrupt();
            return;
        }
        reset();
    }
}
2,591
28.123596
82
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/remotes/HeadRemote.java
package com.riftcat.vridge.api.client.java.remotes;

import com.riftcat.vridge.api.client.java.proto.TrackedPose;
import com.riftcat.vridge.api.client.java.proxy.HeadTrackingProxy;

import java.util.concurrent.TimeoutException;

/**
 * High-level, fire-and-forget remote for head tracking. Any communication
 * failure disposes this remote; the owner should then acquire a fresh one.
 */
public class HeadRemote extends RemoteBase {

    private HeadTrackingProxy proxy;

    HeadRemote(HeadTrackingProxy proxy) {
        super(proxy);
        this.proxy = proxy;
    }

    /** Reorients the tracking system; new center is the current head direction. */
    public void recenter() {
        try {
            proxy.recenterView();
        } catch (Exception failure) {
            dispose();
        }
    }

    /** Gets current head pose and related offsets. May return null on connection issues. */
    public TrackedPose getCurrentPose() {
        try {
            return proxy.getCurrentPose();
        } catch (Exception failure) {
            dispose();
            return null;
        }
    }

    /** Sets head position to a new location. */
    public void setPosition(float x, float y, float z) {
        try {
            proxy.setPosition(x, y, z);
        } catch (Exception failure) {
            dispose();
        }
    }

    /**
     * Sets head position to a new location and orientation.
     * This won't work for headsets with reprojection enabled.
     */
    public void setRotationAndPosition(float yaw, float pitch, float roll, float x, float y, float z) {
        try {
            proxy.setRotationAndPosition(yaw, pitch, roll, x, y, z);
        } catch (Exception failure) {
            dispose();
        }
    }

    /**
     * Sets the yaw-axis offset applied to each head and controller pose
     * processed by the system.
     *
     * @param yaw Offset in radians.
     */
    public void setYawOffset(float yaw) {
        try {
            proxy.setYawOffset(yaw);
        } catch (Exception failure) {
            dispose();
        }
    }

    /**
     * Marks the headset as in/outside of tracking range. Setting it to false
     * will most likely stop rendering on SteamVR side as pose data will be
     * considered invalid.
     */
    public void setStatus(boolean isInTrackingRange) {
        try {
            proxy.changeTrackingState(isInTrackingRange);
        } catch (Exception failure) {
            dispose();
        }
    }

    // Type-cast convenience overloads

    /**
     * Double-based convenience overload; see
     * {@link #setRotationAndPosition(float, float, float, float, float, float)}.
     */
    public void setRotationAndPosition(double yaw, double pitch, double roll, double x, double y, double z) {
        setRotationAndPosition((float) yaw, (float) pitch, (float) roll,
                (float) x, (float) y, (float) z);
    }

    /** Double-based convenience overload; see {@link #setPosition(float, float, float)}. */
    public void setPosition(double x, double y, double z) {
        setPosition((float) x, (float) y, (float) z);
    }
}
2,914
24.79646
108
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/remotes/RemoteBase.java
package com.riftcat.vridge.api.client.java.remotes;

import com.riftcat.vridge.api.client.java.proxy.ClientProxyBase;
import com.riftcat.vridge.api.client.java.proxy.VRidgeApiProxy;

/**
 * Shared lifecycle base for the high-level remotes: tracks disposal and
 * disconnects the wrapped proxy exactly when disposed.
 */
class RemoteBase {

    private VRidgeApiProxy proxy;
    private boolean isDisposed;

    protected RemoteBase(VRidgeApiProxy apiProxy) {
        proxy = apiProxy;
    }

    /** True once {@link #dispose()} has been called; the remote is then dead. */
    public boolean isDisposed() {
        return isDisposed;
    }

    /** Marks this remote as dead and disconnects the underlying proxy. */
    public void dispose() {
        isDisposed = true;
        if (proxy != null) {
            proxy.disconnect();
        }
    }
}
552
20.269231
64
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/remotes/VridgeRemote.java
package com.riftcat.vridge.api.client.java.remotes;

import com.google.protobuf.InvalidProtocolBufferException;
import com.riftcat.vridge.api.client.java.APIClient;
import com.riftcat.vridge.api.client.java.Capabilities;
import com.riftcat.vridge.api.client.java.EndpointNames;
import com.riftcat.vridge.api.client.java.control.ControlResponseCode;
import com.riftcat.vridge.api.client.java.control.responses.APIStatus;
import com.riftcat.vridge.api.client.java.control.responses.EndpointStatus;
import com.riftcat.vridge.api.client.java.proto.Beacon;
import com.riftcat.vridge.api.client.java.proto.BeaconOrigin;
import com.riftcat.vridge.api.client.java.proto.HapticPulse;
import com.riftcat.vridge.api.client.java.proxy.BroadcastProxy;
import com.riftcat.vridge.api.client.java.proxy.ControllerProxy;
import com.riftcat.vridge.api.client.java.proxy.HeadTrackingProxy;
import com.riftcat.vridge.api.client.java.proxy.IBroadcastListener;
import com.riftcat.vridge.api.client.java.remotes.beacons.VridgeServerBeacon;
import com.riftcat.vridge.api.client.java.remotes.beacons.VridgeServerBeaconList;
import com.riftcat.vridge.api.client.java.utils.APILogger;

import org.zeromq.ZBeacon;

import java.net.InetAddress;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;

/**
 * High-level "fire-and-forget" facade over the VRidge API client.
 *
 * Manages the lifecycle of the head-tracking and controller endpoint
 * connections: getters ({@link #getHead()}, {@link #getController()}) return
 * a live remote when the connection is healthy and {@code null} otherwise,
 * transparently retrying the connection at most once per
 * {@code reconnectFrequencyMs}. All connection state is guarded by
 * {@code synchronized (this)}.
 */
public class VridgeRemote {

    // Shared discovery client backing the static getVridgeServers() helper;
    // created once per process in the static initializer below.
    private static DiscoveryClient discovery;

    // User config
    private final int reconnectFrequencyMs;   // minimum ms between connection attempts
    private final int timeoutThresholdMs;     // NOTE(review): stored but never read in this class — confirm intended use
    private final EnumSet<Capabilities> capabilities;  // endpoints this remote should connect to
    private final String appName;

    // Access objects
    private APIClient api;
    private ControllerRemote controller;  // non-null only while a controller link is (believed) healthy
    private HeadRemote head;              // non-null only while a head-tracking link is (believed) healthy
    private BroadcastProxy broadcasts;    // haptic-pulse broadcast subscription, tied to controller use

    // State
    private long lastConnectionAttempt;   // System.currentTimeMillis() of the last trySettingUpConnection()
    private boolean isDisposed = false;
    // Long-lived listeners; survive endpoint reconnects (see trySettingUpConnection).
    private List<IBroadcastListener> eventListeners = new ArrayList<IBroadcastListener>();

    static{
        discovery = new DiscoveryClient();
    }

    /**
     * Creates a remote bound to one VRidge server. No network traffic happens
     * here beyond constructing the underlying {@link APIClient}; connections
     * are established lazily by the getters.
     *
     * @param serverIp             address of the machine running the VRidge server
     * @param appName              identifier reported to the server for this client
     * @param capabilities         which endpoints (head tracking / controllers) to connect
     * @param reconnectFrequencyMs minimum interval between reconnection attempts
     * @param timeoutThresholdMs   timeout threshold (see NOTE on the field)
     */
    public VridgeRemote(
            String serverIp,
            String appName,
            EnumSet<Capabilities> capabilities,
            int reconnectFrequencyMs,
            int timeoutThresholdMs) {
        this.reconnectFrequencyMs = reconnectFrequencyMs;
        this.timeoutThresholdMs = timeoutThresholdMs;
        this.capabilities = capabilities;
        this.appName = appName;

        api = new APIClient(serverIp, appName);
    }

    /**
     * Returns list of currently running API servers in reachable networks
     * along with their IP addresses and identification information.
     */
    public static List<VridgeServerBeacon> getVridgeServers(){
        return discovery.getFreshServers();
    }

    /** Adds subscriber that will receive haptic feedback events from VRidge.
     * Possibly more in the future. */
    public void addEventListener(IBroadcastListener listener){
        eventListeners.add(listener);
    }

    /** Removes event listener added by addEventListener. */
    public void removeEventListener(IBroadcastListener listener){
        eventListeners.remove(listener);
    }

    /**
     * Queries current API server status, or returns {@code null} when the
     * server cannot be reached (any exception is swallowed by design —
     * this class is a fire-and-forget facade).
     */
    public synchronized APIStatus getStatus(){
        try {
            return api.GetStatus();
        } catch (Exception e) {
            return null;
        }
    }

    /**
     * Returns controller remote if connection seems valid. Will return null if connection is not in healthy state.
     * Always check for null as a indicator of connection state.
     *
     * @throws IllegalStateException if this remote was already disposed
     */
    public ControllerRemote getController() {
        if (isDisposed){
            throw new IllegalStateException("You already disposed this remote. Create new one.");
        }

        synchronized (this) {
            // Reuse the existing link while it is alive.
            if (controller != null && !controller.isDisposed()) {
                return controller;
            }

            // Link is down: retry, but no more often than reconnectFrequencyMs.
            if (lastConnectionAttempt + reconnectFrequencyMs < System.currentTimeMillis()) {
                return trySettingUpConnection() ? controller : null;
            }

            return null;
        }
    }

    /**
     * Returns head remote if connection seems valid. Will return null if connection is not in healthy state.
     * Always check for null as a indicator of connection state.
     *
     * @throws IllegalStateException if this remote was already disposed
     */
    public HeadRemote getHead() {
        if (isDisposed){
            throw new IllegalStateException("You already disposed this remote. Create new one.");
        }

        synchronized (this) {
            // Reuse the existing link while it is alive.
            if (head != null && !head.isDisposed()) {
                return head;
            }

            // Link is down: retry, but no more often than reconnectFrequencyMs.
            if (lastConnectionAttempt + reconnectFrequencyMs < System.currentTimeMillis()) {
                return trySettingUpConnection() ? head : null;
            }

            return null;
        }
    }

    /** Checks if current instance of VRidgeRemote can communicate with API
     * without establishing permanent connection.
     * Will timeout after 1 second without response.
     */
    public VridgeRemoteConnectionStatus preCheck(){
        synchronized (this){
            try{
                // First, check if we can even query status
                APIStatus status = api.GetStatus();
                if (status == null){
                    return VridgeRemoteConnectionStatus.Unreachable;
                }

                // Then verify if headTracking is available if we want it
                if (capabilities.contains(Capabilities.HeadTracking)){
                    EndpointStatus endpoint = status.findEndpoint(EndpointNames.HeadTracking);

                    if (endpoint == null){
                        // This shouldn't happen, ever
                        return VridgeRemoteConnectionStatus.UnexpectedError;
                    }

                    if (endpoint.Code == ControlResponseCode.InUse){
                        return VridgeRemoteConnectionStatus.InUse;
                    }
                }

                // And do the same for controllers; this can be refactored (DRY)
                if (capabilities.contains(Capabilities.Controllers)){
                    EndpointStatus endpoint = status.findEndpoint(EndpointNames.Controller);

                    if (endpoint == null){
                        return VridgeRemoteConnectionStatus.UnexpectedError;
                    }

                    if (endpoint.Code == ControlResponseCode.InUse){
                        return VridgeRemoteConnectionStatus.InUse;
                    }
                }

                return VridgeRemoteConnectionStatus.Okay;
            }
            catch (Exception e){
                // Discard it and return unknown error.
                // API clients can use lower level access to read actual errors instead of VRidge remote which is fire-and-forget api.
                return VridgeRemoteConnectionStatus.UnexpectedError;
            }
        }
    }

    /** Clears all active connections. This object cannot be used after disposing */
    public void dispose(){
        synchronized (this){
            isDisposed = true;
            eventListeners.clear();
            disconnectAllEndpoints();
            discovery.dispose();
        }
    }

    /** Sets up connection and returns true if all links are established.
     * Updates lastConnectionAttempt regardless of outcome so failed attempts
     * are also rate-limited. Caller must hold the monitor (both getters do). */
    private boolean trySettingUpConnection() {
        lastConnectionAttempt = System.currentTimeMillis();

        // Make sure API server is alive
        APIStatus status;
        try{
            status = api.GetStatus();
        }
        catch (Exception x){
            return false;
        }

        // Reset current connections, if exist
        disconnectAllEndpoints();

        try {
            if (capabilities.contains(Capabilities.HeadTracking)) {
                EndpointStatus endpointStatus = status.findEndpoint(EndpointNames.HeadTracking);
                if (endpointStatus == null || endpointStatus.Code != ControlResponseCode.OK) {
                    return false;
                }

                head = new HeadRemote((HeadTrackingProxy)api.getProxy(APIClient.HEADTRACKING));
            }

            if (capabilities.contains(Capabilities.Controllers)) {
                EndpointStatus endpointStatus = status.findEndpoint(EndpointNames.Controller);
                if (endpointStatus == null){
                    return false;
                }

                controller = new ControllerRemote((ControllerProxy) api.getProxy(APIClient.CONTROLLER));
            }

            // Subscribe to haptic pulse broadcasts, if controller proxy is in use
            if (controller != null){
                broadcasts = api.getProxy(APIClient.BROADCASTS);

                // Forward the events to long-lived event so API user doesn't
                // have to resubscribe on reconnect
                broadcasts.addListener(new IBroadcastListener() {
                    @Override
                    public void onHapticPulse(HapticPulse pulse) {
                        for (IBroadcastListener listener : eventListeners) {
                            listener.onHapticPulse(pulse);
                        }
                    }
                });

                broadcasts.startPolling();
            }

            return true;
        }
        catch (Exception x) {
            APILogger.error("Error during API connection: " + x);

            // Cleanup possibly connected endpoints
            if(controller != null) controller.dispose();
            if(head != null) head.dispose();

            controller = null;
            head = null;
        }

        return false;
    }

    /** Disposes all per-endpoint remotes and drops the references so the
     * getters will attempt a fresh connection next time.
     * NOTE(review): broadcasts is disconnected but not nulled — looks
     * intentional (replaced on reconnect), confirm. */
    private void disconnectAllEndpoints(){
        if(head != null) head.dispose();
        if(controller != null) controller.dispose();
        if(broadcasts != null) broadcasts.disconnect();

        head = null;
        controller = null;
    }
}
9,883
33.439024
134
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/remotes/VridgeRemoteConnectionStatus.java
package com.riftcat.vridge.api.client.java.remotes;

/**
 * Coarse connection health reported by {@code VridgeRemote.preCheck()}.
 */
public enum VridgeRemoteConnectionStatus {

    /** Connected or will be auto-connected on first call */
    Okay,

    /** The API server did not respond to a status query. */
    Unreachable,

    /** A required endpoint is already claimed by another API client. */
    InUse,

    /** Any other failure; use lower-level API access to inspect the actual error. */
    UnexpectedError
}
218
18.909091
60
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/remotes/beacons/VridgeServerBeacon.java
package com.riftcat.vridge.api.client.java.remotes.beacons;

import com.riftcat.vridge.api.client.java.proto.Beacon;

import java.net.InetAddress;

/**
 * One discovery beacon received from a VRidge server, stamped with its local
 * arrival time so that stale servers can be filtered out of listings.
 */
public class VridgeServerBeacon {

    /** How long (ms) after arrival a beacon is still considered fresh. */
    private static final long timeoutMs = 5000;

    private final Beacon beacon;
    private final InetAddress endpoint;

    /** Arrival time: System.currentTimeMillis() at construction.
     *  (Renamed from the original misspelled "timestmapMs".) */
    private final long timestampMs;

    /**
     * @param beacon   payload broadcast by the server
     * @param endpoint address the beacon was received from
     */
    public VridgeServerBeacon(Beacon beacon, InetAddress endpoint) {
        this.beacon = beacon;
        this.endpoint = endpoint;

        timestampMs = System.currentTimeMillis();
    }

    /** Raw beacon payload broadcast by the server. */
    public Beacon getBeacon() {
        return beacon;
    }

    /** Address the beacon was received from. */
    public InetAddress getEndpoint() {
        return endpoint;
    }

    /** True while this beacon is younger than the freshness timeout. */
    public boolean isFresh(){
        return timestampMs + timeoutMs > System.currentTimeMillis();
    }

    /** role|version|machine|user@ip — display string for server pickers. */
    @Override
    public String toString() {
        return beacon.getRole() + "|" +
                beacon.getHumanReadableVersion() + "|" +
                beacon.getMachineName() + "|" +
                beacon.getUserName() + "@" + endpoint.getHostAddress();
    }
}
1,032
24.195122
68
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/remotes/beacons/VridgeServerBeaconList.java
package com.riftcat.vridge.api.client.java.remotes.beacons;

import com.riftcat.vridge.api.client.java.proto.Beacon;

import java.net.InetAddress;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;

/**
 * Thread-safe collection of discovered VRidge servers, keyed by the address
 * the beacon arrived from. Re-adding a beacon for a known address replaces
 * the old entry, refreshing its arrival timestamp.
 */
public class VridgeServerBeaconList {

    /** Latest beacon per server address; each value carries its own arrival timestamp. */
    private HashMap<InetAddress, VridgeServerBeacon> timedList = new HashMap<InetAddress, VridgeServerBeacon>();

    /** Records (or refreshes) the beacon received from the given address. */
    public synchronized void add(Beacon beacon, InetAddress endpoint){
        // HashMap.put already replaces an existing mapping, so the original
        // containsKey()/remove() preamble was redundant.
        timedList.put(endpoint, new VridgeServerBeacon(beacon, endpoint));
    }

    /** Returns only the servers whose beacon is still fresh (recently seen). */
    public synchronized List<VridgeServerBeacon> getFreshServers(){
        LinkedList<VridgeServerBeacon> beacons = new LinkedList<VridgeServerBeacon>();

        for (VridgeServerBeacon vridgeServerBeacon : timedList.values()) {
            if(vridgeServerBeacon.isFresh()){
                beacons.add(vridgeServerBeacon);
            }
        }

        return beacons;
    }
}
1,066
30.382353
112
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/utils/APILogger.java
package com.riftcat.vridge.api.client.java.utils;

import java.util.LinkedList;

/**
 * Static fan-out logger for the API client: every message is forwarded to
 * each registered {@link ILog} sink in registration order. With no sinks
 * registered, logging is a no-op.
 */
public class APILogger {

    /** Registered sinks, notified in the order they were added. */
    private static LinkedList<ILog> loggers = new LinkedList<ILog>();

    /** Registers a sink that will receive all debug and error messages. */
    public static void AddLogListener(ILog logger){
        loggers.add(logger);
    }

    /** Forwards a debug-level message to every registered sink. */
    public static void debug(String s){
        for (java.util.Iterator<ILog> it = loggers.iterator(); it.hasNext(); ) {
            it.next().debug(s);
        }
    }

    /** ZMQ traffic logging is intentionally disabled (too noisy); re-enable
     *  by forwarding to debug("ZMQ: " + s). */
    public static void zmq(String s) {
    }

    /** Forwards an error message to every registered sink. */
    public static void error(String s) {
        for (ILog sink : loggers)
            sink.error(s);
    }
}
580
19.034483
69
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/utils/ButtonMask.java
package com.riftcat.vridge.api.client.java.utils;

/**
 * Bit masks for VR controller buttons. The private k_EButton_* constants are
 * bit positions (they appear to mirror OpenVR's EVRButtonId values — the
 * SteamVR aliases suggest so); each public mask is a single-bit {@code long}
 * (1L shifted by that position), suitable for OR-ing into a button bitfield.
 *
 * All fields are now {@code final}: the original left both the private bit
 * indices and the public masks mutable.
 */
public class ButtonMask{

    // Button ids (bit positions).
    private static final int k_EButton_System = 0;
    private static final int k_EButton_ApplicationMenu = 1;
    private static final int k_EButton_Grip = 2;
    private static final int k_EButton_DPad_Left = 3;
    private static final int k_EButton_DPad_Up = 4;
    private static final int k_EButton_DPad_Right = 5;
    private static final int k_EButton_DPad_Down = 6;
    private static final int k_EButton_A = 7;

    private static final int k_EButton_ProximitySensor = 31;

    private static final int k_EButton_Axis0 = 32;
    private static final int k_EButton_Axis1 = 33;
    private static final int k_EButton_Axis2 = 34;
    private static final int k_EButton_Axis3 = 35;
    private static final int k_EButton_Axis4 = 36;

    // aliases for well known controllers
    private static final int k_EButton_SteamVR_Touchpad = k_EButton_Axis0;
    private static final int k_EButton_SteamVR_Trigger = k_EButton_Axis1;
    private static final int k_EButton_Dashboard_Back = k_EButton_Grip;

    private static final int k_EButton_Max = 64;

    // Single-bit masks. long is required because the axis bits exceed bit 31.
    public static final long System = 1L << k_EButton_System;
    public static final long ApplicationMenu = 1L << k_EButton_ApplicationMenu;
    public static final long Grip = 1L << k_EButton_Grip;
    public static final long Axis0 = 1L << k_EButton_Axis0;
    public static final long Axis1 = 1L << k_EButton_Axis1;
    public static final long Axis2 = 1L << k_EButton_Axis2;
    public static final long Axis3 = 1L << k_EButton_Axis3;
    public static final long Axis4 = 1L << k_EButton_Axis4;

    public static final long Touchpad = 1L << k_EButton_SteamVR_Touchpad;
    public static final long Trigger = 1L << k_EButton_SteamVR_Trigger;

    /** Non-instantiable constants holder. */
    private ButtonMask() {
    }
}
1,679
43.210526
80
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/utils/ILog.java
package com.riftcat.vridge.api.client.java.utils;

/**
 * Sink interface for API client log output. Implementations are registered
 * via {@code APILogger.AddLogListener} and receive every logged message.
 */
public interface ILog {

    /** Receives a debug-level diagnostic message. */
    void debug(String s);

    /** Receives an error message. */
    void error(String s);
}
129
17.571429
49
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/utils/SerializationUtils.java
package com.riftcat.vridge.api.client.java.utils;

import com.google.protobuf.ByteString;

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

/**
 * Helpers that pack float values into little-endian byte sequences
 * (4 bytes per float), either as a raw byte[] or a protobuf ByteString.
 */
public class SerializationUtils {

    /** Packs the given floats into a little-endian ByteString. */
    public static ByteString byteStringFromFloats(float...args){
        return ByteString.copyFrom(byteArrayFromFloats(args));
    }

    /**
     * Packs the given floats into a little-endian byte array,
     * 4 bytes per value in argument order.
     */
    public static byte[] byteArrayFromFloats(float... args){
        ByteBuffer data = ByteBuffer.allocate(args.length * 4);
        data.order(ByteOrder.LITTLE_ENDIAN);

        for (float arg : args) {
            data.putFloat(arg);
        }

        return data.array();
    }

    /** Packs a float array into a little-endian ByteString. */
    public static ByteString byteStringFromFloatArray(float[] array){
        // Identical packing to byteArrayFromFloats — delegate instead of
        // duplicating the buffer-filling loop (the original repeated it).
        return ByteString.copyFrom(byteArrayFromFloats(array));
    }
}
914
25.911765
69
java
vridge-api
vridge-api-master/src/java/VRE.Vridge.API/VRE.Vridge.API.Client/src/main/java/com/riftcat/vridge/api/client/java/utils/SocketHelpers.java
package com.riftcat.vridge.api.client.java.utils;

import com.google.gson.Gson;
import org.zeromq.ZMQ;

/**
 * JSON (de)serialization helpers for ZeroMQ sockets.
 */
public class SocketHelpers {

    /** Shared Gson instance (now final — the original left it mutable).
     *  Gson instances are thread-safe per the Gson documentation. */
    public static final Gson Serializer = new Gson();

    /**
     * Serializes {@code obj} to JSON and sends it on the socket.
     *
     * @return true if the send succeeded
     */
    public static boolean SendAsJson(ZMQ.Socket socket, Object obj){
        String json = Serializer.toJson(obj);
        return socket.send(json);
    }

    /**
     * Receives one message from the socket (blocking) and deserializes it
     * as {@code type}.
     */
    public static <T> T ReceiveByJson(ZMQ.Socket socket, Class<T> type){
        String json = socket.recvStr();
        // fromJson(String, Class<T>) already returns T — the original's
        // Object temporary plus unchecked cast was redundant.
        return Serializer.fromJson(json, type);
    }
}
581
19.068966
72
java
dl4el
dl4el-master/jrk/java/FreebaseE2W.java
package jrk.java;

import java.io.*;
import java.util.*;
import gnu.trove.map.hash.*;
import gnu.trove.map.*;
import gnu.trove.set.hash.*;
import gnu.trove.set.*;
import gnu.trove.iterator.*;

/**
 * Builds an entity -> bag-of-word-ids mapping from a Freebase RDF dump by
 * propagating words one hop along triples: each entity absorbs words from
 * its neighbors' names and from relation names. Driven from {@link #main}
 * in several modes ("process" one dump shard, "combine" shard outputs,
 * plus a few export/inspection modes).
 *
 * All state is static; methods must be called in the order main() uses them
 * (e.g. nextHop() requires loadPreHopE2W() first).
 * StrIntMap is a project type — assumed to be a bidirectional string<->int
 * interning map whose add() returns the assigned id (TODO confirm).
 */
public class FreebaseE2W {

    // Hop counter — declared but never used in this file.
    static int nHop = 0;

    // Word string <-> word id interning map.
    static StrIntMap dict = null;

    // Entity string <-> entity id interning map (filled by loadPreHopE2W).
    static StrIntMap entId = null;

    // Entity id -> word ids accumulated during the CURRENT hop.
    static TIntObjectHashMap<TIntHashSet> curE2WId = null;

    // Entity id -> word ids from the PREVIOUS hop (the input to nextHop).
    static TIntObjectHashMap<TIntHashSet> preE2WId = null;

    // Namespace prefix shared by all Freebase resource URIs.
    static final String prefix = "http://rdf.freebase.com/ns/";

    /**
     * Strips surrounding angle brackets and the Freebase namespace prefix.
     * Returns the bare id (e.g. "m.0abc"), or null for non-Freebase URIs.
     */
    public static String clean(String str) {
        if (str.startsWith("<"))
            str = str.substring(1, str.length() - 1);

        if (str.startsWith(prefix))
            return str.substring(prefix.length());
        else
            return null;
    }

    /**
     * Tokenizes str on '_', ' ', '.', '/', interns each token in dict and
     * adds the word ids to ret (allocated if null). Strings yielding 20+
     * tokens are skipped entirely — presumably to drop pathological names.
     */
    public static TIntHashSet getWords(String str, TIntHashSet ret) {
        String[] words = str.split("[_ ./]");
        if (ret == null) ret = new TIntHashSet();
        if (words.length < 20) {
            for (String w: words) {
                int id = dict.add(w);
                ret.add(id);
            }
        }
        return ret;
    }

    /** Loads the serialized word map from dir/wordId.ser into dict
     *  (leaves dict empty when the file does not exist). */
    public static void loadWordId(String dir) {
        dict = new StrIntMap();
        try {
            System.out.println("loading words from " + dir + "/wordId.ser");
            File f = new File(dir + "/wordId.ser");
            if (f.exists()) {
                ObjectInputStream ois = new ObjectInputStream(new FileInputStream(f));
                dict = (StrIntMap) ois.readObject();
            }
            System.out.format("total %d words\n", dict.size());
        }
        catch(Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Initializes entId/dict/preE2WId for a hop: loads the persisted word
     * map (if any), then reads the previous-hop entity->words file
     * (tab-separated: entity<TAB>space-joined-words), interning entities
     * and words as it goes.
     * NOTE(review): readers are never closed here — acceptable for a
     * one-shot batch tool, but a leak nonetheless.
     */
    public static void loadPreHopE2W(String dir, String fname) {
        preE2WId = new TIntObjectHashMap<>();
        entId = new StrIntMap();
        dict = new StrIntMap();

        try {
            System.out.println("loading words from " + dir + "/wordId.ser");
            File f = new File(dir + "/wordId.ser");
            if (f.exists()) {
                ObjectInputStream ois = new ObjectInputStream(new FileInputStream(f));
                dict = (StrIntMap) ois.readObject();
            }
            System.out.format("total %d words\n", dict.size());

            System.out.println("loading prehopE2W from " + dir + "/" + fname);
            BufferedReader br = new BufferedReader(new FileReader(dir + "/" + fname));
            int count = 0;
            for (String line; (line = br.readLine()) != null; ) {
                String[] strs = line.split("\t");
                Integer eId = entId.add(strs[0]);
                preE2WId.put(eId, getWords(strs[1], null));
                count++;
                if (count % (int)1e6 == 0) {
                    // progress indicator (carriage return keeps it on one line)
                    System.out.print(Integer.toString(count) + "\r");
                    //break;
                }
            }
            System.out.format("total %d freebase entities\n", preE2WId.size());
        }
        catch(Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * One word-propagation hop over the RDF dump at path, filling curE2WId.
     * For each triple (h, r, t) with both entities known from the previous
     * hop: h absorbs t's words plus the relation words (unless r is
     * type.type.instance), and t absorbs h's words (unless r is
     * type.object.type / prominent_type) plus the relation words (unless r
     * is either of those type-ish relations).
     */
    public static void nextHop(String path) {
        try {
            curE2WId = new TIntObjectHashMap<>();
            BufferedReader br = new BufferedReader(new FileReader(path));

            // Relation string -> relation id, and relation id -> its words,
            // cached so each relation is tokenized only once.
            StrIntMap relId = new StrIntMap();
            TIntObjectHashMap<TIntHashSet> r2wId = new TIntObjectHashMap<>();

            int count = 0;
            for (String line; (line = br.readLine()) != null; ) {
                String[] strs = line.split("\t");
                String hstr = clean(strs[0]);
                String tstr = clean(strs[2]);
                String rstr = clean(strs[1]);
                if (hstr == null || tstr == null || rstr == null)
                    continue;

                // Only triples whose BOTH endpoints are known entities count.
                int h = entId.str2int.get(hstr);
                int t = entId.str2int.get(tstr);
                if (h == entId.str2int.getNoEntryValue() || t == entId.str2int.getNoEntryValue())
                    continue;

                TIntHashSet hwords = preE2WId.get(h);
                TIntHashSet twords = preE2WId.get(t);

                int r = relId.str2int.get(rstr);
                TIntHashSet rwords = null;
                // NOTE(review): this compares a relId lookup against
                // entId's no-entry value — correct only if both maps share
                // the same sentinel (trove's default is 0); confirm.
                if (r == entId.str2int.getNoEntryValue()) {
                    r = relId.add(rstr);
                    rwords = getWords(rstr, null);
                    r2wId.put(r, rwords);
                }
                else
                    rwords = r2wId.get(r);

                boolean isTypeInstance = rstr.endsWith("type.type.instance");
                boolean isObjType = rstr.endsWith("type.object.type") || rstr.endsWith("prominent_type");

                // for h
                {
                    TIntHashSet words = curE2WId.get(h);
                    if (words == null) {
                        words = new TIntHashSet();
                        curE2WId.put(h, words);
                    }
                    if (!isTypeInstance) {
                        words.addAll(twords);
                        words.addAll(rwords);
                    }
                }

                // for t
                {
                    TIntHashSet words = curE2WId.get(t);
                    if (words == null) {
                        words = new TIntHashSet();
                        curE2WId.put(t, words);
                    }
                    if (!isObjType) {
                        words.addAll(hwords);
                    }
                    if (!(isObjType || isTypeInstance))
                        words.addAll(rwords);
                }

                count++;
                if (count % (int)1e6 == 0) {
                    System.out.print(Integer.toString(count) + "\r");
                    //break;
                }
            }
        }
        catch(Exception e) {
            e.printStackTrace();
        }
    }

    /** Collects every token appearing in any relation name of the dump and
     *  writes the unique set, one per line, to relWords.txt. */
    public static void printRelWords(String path) {
        try {
            BufferedReader br = new BufferedReader(new FileReader(path));
            int count = 0;
            Set<String> allWords = new HashSet<>();
            for (String line; (line = br.readLine()) != null; ) {
                String[] strs = line.split("\t");
                String rstr = clean(strs[1]);
                if (rstr != null) {
                    String[] words = rstr.split("[_ ./]");
                    for (String w: words) allWords.add(w);
                }
                count++;
                if (count % 1000000 == 0) System.out.print(count + "\r");
            }
            BufferedWriter bw = new BufferedWriter(new FileWriter("relWords.txt"));
            for (String w: allWords) bw.write(w + "\n");
            bw.close();
        }
        catch(Exception e) {
            e.printStackTrace();
        }
    }

    /** Extracts type.instance triples into freebase-ent-type.txt, one line
     *  per entity: entity<TAB>space-joined list of its types. */
    public static void printEntType(String path) {
        try {
            BufferedReader br = new BufferedReader(new FileReader(path));
            BufferedWriter bw = new BufferedWriter(new FileWriter("freebase-ent-type.txt"));

            // entity -> list of type ids (head of a type.instance triple is
            // the type, the tail is the entity).
            Map<String, List<String>> e2cList = new HashMap<>();

            int count = 0;
            for (String line; (line = br.readLine()) != null; ) {
                String[] strs = line.split("\t");
                if (strs[1].endsWith("type.instance>")) {
                    count++;
                    if (count % 1000 == 0) {
                        System.out.print(count + "\r");
                        //break;
                    }

                    String hstr = clean(strs[0]);
                    String tstr = clean(strs[2]);
                    List<String> cList = e2cList.get(tstr);
                    if (cList == null) {
                        cList = new ArrayList<>();
                        e2cList.put(tstr, cList);
                    }
                    cList.add(hstr);
                }
            }

            for (Map.Entry<String, List<String>> item: e2cList.entrySet()) {
                bw.write(item.getKey() + "\t");
                for (String cat: item.getValue()) bw.write(cat + " ");
                bw.write("\n");
            }
            bw.close();
        }
        catch(Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Merges the per-shard e2w_NN.ser outputs (produced by "process" mode)
     * into curE2WId, unioning word sets per entity. Stops at the first
     * missing shard file.
     */
    public static void combineFilesNextHop(String dir, int n) {
        try {
            curE2WId = new TIntObjectHashMap<>();

            for (int i = 0; i < n ; i++) {
                String ppath = dir + String.format("/e2w_%02d.ser", i);
                System.out.println("merging with " + ppath);
                File f = new File(ppath);
                if (!f.exists()) {
                    System.out.println("STOP");
                    break;
                }

                ObjectInputStream ois = new ObjectInputStream(new FileInputStream(f));
                TIntObjectHashMap curE2WId_i = (TIntObjectHashMap) ois.readObject();

                for (TIntObjectIterator<TIntHashSet> iter = curE2WId_i.iterator(); iter.hasNext(); ) {
                    iter.advance();
                    int eId = iter.key();
                    TIntHashSet ws_i = iter.value();

                    TIntHashSet ws = curE2WId.get(eId);
                    if (ws == null)
                        curE2WId.put(eId, ws_i);
                    else
                        ws.addAll(ws_i);
                }
            }
        }
        catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Dumps all known word strings, one per line, to dir/wordId.txt. */
    public static void saveWordIdTxt(String dir) {
        try {
            System.out.println("write wordId");
            BufferedWriter bw = new BufferedWriter(new FileWriter(dir + "/wordId.txt"));
            for (TObjectIntIterator<String> iter = dict.str2int.iterator(); iter.hasNext(); ) {
                iter.advance();
                String word = iter.key();
                bw.write(word + "\n");
            }
            bw.close();
        }
        catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Persists the word map to dir/wordId.ser, then writes curE2WId to
     * dir/fname either as text ("txt": entity<TAB>space-joined words) or as
     * Java serialization ("ser").
     */
    public static void saveToFile(String dir, String fname, String format) {
        try {
            // write dict using ser
            System.out.println("save wordId to " + dir + "/wordId.ser");
            ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(dir + "/wordId.ser"));
            oos.writeObject(dict);

            if (format.equals("txt")) {
                System.out.println("write e2w");
                BufferedWriter bw = new BufferedWriter(new FileWriter(dir + "/" + fname));
                for (TIntObjectIterator<TIntHashSet> iter = curE2WId.iterator(); iter.hasNext(); ) {
                    iter.advance();
                    int eId = iter.key();
                    TIntHashSet ws = iter.value();
                    bw.write(entId.int2str.get(eId) + "\t");
                    for (TIntIterator wIter = ws.iterator(); wIter.hasNext(); ) {
                        int wId = wIter.next();
                        bw.write(dict.int2str.get(wId) + " ");
                    }
                    bw.write("\n");
                }
                bw.close();
            }
            else if (format.equals("ser")) {
                System.out.println("save e2w to " + dir + "/" + fname);
                oos = new ObjectOutputStream(new FileOutputStream(dir + "/" + fname));
                oos.writeObject(curE2WId);
            }
        }
        catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Entry point. args[0] selects the mode:
     *   process N        — run one hop over dump shard N, save e2w_NN.ser
     *   combine N        — merge N shard outputs into e2w.txt
     *   print_wordId     — export wordId.ser as wordId.txt
     *   print_relWords   — export unique relation-name tokens
     *   print_ent_type   — export entity -> types table
     */
    public static void main(String[] args) {
        String mode = args[0];

        if (mode.equals("process")) {
            int portionId = Integer.parseInt(args[1]);

            String dir = "data/freebase/";
            String firstHopE2W_fname = "freebase-entity.txt";
            System.out.println("loading entities from " + dir + firstHopE2W_fname);
            loadPreHopE2W(dir, firstHopE2W_fname);

            String fbPath = dir + String.format("freebase-rdf-%02d", portionId);
            System.out.println("get words from freebase " + fbPath);
            nextHop(fbPath);

            String format = "ser";
            String fname = String.format("e2w_%02d.%s", portionId, format);
            saveToFile(dir, fname, format);
        }
        else if (mode.equals("combine")) {
            int nPortion = Integer.parseInt(args[1]);

            String dir = "data/freebase/";
            String firstHopE2W_fname = "freebase-entity.txt";
            System.out.println("loading entities from " + dir + firstHopE2W_fname);
            loadPreHopE2W(dir, firstHopE2W_fname);

            String fname = "e2w.txt";
            combineFilesNextHop(dir, nPortion);
            saveToFile(dir, fname, "txt");
        }
        else if (mode.equals("print_wordId")) {
            String dir = "data/freebase/";
            System.out.println("load wordId");
            loadWordId(dir);
            System.out.println("save to file");
            saveWordIdTxt(dir);
        }
        else if (mode.equals("print_relWords")) {
            String fbPath = "../freebase2tacred/data/freebase-rdf-latest";
            System.out.println("process" + fbPath);
            printRelWords(fbPath);
        }
        else if (mode.equals("print_ent_type")) {
            String fbPath = "../freebase2tacred/data/freebase-rdf-latest";
            System.out.println("process" + fbPath);
            printEntType(fbPath);
        }
    }
}
13,444
34.474934
105
java
dl4el
dl4el-master/jrk/java/FreebaseTriples.java
package jrk.java;

import java.io.*;
import java.util.*;

/**
 * Extracts the subset of Freebase RDF triples whose head AND tail entities
 * both appear in a previously built entity list, writing them as
 * tab-separated "head TAB relation TAB tail" lines with the
 * "http://rdf.freebase.com/ns/" prefix stripped.
 *
 * (Unused gnu.trove wildcard imports from the original were removed; this
 * class only uses java.util collections.)
 */
public class FreebaseTriples {

    /** Entity ids (prefix-stripped MIDs) loaded by loadEntSet. */
    static Set<String> entSet = null;

    /** Namespace prefix shared by all Freebase resource URIs. */
    static final String prefix = "http://rdf.freebase.com/ns/";

    /**
     * Strips surrounding angle brackets and the Freebase namespace prefix.
     *
     * @return the bare id (e.g. "m.0abc"), or null if the string is not a
     *         Freebase-namespace URI
     */
    public static String clean(String str) {
        if (str.startsWith("<"))
            str = str.substring(1, str.length() - 1);

        if (str.startsWith(prefix))
            return str.substring(prefix.length());
        else
            return null;
    }

    /**
     * Loads the entity id set from a tab-separated file (id TAB words).
     * Lines without exactly two fields are echoed and skipped.
     */
    public static void loadEntSet(String path) {
        entSet = new HashSet<>();
        System.out.println("loading ent from " + path);

        // try-with-resources: the original leaked the reader.
        try (BufferedReader br = new BufferedReader(new FileReader(path))) {
            int count = 0;
            for (String line; (line = br.readLine()) != null; ) {
                String[] strs = line.split("\t");
                if (strs.length != 2){
                    System.out.println(line);
                    continue;
                }
                entSet.add(strs[0]);
                count++;
                if (count % (int)1e6 == 0) {
                    // progress indicator (carriage return keeps it on one line)
                    System.out.print(Integer.toString(count) + "\r");
                }
            }
            System.out.format("total %d freebase entities\n", entSet.size());
        }
        catch(Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Scans the RDF dump at fbPath and writes every triple whose head and
     * tail are both in entSet to outPath. Requires loadEntSet() first.
     */
    public static void getTriples(String fbPath, String outPath) {
        // try-with-resources guarantees the writer is flushed and closed —
        // the original never closed it, so buffered output could be lost.
        try (BufferedReader br = new BufferedReader(new FileReader(fbPath));
             BufferedWriter bw = new BufferedWriter(new FileWriter(outPath))) {
            int count = 0;
            int nTriples = 0;
            for (String line; (line = br.readLine()) != null; ) {
                String[] strs = line.split("\t");

                count++;
                if (count % (int)1e6 == 0) {
                    System.out.print(String.format("%15d\t%15d", count, nTriples) + "\r");
                }

                String hstr = clean(strs[0]);
                String tstr = clean(strs[2]);
                String rstr = clean(strs[1]);
                if (hstr == null || tstr == null || rstr == null)
                    continue;
                if (!entSet.contains(hstr) || !entSet.contains(tstr))
                    continue;

                bw.write(hstr + "\t" + rstr + "\t" + tstr + "\n");
                nTriples++;
            }
        }
        catch(Exception e) {
            e.printStackTrace();
        }
    }

    /** Batch entry point with hard-coded input/output paths. */
    public static void main(String[] args) {
        {
            String entPath = "data/freebase/freebase-entity.txt";
            System.out.println("loading entities from " + entPath);
            loadEntSet(entPath);

            String fbPath = "../freebase2tacred/data/freebase-rdf-latest";
            String outPath = "data/freebase/freebase-triples.txt";
            System.out.println("get triples from freebase " + fbPath);
            getTriples(fbPath, outPath);
        }
    }
}
3,136
31.010204
90
java
dl4el
dl4el-master/jrk/java/Pair.java
package jrk.java;

import java.io.Serializable;

/**
 * Immutable, serializable 2-tuple. Nulls are permitted in either position;
 * equals/hashCode treat two nulls as equal.
 *
 * (Added the missing {@code @Override} annotations on toString/equals/
 * hashCode so the compiler verifies they actually override.)
 *
 * @param <A> type of the first element
 * @param <B> type of the second element
 */
public class Pair<A, B> implements Serializable {

    /** First element (may be null). */
    public final A fst;

    /** Second element (may be null). */
    public final B snd;

    public Pair(A fst, B snd) {
        this.fst = fst;
        this.snd = snd;
    }

    /** Map-entry-style alias for {@link #fst}. */
    public A getKey() { return fst; }

    /** Map-entry-style alias for {@link #snd}. */
    public B getValue() { return snd; }

    @Override
    public String toString() {
        return "Pair[" + fst + "," + snd + "]";
    }

    /** Null-safe equality of two references. */
    private static boolean equals(Object x, Object y) {
        return (x == null && y == null) || (x != null && x.equals(y));
    }

    @Override
    public boolean equals(Object other) {
        return other instanceof Pair<?,?> &&
            equals(fst, ((Pair<?,?>)other).fst) &&
            equals(snd, ((Pair<?,?>)other).snd);
    }

    @Override
    public int hashCode() {
        // Offsets 1 and 2 keep (null,x), (x,null) and (null,null) distinct.
        if (fst == null) return (snd == null) ? 0 : snd.hashCode() + 1;
        else if (snd == null) return fst.hashCode() + 2;
        else return fst.hashCode() * 17 + snd.hashCode();
    }

    /** Static factory: {@code Pair.of(a, b)}. */
    public static <A,B> Pair<A,B> of(A a, B b) {
        return new Pair<A,B>(a,b);
    }
}
1,110
21.22
71
java