repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationDefinition.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; /** * {@link ReservationDefinition} captures the set of resource and time * constraints the user cares about regarding a reservation. * * @see ResourceRequest * */ @Public @Unstable public abstract class ReservationDefinition { @Public @Unstable public static ReservationDefinition newInstance(long arrival, long deadline, ReservationRequests reservationRequests, String name) { ReservationDefinition rDefinition = Records.newRecord(ReservationDefinition.class); rDefinition.setArrival(arrival); rDefinition.setDeadline(deadline); rDefinition.setReservationRequests(reservationRequests); rDefinition.setReservationName(name); return rDefinition; } /** * Get the arrival time or the earliest time from which the resource(s) can be * allocated. Time expressed as UTC. 
* * @return the earliest valid time for this reservation */ @Public @Unstable public abstract long getArrival(); /** * Set the arrival time or the earliest time from which the resource(s) can be * allocated. Time expressed as UTC. * * @param earliestStartTime the earliest valid time for this reservation */ @Public @Unstable public abstract void setArrival(long earliestStartTime); /** * Get the deadline or the latest time by when the resource(s) must be * allocated. Time expressed as UTC. * * @return the deadline or the latest time by when the resource(s) must be * allocated */ @Public @Unstable public abstract long getDeadline(); /** * Set the deadline or the latest time by when the resource(s) must be * allocated. Time expressed as UTC. * * @param latestEndTime the deadline or the latest time by when the * resource(s) should be allocated */ @Public @Unstable public abstract void setDeadline(long latestEndTime); /** * Get the list of {@link ReservationRequests} representing the resources * required by the application * * @return the list of {@link ReservationRequests} */ @Public @Unstable public abstract ReservationRequests getReservationRequests(); /** * Set the list of {@link ReservationRequests} representing the resources * required by the application * * @param reservationRequests the list of {@link ReservationRequests} */ @Public @Unstable public abstract void setReservationRequests( ReservationRequests reservationRequests); /** * Get the name for this reservation. The name need-not be unique, and it is * just a mnemonic for the user (akin to job names). Accepted reservations are * uniquely identified by a system-generated ReservationId. * * @return string representing the name of the corresponding reserved resource * allocation in the scheduler */ @Public @Evolving public abstract String getReservationName(); /** * Set the name for this reservation. The name need-not be unique, and it is * just a mnemonic for the user (akin to job names). 
Accepted reservations are * uniquely identified by a system-generated ReservationId. * * @param name representing the name of the corresponding reserved resource * allocation in the scheduler */ @Public @Evolving public abstract void setReservationName(String name); }
4,430
31.108696
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.api.records;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;

/**
 * {@code LocalResourceVisibility} specifies the <em>visibility</em>
 * of a resource localized by the {@code NodeManager}.
 * <p>
 * The <em>visibility</em> can be one of:
 * <ul>
 *   <li>{@link #PUBLIC} - Shared by all users on the node.</li>
 *   <li>{@link #PRIVATE} - Shared among all applications of the
 *       <em>same user</em> on the node.</li>
 *   <li>{@link #APPLICATION} - Shared only among containers of the
 *       <em>same application</em> on the node.</li>
 * </ul>
 *
 * @see LocalResource
 * @see ContainerLaunchContext
 * @see ApplicationSubmissionContext
 * @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)
 */
@Public
@Stable
public enum LocalResourceVisibility {

  /** Shared by all users on the node. */
  PUBLIC,

  /** Shared among all applications of the <em>same user</em> on the node. */
  PRIVATE,

  /**
   * Shared only among containers of the <em>same application</em> on the
   * node.
   */
  APPLICATION

}
2,112
31.507692
118
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueUserACLInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.util.Records; /** * <p><code>QueueUserACLInfo</code> provides information {@link QueueACL} for * the given user.</p> * * @see QueueACL * @see ApplicationClientProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest) */ @Public @Stable public abstract class QueueUserACLInfo { @Private @Unstable public static QueueUserACLInfo newInstance(String queueName, List<QueueACL> acls) { QueueUserACLInfo info = Records.newRecord(QueueUserACLInfo.class); info.setQueueName(queueName); info.setUserAcls(acls); return info; } /** * Get the <em>queue name</em> of the queue. 
* @return <em>queue name</em> of the queue */ @Public @Stable public abstract String getQueueName(); @Private @Unstable public abstract void setQueueName(String queueName); /** * Get the list of <code>QueueACL</code> for the given user. * @return list of <code>QueueACL</code> for the given user */ @Public @Stable public abstract List<QueueACL> getUserAcls(); @Private @Unstable public abstract void setUserAcls(List<QueueACL> acls); }
2,369
30.6
122
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.yarn.util.Records; @Public @Evolving public abstract class ResourceOption { public static ResourceOption newInstance(Resource resource, int overCommitTimeout){ ResourceOption resourceOption = Records.newRecord(ResourceOption.class); resourceOption.setResource(resource); resourceOption.setOverCommitTimeout(overCommitTimeout); resourceOption.build(); return resourceOption; } /** * Get the <em>resource</em> of the ResourceOption. * @return <em>resource</em> of the ResourceOption */ @Private @Evolving public abstract Resource getResource(); @Private @Evolving protected abstract void setResource(Resource resource); /** * Get timeout for tolerant of resource over-commitment * Note: negative value means no timeout so that allocated containers will * keep running until the end even under resource over-commitment cases. 
* @return <em>overCommitTimeout</em> of the ResourceOption */ @Private @Evolving public abstract int getOverCommitTimeout(); @Private @Evolving protected abstract void setOverCommitTimeout(int overCommitTimeout); @Private @Evolving protected abstract void build(); @Override public String toString() { return "Resource:" + getResource().toString() + ", overCommitTimeout:" + getOverCommitTimeout(); } }
2,434
31.039474
76
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerExitStatus.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.api.records;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;

/**
 * Container exit statuses indicating special exit circumstances.
 */
@Public
@Unstable
public class ContainerExitStatus {

  /** Container exited normally. */
  public static final int SUCCESS = 0;

  /** Exit status is not yet known / not valid. */
  public static final int INVALID = -1000;

  /**
   * Containers killed by the framework, either due to being released by
   * the application or being 'lost' due to node failures etc.
   */
  public static final int ABORTED = -100;

  /**
   * When threshold number of the nodemanager-local-directories or
   * threshold number of the nodemanager-log-directories become bad.
   */
  public static final int DISKS_FAILED = -101;

  /** Containers preempted by the framework. */
  public static final int PREEMPTED = -102;

  /** Container terminated because of exceeding allocated virtual memory. */
  public static final int KILLED_EXCEEDED_VMEM = -103;

  /** Container terminated because of exceeding allocated physical memory. */
  public static final int KILLED_EXCEEDED_PMEM = -104;

  /** Container was terminated by stop request by the app master. */
  public static final int KILLED_BY_APPMASTER = -105;

  /** Container was terminated by the resource manager. */
  public static final int KILLED_BY_RESOURCEMANAGER = -106;

  /** Container was terminated after the application finished. */
  public static final int KILLED_AFTER_APP_COMPLETION = -107;

}
2,355
30
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnApplicationState.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.api.records;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;

/**
 * Enumeration of various states of an <code>ApplicationMaster</code>.
 */
@Public
@Stable
public enum YarnApplicationState {

  /** Application which was just created. */
  NEW,

  /** Application which is being saved. */
  NEW_SAVING,

  /** Application which has been submitted. */
  SUBMITTED,

  /** Application has been accepted by the scheduler. */
  ACCEPTED,

  /** Application which is currently running. */
  RUNNING,

  /** Application which finished successfully. */
  FINISHED,

  /** Application which failed. */
  FAILED,

  /** Application which was terminated by a user or admin. */
  KILLED

}
1,585
28.37037
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.util.Records; /** * <p><code>URL</code> represents a serializable {@link java.net.URL}.</p> */ @Public @Stable public abstract class URL { @Public @Stable public static URL newInstance(String scheme, String host, int port, String file) { URL url = Records.newRecord(URL.class); url.setScheme(scheme); url.setHost(host); url.setPort(port); url.setFile(file); return url; } /** * Get the scheme of the URL. * @return scheme of the URL */ @Public @Stable public abstract String getScheme(); /** * Set the scheme of the URL * @param scheme scheme of the URL */ @Public @Stable public abstract void setScheme(String scheme); /** * Get the user info of the URL. * @return user info of the URL */ @Public @Stable public abstract String getUserInfo(); /** * Set the user info of the URL. * @param userInfo user info of the URL */ @Public @Stable public abstract void setUserInfo(String userInfo); /** * Get the host of the URL. 
* @return host of the URL */ @Public @Stable public abstract String getHost(); /** * Set the host of the URL. * @param host host of the URL */ @Public @Stable public abstract void setHost(String host); /** * Get the port of the URL. * @return port of the URL */ @Public @Stable public abstract int getPort(); /** * Set the port of the URL * @param port port of the URL */ @Public @Stable public abstract void setPort(int port); /** * Get the file of the URL. * @return file of the URL */ @Public @Stable public abstract String getFile(); /** * Set the file of the URL. * @param file file of the URL */ @Public @Stable public abstract void setFile(String file); }
2,825
21.97561
84
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.api.records;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;

/**
 * State of a Queue.
 * <p>
 * A queue is in one of:
 * <ul>
 *   <li>{@link #RUNNING} - normal state.</li>
 *   <li>{@link #STOPPED} - not accepting new application submissions.</li>
 * </ul>
 *
 * @see QueueInfo
 * @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest)
 */
@Public
@Stable
public enum QueueState {

  /** Stopped - Not accepting submissions of new applications. */
  STOPPED,

  /** Running - normal operation. */
  RUNNING

}
1,567
31
110
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMCommand.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; import org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException; /** * Command sent by the Resource Manager to the Application Master in the * AllocateResponse * @see AllocateResponse */ @Public @Unstable public enum AMCommand { /** * @deprecated Sent by Resource Manager when it is out of sync with the AM and * wants the AM get back in sync. 
* * Note: Instead of sending this command, * {@link ApplicationMasterNotRegisteredException} will be thrown * when ApplicationMaster is out of sync with ResourceManager and * ApplicationMaster is expected to re-register with RM by calling * {@link ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)} */ AM_RESYNC, /** * @deprecated Sent by Resource Manager when it wants the AM to shutdown. * Note: This command was earlier sent by ResourceManager to * instruct AM to shutdown if RM had restarted. Now * {@link ApplicationAttemptNotFoundException} will be thrown in case * that RM has restarted and AM is supposed to handle this * exception by shutting down itself. */ AM_SHUTDOWN }
2,556
41.616667
110
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerResourceIncrease.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.api.records;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.yarn.util.Records;

/**
 * Represent a new increased container accepted by Resource Manager.
 */
public abstract class ContainerResourceIncrease {

  @Public
  public static ContainerResourceIncrease newInstance(
      ContainerId existingContainerId, Resource targetCapability,
      Token token) {
    ContainerResourceIncrease context =
        Records.newRecord(ContainerResourceIncrease.class);
    context.setContainerId(existingContainerId);
    context.setCapability(targetCapability);
    context.setContainerToken(token);
    return context;
  }

  @Public
  public abstract ContainerId getContainerId();

  @Public
  public abstract void setContainerId(ContainerId containerId);

  @Public
  public abstract Resource getCapability();

  @Public
  public abstract void setCapability(Resource capability);

  @Public
  public abstract Token getContainerToken();

  @Public
  public abstract void setContainerToken(Token token);

  @Override
  public int hashCode() {
    // Fix: the original unconditionally dereferenced getCapability() and
    // getContainerId(), throwing NPE for records with unset fields; treat a
    // null field as hash 0, consistent with equals() below.
    int capHash = getCapability() == null ? 0 : getCapability().hashCode();
    int idHash = getContainerId() == null ? 0 : getContainerId().hashCode();
    return capHash + idHash;
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof ContainerResourceIncrease)) {
      return false;
    }
    ContainerResourceIncrease ctx = (ContainerResourceIncrease) other;
    // Fix: the original checked (this == null && other != null) but then fell
    // through to getContainerId().equals(...) even when this side was null,
    // throwing NPE whenever both container ids (or both capabilities) were
    // null. Use full null-safe comparison instead.
    if (getContainerId() == null
        ? ctx.getContainerId() != null
        : !getContainerId().equals(ctx.getContainerId())) {
      return false;
    }
    if (getCapability() == null
        ? ctx.getCapability() != null
        : !getCapability().equals(ctx.getCapability())) {
      return false;
    }
    return true;
  }

}
2,628
29.929412
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResource.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.util.Records; /** * <p><code>LocalResource</code> represents a local resource required to * run a container.</p> * * <p>The <code>NodeManager</code> is responsible for localizing the resource * prior to launching the container.</p> * * <p>Applications can specify {@link LocalResourceType} and * {@link LocalResourceVisibility}.</p> * * @see LocalResourceType * @see LocalResourceVisibility * @see ContainerLaunchContext * @see ApplicationSubmissionContext * @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) */ @Public @Stable public abstract class LocalResource { @Public @Stable public static LocalResource newInstance(URL url, LocalResourceType type, LocalResourceVisibility visibility, long size, long timestamp, String pattern) { return newInstance(url, type, visibility, size, timestamp, pattern, 
false); } @Public @Unstable public static LocalResource newInstance(URL url, LocalResourceType type, LocalResourceVisibility visibility, long size, long timestamp, String pattern, boolean shouldBeUploadedToSharedCache) { LocalResource resource = Records.newRecord(LocalResource.class); resource.setResource(url); resource.setType(type); resource.setVisibility(visibility); resource.setSize(size); resource.setTimestamp(timestamp); resource.setPattern(pattern); resource.setShouldBeUploadedToSharedCache(shouldBeUploadedToSharedCache); return resource; } @Public @Stable public static LocalResource newInstance(URL url, LocalResourceType type, LocalResourceVisibility visibility, long size, long timestamp) { return newInstance(url, type, visibility, size, timestamp, null); } @Public @Unstable public static LocalResource newInstance(URL url, LocalResourceType type, LocalResourceVisibility visibility, long size, long timestamp, boolean shouldBeUploadedToSharedCache) { return newInstance(url, type, visibility, size, timestamp, null, shouldBeUploadedToSharedCache); } /** * Get the <em>location</em> of the resource to be localized. * @return <em>location</em> of the resource to be localized */ @Public @Stable public abstract URL getResource(); /** * Set <em>location</em> of the resource to be localized. * @param resource <em>location</em> of the resource to be localized */ @Public @Stable public abstract void setResource(URL resource); /** * Get the <em>size</em> of the resource to be localized. * @return <em>size</em> of the resource to be localized */ @Public @Stable public abstract long getSize(); /** * Set the <em>size</em> of the resource to be localized. * @param size <em>size</em> of the resource to be localized */ @Public @Stable public abstract void setSize(long size); /** * Get the original <em>timestamp</em> of the resource to be localized, used * for verification. 
* @return <em>timestamp</em> of the resource to be localized */ @Public @Stable public abstract long getTimestamp(); /** * Set the <em>timestamp</em> of the resource to be localized, used * for verification. * @param timestamp <em>timestamp</em> of the resource to be localized */ @Public @Stable public abstract void setTimestamp(long timestamp); /** * Get the <code>LocalResourceType</code> of the resource to be localized. * @return <code>LocalResourceType</code> of the resource to be localized */ @Public @Stable public abstract LocalResourceType getType(); /** * Set the <code>LocalResourceType</code> of the resource to be localized. * @param type <code>LocalResourceType</code> of the resource to be localized */ @Public @Stable public abstract void setType(LocalResourceType type); /** * Get the <code>LocalResourceVisibility</code> of the resource to be * localized. * @return <code>LocalResourceVisibility</code> of the resource to be * localized */ @Public @Stable public abstract LocalResourceVisibility getVisibility(); /** * Set the <code>LocalResourceVisibility</code> of the resource to be * localized. * @param visibility <code>LocalResourceVisibility</code> of the resource to be * localized */ @Public @Stable public abstract void setVisibility(LocalResourceVisibility visibility); /** * Get the <em>pattern</em> that should be used to extract entries from the * archive (only used when type is <code>PATTERN</code>). * @return <em>pattern</em> that should be used to extract entries from the * archive. */ @Public @Stable public abstract String getPattern(); /** * Set the <em>pattern</em> that should be used to extract entries from the * archive (only used when type is <code>PATTERN</code>). * @param pattern <em>pattern</em> that should be used to extract entries * from the archive. 
*/ @Public @Stable public abstract void setPattern(String pattern); /** * NM uses it to decide whether if it is necessary to upload the resource to * the shared cache */ @Public @Unstable public abstract boolean getShouldBeUploadedToSharedCache(); /** * Inform NM whether upload to SCM is needed. * * @param shouldBeUploadedToSharedCache <em>shouldBeUploadedToSharedCache</em> * of this request */ @Public @Unstable public abstract void setShouldBeUploadedToSharedCache( boolean shouldBeUploadedToSharedCache); }
6,747
30.830189
118
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.util.Records; /** * The priority assigned to a ResourceRequest or Application or Container * allocation * */ @Public @Stable public abstract class Priority implements Comparable<Priority> { public static final Priority UNDEFINED = newInstance(-1); @Public @Stable public static Priority newInstance(int p) { Priority priority = Records.newRecord(Priority.class); priority.setPriority(p); return priority; } /** * Get the assigned priority * @return the assigned priority */ @Public @Stable public abstract int getPriority(); /** * Set the assigned priority * @param priority the assigned priority */ @Public @Stable public abstract void setPriority(int priority); @Override public int hashCode() { final int prime = 517861; int result = 9511; result = prime * result + getPriority(); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; Priority other = (Priority) obj; if (getPriority() 
!= other.getPriority()) return false; return true; } @Override public int compareTo(Priority other) { return other.getPriority() - this.getPriority(); } @Override public String toString() { return "{Priority: " + getPriority() + "}"; } }
2,432
25.445652
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationStatus.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.api.records;

import org.apache.hadoop.yarn.conf.YarnConfiguration;

/**
 * <p>Status of Log aggregation.</p>
 */
public enum LogAggregationStatus {

  /** Log aggregation is disabled for this application. */
  DISABLED,

  /** Log aggregation has not started yet. */
  NOT_START,

  /** Log aggregation is currently running. */
  RUNNING,

  /** Log aggregation is running but some earlier cycles had failures. */
  RUNNING_WITH_FAILURE,

  /**
   * Log aggregation finished and every log was aggregated successfully.
   */
  SUCCEEDED,

  /**
   * Log aggregation finished but at least one log failed to be aggregated.
   */
  FAILED,

  /**
   * The application finished, yet the aggregation status has not been
   * updated for a long time.
   * @see YarnConfiguration#LOG_AGGREGATION_STATUS_TIME_OUT_MS
   */
  TIME_OUT
}
1,668
27.775862
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerResourceDecrease.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.api.records;

import java.util.Objects;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.yarn.util.Records;

/**
 * Used by the Application Master to ask the Node Manager to reduce the
 * size of a specified container.
 */
public abstract class ContainerResourceDecrease {

  @Public
  public static ContainerResourceDecrease newInstance(
      ContainerId existingContainerId, Resource targetCapability) {
    ContainerResourceDecrease context =
        Records.newRecord(ContainerResourceDecrease.class);
    context.setContainerId(existingContainerId);
    context.setCapability(targetCapability);
    return context;
  }

  /** @return the id of the container to shrink */
  @Public
  public abstract ContainerId getContainerId();

  @Public
  public abstract void setContainerId(ContainerId containerId);

  /** @return the target (reduced) capability for the container */
  @Public
  public abstract Resource getCapability();

  @Public
  public abstract void setCapability(Resource capability);

  @Override
  public int hashCode() {
    // Objects.hashCode is null-safe; the original dereferenced both fields
    // directly and threw NullPointerException when either was unset. For
    // non-null fields the value is unchanged.
    return Objects.hashCode(getCapability()) + Objects.hashCode(getContainerId());
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (!(other instanceof ContainerResourceDecrease)) {
      return false;
    }
    ContainerResourceDecrease ctx = (ContainerResourceDecrease) other;
    // Objects.equals fixes the NPE the original threw when both sides had a
    // null containerId (or capability): its null check only rejected the
    // "this null / other non-null" case before dereferencing.
    return Objects.equals(getContainerId(), ctx.getContainerId())
        && Objects.equals(getCapability(), ctx.getCapability());
  }
}
2,475
30.341772
76
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.api.records;

import java.io.Serializable;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.util.Records;

/**
 * {@code ResourceRequest} represents the request made by an application to
 * the {@code ResourceManager} to obtain various {@code Container}
 * allocations.
 * <p>
 * It includes:
 * <ul>
 *   <li>{@link Priority} of the request.</li>
 *   <li>The <em>name</em> of the machine or rack on which the allocation is
 *   desired. A special value of <em>*</em> signifies that <em>any</em>
 *   host/rack is acceptable to the application.</li>
 *   <li>{@link Resource} required for each request.</li>
 *   <li>Number of containers, of above specifications, which are required
 *   by the application.</li>
 *   <li>A boolean <em>relaxLocality</em> flag, defaulting to {@code true},
 *   which tells the {@code ResourceManager} if the application wants
 *   locality to be loose (i.e. allows fall-through to rack or <em>any</em>)
 *   or strict (i.e. specify hard constraint on resource allocation).</li>
 * </ul>
 *
 * @see Resource
 * @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
 */
@Public
@Stable
public abstract class ResourceRequest implements Comparable<ResourceRequest> {

  @Public
  @Stable
  public static ResourceRequest newInstance(Priority priority, String hostName,
      Resource capability, int numContainers) {
    // Locality relaxation defaults to true.
    return newInstance(priority, hostName, capability, numContainers, true);
  }

  @Public
  @Stable
  public static ResourceRequest newInstance(Priority priority, String hostName,
      Resource capability, int numContainers, boolean relaxLocality) {
    // No node-label expression by default.
    return newInstance(priority, hostName, capability, numContainers,
        relaxLocality, null);
  }

  @Public
  @Stable
  public static ResourceRequest newInstance(Priority priority, String hostName,
      Resource capability, int numContainers, boolean relaxLocality,
      String labelExpression) {
    ResourceRequest request = Records.newRecord(ResourceRequest.class);
    request.setPriority(priority);
    request.setResourceName(hostName);
    request.setCapability(capability);
    request.setNumContainers(numContainers);
    request.setRelaxLocality(relaxLocality);
    request.setNodeLabelExpression(labelExpression);
    return request;
  }

  /**
   * Orders requests by priority, then resource name, then capability.
   * Note: unlike {@link ResourceRequest#compareTo(ResourceRequest)}, this
   * comparator does not consider the number of containers.
   */
  @Public
  @Stable
  public static class ResourceRequestComparator implements
      java.util.Comparator<ResourceRequest>, Serializable {

    private static final long serialVersionUID = 1L;

    @Override
    public int compare(ResourceRequest r1, ResourceRequest r2) {
      // Compare priority, host and capability
      int ret = r1.getPriority().compareTo(r2.getPriority());
      if (ret == 0) {
        String h1 = r1.getResourceName();
        String h2 = r2.getResourceName();
        ret = h1.compareTo(h2);
      }
      if (ret == 0) {
        ret = r1.getCapability().compareTo(r2.getCapability());
      }
      return ret;
    }
  }

  /**
   * The constant string representing no locality.
   * It should be used by all references that want to pass an arbitrary host
   * name in.
   */
  public static final String ANY = "*";

  /**
   * Check whether the given <em>host/rack</em> string represents an arbitrary
   * host name.
   *
   * @param hostName <em>host/rack</em> on which the allocation is desired
   * @return whether the given <em>host/rack</em> string represents an
   *         arbitrary host name
   */
  @Public
  @Stable
  public static boolean isAnyLocation(String hostName) {
    return ANY.equals(hostName);
  }

  /**
   * Get the <code>Priority</code> of the request.
   * @return <code>Priority</code> of the request
   */
  @Public
  @Stable
  public abstract Priority getPriority();

  /**
   * Set the <code>Priority</code> of the request.
   * @param priority <code>Priority</code> of the request
   */
  @Public
  @Stable
  public abstract void setPriority(Priority priority);

  /**
   * Get the resource (e.g. <em>host/rack</em>) on which the allocation
   * is desired. A special value of <em>*</em> signifies that <em>any</em>
   * resource (host/rack) is acceptable.
   *
   * @return resource (e.g. <em>host/rack</em>) on which the allocation
   *         is desired
   */
  @Public
  @Stable
  public abstract String getResourceName();

  /**
   * Set the resource name (e.g. <em>host/rack</em>) on which the allocation
   * is desired. A special value of <em>*</em> signifies that <em>any</em>
   * resource name (e.g. host/rack) is acceptable.
   *
   * @param resourceName (e.g. <em>host/rack</em>) on which the
   *                     allocation is desired
   */
  @Public
  @Stable
  public abstract void setResourceName(String resourceName);

  /**
   * Get the <code>Resource</code> capability of the request.
   * @return <code>Resource</code> capability of the request
   */
  @Public
  @Stable
  public abstract Resource getCapability();

  /**
   * Set the <code>Resource</code> capability of the request.
   * @param capability <code>Resource</code> capability of the request
   */
  @Public
  @Stable
  public abstract void setCapability(Resource capability);

  /**
   * Get the number of containers required with the given specifications.
   * @return number of containers required with the given specifications
   */
  @Public
  @Stable
  public abstract int getNumContainers();

  /**
   * Set the number of containers required with the given specifications.
   * @param numContainers number of containers required with the given
   *                      specifications
   */
  @Public
  @Stable
  public abstract void setNumContainers(int numContainers);

  /**
   * Get whether locality relaxation is enabled with this
   * <code>ResourceRequest</code>. Defaults to true.
   *
   * @return whether locality relaxation is enabled with this
   *         <code>ResourceRequest</code>.
   */
  @Public
  @Stable
  public abstract boolean getRelaxLocality();

  /**
   * <p>For a request at a network hierarchy level, set whether locality can
   * be relaxed to that level and beyond.</p>
   *
   * <p>If the flag is off on a rack-level <code>ResourceRequest</code>,
   * containers at that request's priority will not be assigned to nodes on
   * that request's rack unless requests specifically for those nodes have
   * also been submitted.</p>
   *
   * <p>If the flag is off on an {@link ResourceRequest#ANY}-level
   * <code>ResourceRequest</code>, containers at that request's priority will
   * only be assigned on racks for which specific requests have also been
   * submitted.</p>
   *
   * <p>For example, to request a container strictly on a specific node, the
   * corresponding rack-level and any-level requests should have locality
   * relaxation set to false. Similarly, to request a container strictly on a
   * specific rack, the corresponding any-level request should have locality
   * relaxation set to false.</p>
   *
   * @param relaxLocality whether locality relaxation is enabled with this
   *                      <code>ResourceRequest</code>.
   */
  @Public
  @Stable
  public abstract void setRelaxLocality(boolean relaxLocality);

  /**
   * Get node-label-expression for this Resource Request. If this is set, all
   * containers allocated to satisfy this resource-request will be only on
   * those nodes that satisfy this node-label-expression.
   *
   * Please note that node label expression now can only take effect when the
   * resource request has resourceName = ANY
   *
   * @return node-label-expression
   */
  @Public
  @Evolving
  public abstract String getNodeLabelExpression();

  /**
   * Set node label expression of this resource request. Now only support
   * specifying a single node label. In the future we will support more
   * complex node label expression specification like {@code AND(&&), OR(||)},
   * etc.
   *
   * Any please note that node label expression now can only take effect when
   * the resource request has resourceName = ANY
   *
   * @param nodelabelExpression node-label-expression of this ResourceRequest
   */
  @Public
  @Evolving
  public abstract void setNodeLabelExpression(String nodelabelExpression);

  @Override
  public int hashCode() {
    // Constants unchanged from the original so hash values stay stable.
    // The node-label expression is intentionally excluded: equals() compares
    // it after whitespace normalization, so including the raw string here
    // could break the equals/hashCode contract.
    final int prime = 2153;
    int result = 2459;
    Resource capability = getCapability();
    String hostName = getResourceName();
    Priority priority = getPriority();
    result = prime * result + ((capability == null) ? 0 : capability.hashCode());
    result = prime * result + ((hostName == null) ? 0 : hostName.hashCode());
    result = prime * result + getNumContainers();
    result = prime * result + ((priority == null) ? 0 : priority.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    ResourceRequest other = (ResourceRequest) obj;
    Resource capability = getCapability();
    if (capability == null) {
      if (other.getCapability() != null) {
        return false;
      }
    } else if (!capability.equals(other.getCapability())) {
      return false;
    }
    String hostName = getResourceName();
    if (hostName == null) {
      if (other.getResourceName() != null) {
        return false;
      }
    } else if (!hostName.equals(other.getResourceName())) {
      return false;
    }
    if (getNumContainers() != other.getNumContainers()) {
      return false;
    }
    Priority priority = getPriority();
    if (priority == null) {
      if (other.getPriority() != null) {
        return false;
      }
    } else if (!priority.equals(other.getPriority())) {
      return false;
    }
    if (getNodeLabelExpression() == null) {
      if (other.getNodeLabelExpression() != null) {
        return false;
      }
    } else {
      // do normalize on label expression before compare
      String label1 = getNodeLabelExpression().replaceAll("[\\t ]", "");
      String label2 = other.getNodeLabelExpression() == null ? null
          : other.getNodeLabelExpression().replaceAll("[\\t ]", "");
      if (!label1.equals(label2)) {
        return false;
      }
    }
    return true;
  }

  @Override
  public int compareTo(ResourceRequest other) {
    int priorityComparison = this.getPriority().compareTo(other.getPriority());
    if (priorityComparison != 0) {
      return priorityComparison;
    }
    int hostNameComparison =
        this.getResourceName().compareTo(other.getResourceName());
    if (hostNameComparison != 0) {
      return hostNameComparison;
    }
    int capabilityComparison =
        this.getCapability().compareTo(other.getCapability());
    if (capabilityComparison != 0) {
      return capabilityComparison;
    }
    // Integer.compare avoids the overflow possible with the former
    // subtraction idiom (this.getNumContainers() - other.getNumContainers()).
    return Integer.compare(this.getNumContainers(), other.getNumContainers());
  }
}
12,034
32.063187
102
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.api.records;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;

/**
 * {@code QueueACL} enumerates the various ACLs for queues.
 * <p>
 * The ACL is one of:
 * <ul>
 *   <li>{@link #SUBMIT_APPLICATIONS} - ACL to submit applications to the
 *   queue.</li>
 *   <li>{@link #ADMINISTER_QUEUE} - ACL to administer the queue.</li>
 * </ul>
 *
 * @see QueueInfo
 * @see ApplicationClientProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest)
 */
@Public
@Stable
public enum QueueACL {

  /** Permission to submit applications to the queue. */
  SUBMIT_APPLICATIONS,

  /** Permission to administer the queue. */
  ADMINISTER_QUEUE
}
1,668
31.72549
122
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAccessType.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.api.records;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;

/**
 * Application access types.
 */
@Public
@Stable
public enum ApplicationAccessType {

  /**
   * Access-type representing 'viewing' application. ACLs against this type
   * dictate who can 'view' some or all of the application related details.
   */
  VIEW_APP,

  /**
   * Access-type representing 'modifying' application. ACLs against this type
   * dictate who can 'modify' the application, e.g. by killing it.
   */
  MODIFY_APP;
}
1,447
32.674419
77
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.util.Records; /** * {@code Container} represents an allocated resource in the cluster. * <p> * The {@code ResourceManager} is the sole authority to allocate any * {@code Container} to applications. The allocated {@code Container} * is always on a single node and has a unique {@link ContainerId}. It has * a specific amount of {@link Resource} allocated. * <p> * It includes details such as: * <ul> * <li>{@link ContainerId} for the container, which is globally unique.</li> * <li> * {@link NodeId} of the node on which it is allocated. 
* </li> * <li>HTTP uri of the node.</li> * <li>{@link Resource} allocated to the container.</li> * <li>{@link Priority} at which the container was allocated.</li> * <li> * Container {@link Token} of the container, used to securely verify * authenticity of the allocation. * </li> * </ul> * * Typically, an {@code ApplicationMaster} receives the {@code Container} * from the {@code ResourceManager} during resource-negotiation and then * talks to the {@code NodeManager} to start/stop containers. * * @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest) * @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) * @see ContainerManagementProtocol#stopContainers(org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest) */ @Public @Stable public abstract class Container implements Comparable<Container> { @Private @Unstable public static Container newInstance(ContainerId containerId, NodeId nodeId, String nodeHttpAddress, Resource resource, Priority priority, Token containerToken) { Container container = Records.newRecord(Container.class); container.setId(containerId); container.setNodeId(nodeId); container.setNodeHttpAddress(nodeHttpAddress); container.setResource(resource); container.setPriority(priority); container.setContainerToken(containerToken); return container; } /** * Get the globally unique identifier for the container. * @return globally unique identifier for the container */ @Public @Stable public abstract ContainerId getId(); @Private @Unstable public abstract void setId(ContainerId id); /** * Get the identifier of the node on which the container is allocated. * @return identifier of the node on which the container is allocated */ @Public @Stable public abstract NodeId getNodeId(); @Private @Unstable public abstract void setNodeId(NodeId nodeId); /** * Get the http uri of the node on which the container is allocated. 
* @return http uri of the node on which the container is allocated */ @Public @Stable public abstract String getNodeHttpAddress(); @Private @Unstable public abstract void setNodeHttpAddress(String nodeHttpAddress); /** * Get the <code>Resource</code> allocated to the container. * @return <code>Resource</code> allocated to the container */ @Public @Stable public abstract Resource getResource(); @Private @Unstable public abstract void setResource(Resource resource); /** * Get the <code>Priority</code> at which the <code>Container</code> was * allocated. * @return <code>Priority</code> at which the <code>Container</code> was * allocated */ @Public @Stable public abstract Priority getPriority(); @Private @Unstable public abstract void setPriority(Priority priority); /** * Get the <code>ContainerToken</code> for the container. * <p><code>ContainerToken</code> is the security token used by the framework * to verify authenticity of any <code>Container</code>.</p> * * <p>The <code>ResourceManager</code>, on container allocation provides a * secure token which is verified by the <code>NodeManager</code> on * container launch.</p> * * <p>Applications do not need to care about <code>ContainerToken</code>, they * are transparently handled by the framework - the allocated * <code>Container</code> includes the <code>ContainerToken</code>.</p> * * @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest) * @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) * * @return <code>ContainerToken</code> for the container */ @Public @Stable public abstract Token getContainerToken(); @Private @Unstable public abstract void setContainerToken(Token containerToken); }
5,877
34.197605
120
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.util.Records; /** * <p><code>Resource</code> models a set of computer resources in the * cluster.</p> * * <p>Currently it models both <em>memory</em> and <em>CPU</em>.</p> * * <p>The unit for memory is megabytes. CPU is modeled with virtual cores * (vcores), a unit for expressing parallelism. A node's capacity should * be configured with virtual cores equal to its number of physical cores. A * container should be requested with the number of cores it can saturate, i.e. * the average number of threads it expects to have runnable at a time.</p> * * <p>Virtual cores take integer values and thus currently CPU-scheduling is * very coarse. 
A complementary axis for CPU requests that represents processing * power will likely be added in the future to enable finer-grained resource * configuration.</p> * * <p>Typically, applications request <code>Resource</code> of suitable * capability to run their component tasks.</p> * * @see ResourceRequest * @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest) */ @Public @Stable public abstract class Resource implements Comparable<Resource> { @Public @Stable public static Resource newInstance(int memory, int vCores) { Resource resource = Records.newRecord(Resource.class); resource.setMemory(memory); resource.setVirtualCores(vCores); return resource; } /** * Get <em>memory</em> of the resource. * @return <em>memory</em> of the resource */ @Public @Stable public abstract int getMemory(); /** * Set <em>memory</em> of the resource. * @param memory <em>memory</em> of the resource */ @Public @Stable public abstract void setMemory(int memory); /** * Get <em>number of virtual cpu cores</em> of the resource. * * Virtual cores are a unit for expressing CPU parallelism. A node's capacity * should be configured with virtual cores equal to its number of physical cores. * A container should be requested with the number of cores it can saturate, i.e. * the average number of threads it expects to have runnable at a time. * * @return <em>num of virtual cpu cores</em> of the resource */ @Public @Evolving public abstract int getVirtualCores(); /** * Set <em>number of virtual cpu cores</em> of the resource. * * Virtual cores are a unit for expressing CPU parallelism. A node's capacity * should be configured with virtual cores equal to its number of physical cores. * A container should be requested with the number of cores it can saturate, i.e. * the average number of threads it expects to have runnable at a time. 
* * @param vCores <em>number of virtual cpu cores</em> of the resource */ @Public @Evolving public abstract void setVirtualCores(int vCores); @Override public int hashCode() { final int prime = 263167; int result = 3571; result = 939769357 + getMemory(); // prime * result = 939769357 initially result = prime * result + getVirtualCores(); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (!(obj instanceof Resource)) return false; Resource other = (Resource) obj; if (getMemory() != other.getMemory() || getVirtualCores() != other.getVirtualCores()) { return false; } return true; } @Override public String toString() { return "<memory:" + getMemory() + ", vCores:" + getVirtualCores() + ">"; } }
4,718
33.195652
102
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.util.Records; import java.util.Set; /** * {@code ApplicationReport} is a report of an application. 
* <p> * It includes details such as: * <ul> * <li>{@link ApplicationId} of the application.</li> * <li>Applications user.</li> * <li>Application queue.</li> * <li>Application name.</li> * <li>Host on which the <code>ApplicationMaster</code> is running.</li> * <li>RPC port of the <code>ApplicationMaster</code>.</li> * <li>Tracking URL.</li> * <li>{@link YarnApplicationState} of the application.</li> * <li>Diagnostic information in case of errors.</li> * <li>Start time of the application.</li> * <li>Client {@link Token} of the application (if security is enabled).</li> * </ul> * * @see ApplicationClientProtocol#getApplicationReport(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest) */ @Public @Stable public abstract class ApplicationReport { @Private @Unstable public static ApplicationReport newInstance(ApplicationId applicationId, ApplicationAttemptId applicationAttemptId, String user, String queue, String name, String host, int rpcPort, Token clientToAMToken, YarnApplicationState state, String diagnostics, String url, long startTime, long finishTime, FinalApplicationStatus finalStatus, ApplicationResourceUsageReport appResources, String origTrackingUrl, float progress, String applicationType, Token amRmToken) { ApplicationReport report = Records.newRecord(ApplicationReport.class); report.setApplicationId(applicationId); report.setCurrentApplicationAttemptId(applicationAttemptId); report.setUser(user); report.setQueue(queue); report.setName(name); report.setHost(host); report.setRpcPort(rpcPort); report.setClientToAMToken(clientToAMToken); report.setYarnApplicationState(state); report.setDiagnostics(diagnostics); report.setTrackingUrl(url); report.setStartTime(startTime); report.setFinishTime(finishTime); report.setFinalApplicationStatus(finalStatus); report.setApplicationResourceUsageReport(appResources); report.setOriginalTrackingUrl(origTrackingUrl); report.setProgress(progress); report.setApplicationType(applicationType); 
report.setAMRMToken(amRmToken); return report; } @Private @Unstable public static ApplicationReport newInstance(ApplicationId applicationId, ApplicationAttemptId applicationAttemptId, String user, String queue, String name, String host, int rpcPort, Token clientToAMToken, YarnApplicationState state, String diagnostics, String url, long startTime, long finishTime, FinalApplicationStatus finalStatus, ApplicationResourceUsageReport appResources, String origTrackingUrl, float progress, String applicationType, Token amRmToken, Set<String> tags, boolean unmanagedApplication) { ApplicationReport report = newInstance(applicationId, applicationAttemptId, user, queue, name, host, rpcPort, clientToAMToken, state, diagnostics, url, startTime, finishTime, finalStatus, appResources, origTrackingUrl, progress, applicationType, amRmToken); report.setApplicationTags(tags); report.setUnmanagedApp(unmanagedApplication); return report; } /** * Get the <code>ApplicationId</code> of the application. * @return <code>ApplicationId</code> of the application */ @Public @Stable public abstract ApplicationId getApplicationId(); @Private @Unstable public abstract void setApplicationId(ApplicationId applicationId); /** * Get the <code>ApplicationAttemptId</code> of the current * attempt of the application * @return <code>ApplicationAttemptId</code> of the attempt */ @Public @Stable public abstract ApplicationAttemptId getCurrentApplicationAttemptId(); @Private @Unstable public abstract void setCurrentApplicationAttemptId(ApplicationAttemptId applicationAttemptId); /** * Get the <em>user</em> who submitted the application. * @return <em>user</em> who submitted the application */ @Public @Stable public abstract String getUser(); @Private @Unstable public abstract void setUser(String user); /** * Get the <em>queue</em> to which the application was submitted. 
* @return <em>queue</em> to which the application was submitted */ @Public @Stable public abstract String getQueue(); @Private @Unstable public abstract void setQueue(String queue); /** * Get the user-defined <em>name</em> of the application. * @return <em>name</em> of the application */ @Public @Stable public abstract String getName(); @Private @Unstable public abstract void setName(String name); /** * Get the <em>host</em> on which the <code>ApplicationMaster</code> * is running. * @return <em>host</em> on which the <code>ApplicationMaster</code> * is running */ @Public @Stable public abstract String getHost(); @Private @Unstable public abstract void setHost(String host); /** * Get the <em>RPC port</em> of the <code>ApplicationMaster</code>. * @return <em>RPC port</em> of the <code>ApplicationMaster</code> */ @Public @Stable public abstract int getRpcPort(); @Private @Unstable public abstract void setRpcPort(int rpcPort); /** * Get the <em>client token</em> for communicating with the * <code>ApplicationMaster</code>. * <p> * <em>ClientToAMToken</em> is the security token used by the AMs to verify * authenticity of any <code>client</code>. * </p> * * <p> * The <code>ResourceManager</code>, provides a secure token (via * {@link ApplicationReport#getClientToAMToken()}) which is verified by the * ApplicationMaster when the client directly talks to an AM. * </p> * @return <em>client token</em> for communicating with the * <code>ApplicationMaster</code> */ @Public @Stable public abstract Token getClientToAMToken(); @Private @Unstable public abstract void setClientToAMToken(Token clientToAMToken); /** * Get the <code>YarnApplicationState</code> of the application. 
* @return <code>YarnApplicationState</code> of the application */ @Public @Stable public abstract YarnApplicationState getYarnApplicationState(); @Private @Unstable public abstract void setYarnApplicationState(YarnApplicationState state); /** * Get the <em>diagnositic information</em> of the application in case of * errors. * @return <em>diagnositic information</em> of the application in case * of errors */ @Public @Stable public abstract String getDiagnostics(); @Private @Unstable public abstract void setDiagnostics(String diagnostics); /** * Get the <em>tracking url</em> for the application. * @return <em>tracking url</em> for the application */ @Public @Stable public abstract String getTrackingUrl(); @Private @Unstable public abstract void setTrackingUrl(String url); /** * Get the original not-proxied <em>tracking url</em> for the application. * This is intended to only be used by the proxy itself. * @return the original not-proxied <em>tracking url</em> for the application */ @Private @Unstable public abstract String getOriginalTrackingUrl(); @Private @Unstable public abstract void setOriginalTrackingUrl(String url); /** * Get the <em>start time</em> of the application. * @return <em>start time</em> of the application */ @Public @Stable public abstract long getStartTime(); @Private @Unstable public abstract void setStartTime(long startTime); /** * Get the <em>finish time</em> of the application. * @return <em>finish time</em> of the application */ @Public @Stable public abstract long getFinishTime(); @Private @Unstable public abstract void setFinishTime(long finishTime); /** * Get the <em>final finish status</em> of the application. 
* @return <em>final finish status</em> of the application */ @Public @Stable public abstract FinalApplicationStatus getFinalApplicationStatus(); @Private @Unstable public abstract void setFinalApplicationStatus(FinalApplicationStatus finishState); /** * Retrieve the structure containing the job resources for this application * @return the job resources structure for this application */ @Public @Stable public abstract ApplicationResourceUsageReport getApplicationResourceUsageReport(); /** * Store the structure containing the job resources for this application * @param appResources structure for this application */ @Private @Unstable public abstract void setApplicationResourceUsageReport(ApplicationResourceUsageReport appResources); /** * Get the application's progress ( range 0.0 to 1.0 ) * @return application's progress */ @Public @Stable public abstract float getProgress(); @Private @Unstable public abstract void setProgress(float progress); /** * Get the application's Type * @return application's Type */ @Public @Stable public abstract String getApplicationType(); @Private @Unstable public abstract void setApplicationType(String applicationType); /** * Get all tags corresponding to the application * @return Application's tags */ @Public @Stable public abstract Set<String> getApplicationTags(); @Private @Unstable public abstract void setApplicationTags(Set<String> tags); @Private @Stable public abstract void setAMRMToken(Token amRmToken); /** * Get the AMRM token of the application. * <p> * The AMRM token is required for AM to RM scheduling operations. For * managed Application Masters Yarn takes care of injecting it. For unmanaged * Applications Masters, the token must be obtained via this method and set * in the {@link org.apache.hadoop.security.UserGroupInformation} of the * current user. 
* <p> * The AMRM token will be returned only if all the following conditions are * met: * <ul> * <li>the requester is the owner of the ApplicationMaster</li> * <li>the application master is an unmanaged ApplicationMaster</li> * <li>the application master is in ACCEPTED state</li> * </ul> * Else this method returns NULL. * * @return the AM to RM token if available. */ @Public @Stable public abstract Token getAMRMToken(); /** * Get log aggregation status for the application * @return Application's log aggregation status */ @Public @Stable public abstract LogAggregationStatus getLogAggregationStatus(); @Private @Unstable public abstract void setLogAggregationStatus( LogAggregationStatus logAggregationStatus); /** * @return true if the AM is not managed by the RM */ @Public @Unstable public abstract boolean isUnmanagedApp(); /** * @param value true if RM should not manage the AM */ @Public @Unstable public abstract void setUnmanagedApp(boolean unmanagedApplication); /** * Get priority of the application * * @return Application's priority */ @Public @Stable public abstract Priority getPriority(); @Private @Unstable public abstract void setPriority(Priority priority); }
12,508
28.432941
126
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeState.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; /** * <p>State of a <code>Node</code>.</p> */ @Public @Unstable public enum NodeState { /** New node */ NEW, /** Running node */ RUNNING, /** Node is unhealthy */ UNHEALTHY, /** Node is out of service */ DECOMMISSIONED, /** Node has not sent a heartbeat for some configured time threshold*/ LOST, /** Node has rebooted */ REBOOTED, /** Node decommission is in progress */ DECOMMISSIONING, /** Node has shutdown gracefully. */ SHUTDOWN; public boolean isUnusable() { return (this == UNHEALTHY || this == DECOMMISSIONED || this == LOST || this == SHUTDOWN); } }
1,622
26.508475
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnApplicationAttemptState.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; /** * Enumeration of various states of a <code>RMAppAttempt</code>. */ @Public @Stable public enum YarnApplicationAttemptState { /** AppAttempt was just created. */ NEW, /** AppAttempt has been submitted. */ SUBMITTED, /** AppAttempt was scheduled */ SCHEDULED, /** Acquired AM Container from Scheduler and Saving AppAttempt Data */ ALLOCATED_SAVING, /** AppAttempt Data was saved */ ALLOCATED, /** AppAttempt was launched */ LAUNCHED, /** AppAttempt failed. */ FAILED, /** AppAttempt is currently running. */ RUNNING, /** AppAttempt is finishing. */ FINISHING, /** AppAttempt finished successfully. */ FINISHED, /** AppAttempt was terminated by a user or admin. */ KILLED }
1,718
26.285714
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; /** * {@code ContainerReport} is a report of an container. 
* <p> * It includes details such as: * <ul> * <li>{@link ContainerId} of the container.</li> * <li>Allocated Resources to the container.</li> * <li>Assigned Node id.</li> * <li>Assigned Priority.</li> * <li>Creation Time.</li> * <li>Finish Time.</li> * <li>Container Exit Status.</li> * <li>{@link ContainerState} of the container.</li> * <li>Diagnostic information in case of errors.</li> * <li>Log URL.</li> * <li>nodeHttpAddress</li> * </ul> */ @Public @Unstable public abstract class ContainerReport { @Private @Unstable public static ContainerReport newInstance(ContainerId containerId, Resource allocatedResource, NodeId assignedNode, Priority priority, long creationTime, long finishTime, String diagnosticInfo, String logUrl, int containerExitStatus, ContainerState containerState, String nodeHttpAddress) { ContainerReport report = Records.newRecord(ContainerReport.class); report.setContainerId(containerId); report.setAllocatedResource(allocatedResource); report.setAssignedNode(assignedNode); report.setPriority(priority); report.setCreationTime(creationTime); report.setFinishTime(finishTime); report.setDiagnosticsInfo(diagnosticInfo); report.setLogUrl(logUrl); report.setContainerExitStatus(containerExitStatus); report.setContainerState(containerState); report.setNodeHttpAddress(nodeHttpAddress); return report; } /** * Get the <code>ContainerId</code> of the container. * * @return <code>ContainerId</code> of the container. */ @Public @Unstable public abstract ContainerId getContainerId(); @Public @Unstable public abstract void setContainerId(ContainerId containerId); /** * Get the allocated <code>Resource</code> of the container. * * @return allocated <code>Resource</code> of the container. */ @Public @Unstable public abstract Resource getAllocatedResource(); @Public @Unstable public abstract void setAllocatedResource(Resource resource); /** * Get the allocated <code>NodeId</code> where container is running. 
* * @return allocated <code>NodeId</code> where container is running. */ @Public @Unstable public abstract NodeId getAssignedNode(); @Public @Unstable public abstract void setAssignedNode(NodeId nodeId); /** * Get the allocated <code>Priority</code> of the container. * * @return allocated <code>Priority</code> of the container. */ @Public @Unstable public abstract Priority getPriority(); @Public @Unstable public abstract void setPriority(Priority priority); /** * Get the creation time of the container. * * @return creation time of the container */ @Public @Unstable public abstract long getCreationTime(); @Public @Unstable public abstract void setCreationTime(long creationTime); /** * Get the Finish time of the container. * * @return Finish time of the container */ @Public @Unstable public abstract long getFinishTime(); @Public @Unstable public abstract void setFinishTime(long finishTime); /** * Get the DiagnosticsInfo of the container. * * @return DiagnosticsInfo of the container */ @Public @Unstable public abstract String getDiagnosticsInfo(); @Public @Unstable public abstract void setDiagnosticsInfo(String diagnosticsInfo); /** * Get the LogURL of the container. * * @return LogURL of the container */ @Public @Unstable public abstract String getLogUrl(); @Public @Unstable public abstract void setLogUrl(String logUrl); /** * Get the final <code>ContainerState</code> of the container. * * @return final <code>ContainerState</code> of the container. */ @Public @Unstable public abstract ContainerState getContainerState(); @Public @Unstable public abstract void setContainerState(ContainerState containerState); /** * Get the final <code>exit status</code> of the container. * * @return final <code>exit status</code> of the container. 
*/ @Public @Unstable public abstract int getContainerExitStatus(); @Public @Unstable public abstract void setContainerExitStatus(int containerExitStatus); /** * Get the Node Http address of the container * * @return the node http address of the container */ @Public @Unstable public abstract String getNodeHttpAddress(); @Private @Unstable public abstract void setNodeHttpAddress(String nodeHttpAddress); }
5,704
25.784038
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Token.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; /** * <p><code>Token</code> is the security entity used by the framework * to verify authenticity of any resource.</p> */ @Public @Stable public abstract class Token { @Private @Unstable public static Token newInstance(byte[] identifier, String kind, byte[] password, String service) { Token token = Records.newRecord(Token.class); token.setIdentifier(ByteBuffer.wrap(identifier)); token.setKind(kind); token.setPassword(ByteBuffer.wrap(password)); token.setService(service); return token; } /** * Get the token identifier. 
* @return token identifier */ @Public @Stable public abstract ByteBuffer getIdentifier(); @Private @Unstable public abstract void setIdentifier(ByteBuffer identifier); /** * Get the token password * @return token password */ @Public @Stable public abstract ByteBuffer getPassword(); @Private @Unstable public abstract void setPassword(ByteBuffer password); /** * Get the token kind. * @return token kind */ @Public @Stable public abstract String getKind(); @Private @Unstable public abstract void setKind(String kind); /** * Get the service to which the token is allocated. * @return service to which the token is allocated */ @Public @Stable public abstract String getService(); @Private @Unstable public abstract void setService(String service); }
2,619
25.734694
82
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/DecommissionType.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; public enum DecommissionType { /** Decomissioning nodes in normal way **/ NORMAL, /** Graceful decommissioning of nodes **/ GRACEFUL, /** Forceful decommissioning of nodes which are already in progress **/ FORCEFUL }
1,082
36.344828
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptId.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records; import java.text.NumberFormat; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; /** * <p><code>ApplicationAttemptId</code> denotes the particular <em>attempt</em> * of an <code>ApplicationMaster</code> for a given {@link ApplicationId}.</p> * * <p>Multiple attempts might be needed to run an application to completion due * to temporal failures of the <code>ApplicationMaster</code> such as hardware * failures, connectivity issues etc. 
on the node on which it was scheduled.</p> */ @Public @Stable public abstract class ApplicationAttemptId implements Comparable<ApplicationAttemptId> { @Private @Unstable public static final String appAttemptIdStrPrefix = "appattempt_"; @Private @Unstable public static ApplicationAttemptId newInstance(ApplicationId appId, int attemptId) { ApplicationAttemptId appAttemptId = Records.newRecord(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); appAttemptId.setAttemptId(attemptId); appAttemptId.build(); return appAttemptId; } /** * Get the <code>ApplicationId</code> of the <code>ApplicationAttempId</code>. * @return <code>ApplicationId</code> of the <code>ApplicationAttempId</code> */ @Public @Stable public abstract ApplicationId getApplicationId(); @Private @Unstable protected abstract void setApplicationId(ApplicationId appID); /** * Get the <code>attempt id</code> of the <code>Application</code>. * @return <code>attempt id</code> of the <code>Application</code> */ @Public @Stable public abstract int getAttemptId(); @Private @Unstable protected abstract void setAttemptId(int attemptId); static final ThreadLocal<NumberFormat> attemptIdFormat = new ThreadLocal<NumberFormat>() { @Override public NumberFormat initialValue() { NumberFormat fmt = NumberFormat.getInstance(); fmt.setGroupingUsed(false); fmt.setMinimumIntegerDigits(6); return fmt; } }; @Override public int hashCode() { // Generated by eclipse. 
final int prime = 347671; int result = 5501; ApplicationId appId = getApplicationId(); result = prime * result + appId.hashCode(); result = prime * result + getAttemptId(); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; ApplicationAttemptId other = (ApplicationAttemptId) obj; if (!this.getApplicationId().equals(other.getApplicationId())) return false; if (this.getAttemptId() != other.getAttemptId()) return false; return true; } @Override public int compareTo(ApplicationAttemptId other) { int compareAppIds = this.getApplicationId().compareTo( other.getApplicationId()); if (compareAppIds == 0) { return this.getAttemptId() - other.getAttemptId(); } else { return compareAppIds; } } @Override public String toString() { StringBuilder sb = new StringBuilder(appAttemptIdStrPrefix); sb.append(this.getApplicationId().getClusterTimestamp()).append("_"); sb.append(ApplicationId.appIdFormat.get().format( this.getApplicationId().getId())); sb.append("_").append(attemptIdFormat.get().format(getAttemptId())); return sb.toString(); } protected abstract void build(); }
4,568
30.951049
81
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineAbout.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records.timeline; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; @XmlRootElement(name = "about") @XmlAccessorType(XmlAccessType.NONE) @InterfaceAudience.Public @InterfaceStability.Evolving public class TimelineAbout { private String about; private String timelineServiceVersion; private String timelineServiceBuildVersion; private String timelineServiceVersionBuiltOn; private String hadoopVersion; private String hadoopBuildVersion; private String hadoopVersionBuiltOn; public TimelineAbout() { } public TimelineAbout(String about) { this.about = about; } @XmlElement(name = "About") public String getAbout() { return about; } public void setAbout(String about) { this.about = about; } @XmlElement(name = "timeline-service-version") public String getTimelineServiceVersion() { return timelineServiceVersion; } public void setTimelineServiceVersion(String timelineServiceVersion) { this.timelineServiceVersion = 
timelineServiceVersion; } @XmlElement(name = "timeline-service-build-version") public String getTimelineServiceBuildVersion() { return timelineServiceBuildVersion; } public void setTimelineServiceBuildVersion( String timelineServiceBuildVersion) { this.timelineServiceBuildVersion = timelineServiceBuildVersion; } @XmlElement(name = "timeline-service-version-built-on") public String getTimelineServiceVersionBuiltOn() { return timelineServiceVersionBuiltOn; } public void setTimelineServiceVersionBuiltOn( String timelineServiceVersionBuiltOn) { this.timelineServiceVersionBuiltOn = timelineServiceVersionBuiltOn; } @XmlElement(name = "hadoop-version") public String getHadoopVersion() { return hadoopVersion; } public void setHadoopVersion(String hadoopVersion) { this.hadoopVersion = hadoopVersion; } @XmlElement(name = "hadoop-build-version") public String getHadoopBuildVersion() { return hadoopBuildVersion; } public void setHadoopBuildVersion(String hadoopBuildVersion) { this.hadoopBuildVersion = hadoopBuildVersion; } @XmlElement(name = "hadoop-version-built-on") public String getHadoopVersionBuiltOn() { return hadoopVersionBuiltOn; } public void setHadoopVersionBuiltOn(String hadoopVersionBuiltOn) { this.hadoopVersionBuiltOn = hadoopVersionBuiltOn; } }
3,472
28.683761
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineDelegationTokenResponse.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records.timeline; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; /** * The response of delegation token related request */ @XmlRootElement(name = "delegationtoken") @XmlAccessorType(XmlAccessType.NONE) @Public @Evolving public class TimelineDelegationTokenResponse { private String type; private Object content; public TimelineDelegationTokenResponse() { } @XmlElement(name = "type") public String getType() { return type; } public void setType(String type) { this.type = type; } @XmlElement(name = "content") public Object getContent() { return content; } public void setContent(Object content) { this.content = content; } }
1,806
27.234375
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/package-info.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Public @InterfaceStability.Evolving package org.apache.hadoop.yarn.api.records.timeline; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
1,035
42.166667
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineDomains.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records.timeline; import java.util.ArrayList; import java.util.List; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; /** * The class that hosts a list of timeline domains. 
*/ @XmlRootElement(name = "domains") @XmlAccessorType(XmlAccessType.NONE) @Public @Evolving public class TimelineDomains { private List<TimelineDomain> domains = new ArrayList<TimelineDomain>(); public TimelineDomains() { } /** * Get a list of domains * * @return a list of domains */ @XmlElement(name = "domains") public List<TimelineDomain> getDomains() { return domains; } /** * Add a single domain into the existing domain list * * @param domain * a single domain */ public void addDomain(TimelineDomain domain) { domains.add(domain); } /** * All a list of domains into the existing domain list * * @param domains * a list of domains */ public void addDomains(List<TimelineDomain> domains) { this.domains.addAll(domains); } /** * Set the domain list to the given list of domains * * @param domains * a list of domains */ public void setDomains(List<TimelineDomain> domains) { this.domains = domains; } }
2,351
26.034483
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineDomain.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records.timeline; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; /** * <p> * This class contains the information about a timeline domain, which is used * to a user to host a number of timeline entities, isolating them from others'. * The user can also define the reader and writer users/groups for the the * domain, which is used to control the access to its entities. * </p> * * <p> * The reader and writer users/groups pattern that the user can supply is the * same as what <code>AccessControlList</code> takes. 
* </p> * */ @XmlRootElement(name = "domain") @XmlAccessorType(XmlAccessType.NONE) @Public @Evolving public class TimelineDomain { private String id; private String description; private String owner; private String readers; private String writers; private Long createdTime; private Long modifiedTime; public TimelineDomain() { } /** * Get the domain ID * * @return the domain ID */ @XmlElement(name = "id") public String getId() { return id; } /** * Set the domain ID * * @param id the domain ID */ public void setId(String id) { this.id = id; } /** * Get the domain description * * @return the domain description */ @XmlElement(name = "description") public String getDescription() { return description; } /** * Set the domain description * * @param description the domain description */ public void setDescription(String description) { this.description = description; } /** * Get the domain owner * * @return the domain owner */ @XmlElement(name = "owner") public String getOwner() { return owner; } /** * Set the domain owner. The user doesn't need to set it, which will * automatically set to the user who puts the domain. 
* * @param owner the domain owner */ public void setOwner(String owner) { this.owner = owner; } /** * Get the reader (and/or reader group) list string * * @return the reader (and/or reader group) list string */ @XmlElement(name = "readers") public String getReaders() { return readers; } /** * Set the reader (and/or reader group) list string * * @param readers the reader (and/or reader group) list string */ public void setReaders(String readers) { this.readers = readers; } /** * Get the writer (and/or writer group) list string * * @return the writer (and/or writer group) list string */ @XmlElement(name = "writers") public String getWriters() { return writers; } /** * Set the writer (and/or writer group) list string * * @param writers the writer (and/or writer group) list string */ public void setWriters(String writers) { this.writers = writers; } /** * Get the created time of the domain * * @return the created time of the domain */ @XmlElement(name = "createdtime") public Long getCreatedTime() { return createdTime; } /** * Set the created time of the domain * * @param createdTime the created time of the domain */ public void setCreatedTime(Long createdTime) { this.createdTime = createdTime; } /** * Get the modified time of the domain * * @return the modified time of the domain */ @XmlElement(name = "modifiedtime") public Long getModifiedTime() { return modifiedTime; } /** * Set the modified time of the domain * * @param modifiedTime the modified time of the domain */ public void setModifiedTime(Long modifiedTime) { this.modifiedTime = modifiedTime; } }
4,732
23.271795
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelinePutResponse.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records.timeline; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import java.util.ArrayList; import java.util.List; /** * A class that holds a list of put errors. This is the response returned when a * list of {@link TimelineEntity} objects is added to the timeline. If there are errors * in storing individual entity objects, they will be indicated in the list of * errors. 
*/ @XmlRootElement(name = "response") @XmlAccessorType(XmlAccessType.NONE) @Public @Evolving public class TimelinePutResponse { private List<TimelinePutError> errors = new ArrayList<TimelinePutError>(); public TimelinePutResponse() { } /** * Get a list of {@link TimelinePutError} instances * * @return a list of {@link TimelinePutError} instances */ @XmlElement(name = "errors") public List<TimelinePutError> getErrors() { return errors; } /** * Add a single {@link TimelinePutError} instance into the existing list * * @param error * a single {@link TimelinePutError} instance */ public void addError(TimelinePutError error) { errors.add(error); } /** * Add a list of {@link TimelinePutError} instances into the existing list * * @param errors * a list of {@link TimelinePutError} instances */ public void addErrors(List<TimelinePutError> errors) { this.errors.addAll(errors); } /** * Set the list to the given list of {@link TimelinePutError} instances * * @param errors * a list of {@link TimelinePutError} instances */ public void setErrors(List<TimelinePutError> errors) { this.errors.clear(); this.errors.addAll(errors); } /** * A class that holds the error code for one entity. */ @XmlRootElement(name = "error") @XmlAccessorType(XmlAccessType.NONE) @Public @Evolving public static class TimelinePutError { /** * Error code returned when no start time can be found when putting an * entity. This occurs when the entity does not already exist in the store * and it is put with no start time or events specified. */ public static final int NO_START_TIME = 1; /** * Error code returned if an IOException is encountered when putting an * entity. 
*/ public static final int IO_EXCEPTION = 2; /** * Error code returned if the user specifies the timeline system reserved * filter key */ public static final int SYSTEM_FILTER_CONFLICT = 3; /** * Error code returned if the user is denied to access the timeline data */ public static final int ACCESS_DENIED = 4; /** * Error code returned if the entity doesn't have an valid domain ID */ public static final int NO_DOMAIN = 5; /** * Error code returned if the user is denied to relate the entity to another * one in different domain */ public static final int FORBIDDEN_RELATION = 6; /** * Error code returned if the entity start time is before the eviction * period of old data. */ public static final int EXPIRED_ENTITY = 7; private String entityId; private String entityType; private int errorCode; /** * Get the entity Id * * @return the entity Id */ @XmlElement(name = "entity") public String getEntityId() { return entityId; } /** * Set the entity Id * * @param entityId * the entity Id */ public void setEntityId(String entityId) { this.entityId = entityId; } /** * Get the entity type * * @return the entity type */ @XmlElement(name = "entitytype") public String getEntityType() { return entityType; } /** * Set the entity type * * @param entityType * the entity type */ public void setEntityType(String entityType) { this.entityType = entityType; } /** * Get the error code * * @return an error code */ @XmlElement(name = "errorcode") public int getErrorCode() { return errorCode; } /** * Set the error code to the given error code * * @param errorCode * an error code */ public void setErrorCode(int errorCode) { this.errorCode = errorCode; } } }
5,450
25.590244
87
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEvent.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records.timeline; import java.util.HashMap; import java.util.Map; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; /** * The class that contains the information of an event that is related to some * conceptual entity of an application. Users are free to define what the event * means, such as starting an application, getting allocated a container and * etc. 
*/ @XmlRootElement(name = "event") @XmlAccessorType(XmlAccessType.NONE) @Public @Evolving public class TimelineEvent implements Comparable<TimelineEvent> { private long timestamp; private String eventType; private HashMap<String, Object> eventInfo = new HashMap<String, Object>(); public TimelineEvent() { } /** * Get the timestamp of the event * * @return the timestamp of the event */ @XmlElement(name = "timestamp") public long getTimestamp() { return timestamp; } /** * Set the timestamp of the event * * @param timestamp * the timestamp of the event */ public void setTimestamp(long timestamp) { this.timestamp = timestamp; } /** * Get the event type * * @return the event type */ @XmlElement(name = "eventtype") public String getEventType() { return eventType; } /** * Set the event type * * @param eventType * the event type */ public void setEventType(String eventType) { this.eventType = eventType; } /** * Set the information of the event * * @return the information of the event */ public Map<String, Object> getEventInfo() { return eventInfo; } // Required by JAXB @Private @XmlElement(name = "eventinfo") public HashMap<String, Object> getEventInfoJAXB() { return eventInfo; } /** * Add one piece of the information of the event to the existing information * map * * @param key * the information key * @param value * the information value */ public void addEventInfo(String key, Object value) { this.eventInfo.put(key, value); } /** * Add a map of the information of the event to the existing information map * * @param eventInfo * a map of of the information of the event */ public void addEventInfo(Map<String, Object> eventInfo) { this.eventInfo.putAll(eventInfo); } /** * Set the information map to the given map of the information of the event * * @param eventInfo * a map of of the information of the event */ public void setEventInfo(Map<String, Object> eventInfo) { if (eventInfo != null && !(eventInfo instanceof HashMap)) { this.eventInfo = new HashMap<String, 
Object>(eventInfo); } else { this.eventInfo = (HashMap<String, Object>) eventInfo; } } @Override public int compareTo(TimelineEvent other) { if (timestamp > other.timestamp) { return -1; } else if (timestamp < other.timestamp) { return 1; } else { return eventType.compareTo(other.eventType); } } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; TimelineEvent event = (TimelineEvent) o; if (timestamp != event.timestamp) return false; if (!eventType.equals(event.eventType)) return false; if (eventInfo != null ? !eventInfo.equals(event.eventInfo) : event.eventInfo != null) return false; return true; } @Override public int hashCode() { int result = (int) (timestamp ^ (timestamp >>> 32)); result = 31 * result + eventType.hashCode(); result = 31 * result + (eventInfo != null ? eventInfo.hashCode() : 0); return result; } }
4,909
25.684783
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEvents.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records.timeline; import java.util.ArrayList; import java.util.List; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; /** * The class that hosts a list of events, which are categorized according to * their related entities. 
*/ @XmlRootElement(name = "events") @XmlAccessorType(XmlAccessType.NONE) @Public @Evolving public class TimelineEvents { private List<EventsOfOneEntity> allEvents = new ArrayList<EventsOfOneEntity>(); public TimelineEvents() { } /** * Get a list of {@link EventsOfOneEntity} instances * * @return a list of {@link EventsOfOneEntity} instances */ @XmlElement(name = "events") public List<EventsOfOneEntity> getAllEvents() { return allEvents; } /** * Add a single {@link EventsOfOneEntity} instance into the existing list * * @param eventsOfOneEntity * a single {@link EventsOfOneEntity} instance */ public void addEvent(EventsOfOneEntity eventsOfOneEntity) { allEvents.add(eventsOfOneEntity); } /** * Add a list of {@link EventsOfOneEntity} instances into the existing list * * @param allEvents * a list of {@link EventsOfOneEntity} instances */ public void addEvents(List<EventsOfOneEntity> allEvents) { this.allEvents.addAll(allEvents); } /** * Set the list to the given list of {@link EventsOfOneEntity} instances * * @param allEvents * a list of {@link EventsOfOneEntity} instances */ public void setEvents(List<EventsOfOneEntity> allEvents) { this.allEvents.clear(); this.allEvents.addAll(allEvents); } /** * The class that hosts a list of events that are only related to one entity. 
*/ @XmlRootElement(name = "events") @XmlAccessorType(XmlAccessType.NONE) @Public @Evolving public static class EventsOfOneEntity { private String entityId; private String entityType; private List<TimelineEvent> events = new ArrayList<TimelineEvent>(); public EventsOfOneEntity() { } /** * Get the entity Id * * @return the entity Id */ @XmlElement(name = "entity") public String getEntityId() { return entityId; } /** * Set the entity Id * * @param entityId * the entity Id */ public void setEntityId(String entityId) { this.entityId = entityId; } /** * Get the entity type * * @return the entity type */ @XmlElement(name = "entitytype") public String getEntityType() { return entityType; } /** * Set the entity type * * @param entityType * the entity type */ public void setEntityType(String entityType) { this.entityType = entityType; } /** * Get a list of events * * @return a list of events */ @XmlElement(name = "events") public List<TimelineEvent> getEvents() { return events; } /** * Add a single event to the existing event list * * @param event * a single event */ public void addEvent(TimelineEvent event) { events.add(event); } /** * Add a list of event to the existing event list * * @param events * a list of events */ public void addEvents(List<TimelineEvent> events) { this.events.addAll(events); } /** * Set the event list to the given list of events * * @param events * a list of events */ public void setEvents(List<TimelineEvent> events) { this.events = events; } } }
4,728
23.889474
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEntity.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.api.records.timeline;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;

/**
 * <p>
 * The class that contains the meta information of some conceptual entity
 * and its related events. The entity can be an application, an application
 * attempt, a container or whatever the user-defined object.
 * </p>
 *
 * <p>
 * Primary filters will be used to index the entities in
 * <code>TimelineStore</code>, such that users should carefully choose the
 * information they want to store as the primary filters. The remaining can be
 * stored as other information.
 * </p>
 */
@XmlRootElement(name = "entity")
@XmlAccessorType(XmlAccessType.NONE)
@Public
@Evolving
public class TimelineEntity implements Comparable<TimelineEntity> {

  private String entityType;
  private String entityId;
  private Long startTime;
  private List<TimelineEvent> events = new ArrayList<TimelineEvent>();
  private HashMap<String, Set<String>> relatedEntities =
      new HashMap<String, Set<String>>();
  private HashMap<String, Set<Object>> primaryFilters =
      new HashMap<String, Set<Object>>();
  private HashMap<String, Object> otherInfo = new HashMap<String, Object>();
  private String domainId;

  public TimelineEntity() {
  }

  /**
   * Get the entity type
   *
   * @return the entity type
   */
  @XmlElement(name = "entitytype")
  public String getEntityType() {
    return entityType;
  }

  /**
   * Set the entity type
   *
   * @param entityType
   *          the entity type
   */
  public void setEntityType(String entityType) {
    this.entityType = entityType;
  }

  /**
   * Get the entity Id
   *
   * @return the entity Id
   */
  @XmlElement(name = "entity")
  public String getEntityId() {
    return entityId;
  }

  /**
   * Set the entity Id
   *
   * @param entityId
   *          the entity Id
   */
  public void setEntityId(String entityId) {
    this.entityId = entityId;
  }

  /**
   * Get the start time of the entity
   *
   * @return the start time of the entity
   */
  @XmlElement(name = "starttime")
  public Long getStartTime() {
    return startTime;
  }

  /**
   * Set the start time of the entity
   *
   * @param startTime
   *          the start time of the entity
   */
  public void setStartTime(Long startTime) {
    this.startTime = startTime;
  }

  /**
   * Get a list of events related to the entity
   *
   * @return a list of events related to the entity
   */
  @XmlElement(name = "events")
  public List<TimelineEvent> getEvents() {
    return events;
  }

  /**
   * Add a single event related to the entity to the existing event list
   *
   * @param event
   *          a single event related to the entity
   */
  public void addEvent(TimelineEvent event) {
    events.add(event);
  }

  /**
   * Add a list of events related to the entity to the existing event list
   *
   * @param events
   *          a list of events related to the entity
   */
  public void addEvents(List<TimelineEvent> events) {
    this.events.addAll(events);
  }

  /**
   * Set the event list to the given list of events related to the entity
   *
   * @param events
   *          a list of events related to the entity
   */
  public void setEvents(List<TimelineEvent> events) {
    this.events = events;
  }

  /**
   * Get the related entities
   *
   * @return the related entities
   */
  public Map<String, Set<String>> getRelatedEntities() {
    return relatedEntities;
  }

  // Required by JAXB
  @Private
  @XmlElement(name = "relatedentities")
  public HashMap<String, Set<String>> getRelatedEntitiesJAXB() {
    return relatedEntities;
  }

  /**
   * Add an entity to the existing related entity map
   *
   * @param entityType
   *          the entity type
   * @param entityId
   *          the entity Id
   */
  public void addRelatedEntity(String entityType, String entityId) {
    Set<String> thisRelatedEntity = relatedEntities.get(entityType);
    if (thisRelatedEntity == null) {
      thisRelatedEntity = new HashSet<String>();
      relatedEntities.put(entityType, thisRelatedEntity);
    }
    thisRelatedEntity.add(entityId);
  }

  /**
   * Add a map of related entities to the existing related entity map
   *
   * @param relatedEntities
   *          a map of related entities
   */
  public void addRelatedEntities(Map<String, Set<String>> relatedEntities) {
    for (Entry<String, Set<String>> relatedEntity :
        relatedEntities.entrySet()) {
      Set<String> thisRelatedEntity =
          this.relatedEntities.get(relatedEntity.getKey());
      if (thisRelatedEntity == null) {
        // NOTE(review): stores the caller's set directly (no defensive
        // copy), so later external mutation is visible here
        this.relatedEntities.put(
            relatedEntity.getKey(), relatedEntity.getValue());
      } else {
        thisRelatedEntity.addAll(relatedEntity.getValue());
      }
    }
  }

  /**
   * Set the related entity map to the given map of related entities
   *
   * @param relatedEntities
   *          a map of related entities
   */
  public void setRelatedEntities(
      Map<String, Set<String>> relatedEntities) {
    if (relatedEntities != null && !(relatedEntities instanceof HashMap)) {
      this.relatedEntities =
          new HashMap<String, Set<String>>(relatedEntities);
    } else {
      // null, or already a HashMap that can be stored directly
      this.relatedEntities =
          (HashMap<String, Set<String>>) relatedEntities;
    }
  }

  /**
   * Get the primary filters
   *
   * @return the primary filters
   */
  public Map<String, Set<Object>> getPrimaryFilters() {
    return primaryFilters;
  }

  // Required by JAXB
  @Private
  @XmlElement(name = "primaryfilters")
  public HashMap<String, Set<Object>> getPrimaryFiltersJAXB() {
    return primaryFilters;
  }

  /**
   * Add a single piece of primary filter to the existing primary filter map
   *
   * @param key
   *          the primary filter key
   * @param value
   *          the primary filter value
   */
  public void addPrimaryFilter(String key, Object value) {
    Set<Object> thisPrimaryFilter = primaryFilters.get(key);
    if (thisPrimaryFilter == null) {
      thisPrimaryFilter = new HashSet<Object>();
      primaryFilters.put(key, thisPrimaryFilter);
    }
    thisPrimaryFilter.add(value);
  }

  /**
   * Add a map of primary filters to the existing primary filter map
   *
   * @param primaryFilters
   *          a map of primary filters
   */
  public void addPrimaryFilters(Map<String, Set<Object>> primaryFilters) {
    for (Entry<String, Set<Object>> primaryFilter :
        primaryFilters.entrySet()) {
      Set<Object> thisPrimaryFilter =
          this.primaryFilters.get(primaryFilter.getKey());
      if (thisPrimaryFilter == null) {
        // NOTE(review): same aliasing behavior as addRelatedEntities — the
        // caller's set is stored without copying
        this.primaryFilters.put(
            primaryFilter.getKey(), primaryFilter.getValue());
      } else {
        thisPrimaryFilter.addAll(primaryFilter.getValue());
      }
    }
  }

  /**
   * Set the primary filter map to the given map of primary filters
   *
   * @param primaryFilters
   *          a map of primary filters
   */
  public void setPrimaryFilters(Map<String, Set<Object>> primaryFilters) {
    if (primaryFilters != null && !(primaryFilters instanceof HashMap)) {
      this.primaryFilters =
          new HashMap<String, Set<Object>>(primaryFilters);
    } else {
      this.primaryFilters = (HashMap<String, Set<Object>>) primaryFilters;
    }
  }

  /**
   * Get the other information of the entity
   *
   * @return the other information of the entity
   */
  public Map<String, Object> getOtherInfo() {
    return otherInfo;
  }

  // Required by JAXB
  @Private
  @XmlElement(name = "otherinfo")
  public HashMap<String, Object> getOtherInfoJAXB() {
    return otherInfo;
  }

  /**
   * Add one piece of other information of the entity to the existing other
   * info map
   *
   * @param key
   *          the other information key
   * @param value
   *          the other information value
   */
  public void addOtherInfo(String key, Object value) {
    this.otherInfo.put(key, value);
  }

  /**
   * Add a map of other information of the entity to the existing other info
   * map
   *
   * @param otherInfo
   *          a map of other information
   */
  public void addOtherInfo(Map<String, Object> otherInfo) {
    this.otherInfo.putAll(otherInfo);
  }

  /**
   * Set the other info map to the given map of other information
   *
   * @param otherInfo
   *          a map of other information
   */
  public void setOtherInfo(Map<String, Object> otherInfo) {
    if (otherInfo != null && !(otherInfo instanceof HashMap)) {
      this.otherInfo = new HashMap<String, Object>(otherInfo);
    } else {
      this.otherInfo = (HashMap<String, Object>) otherInfo;
    }
  }

  /**
   * Get the ID of the domain that the entity is to be put
   *
   * @return the domain ID
   */
  @XmlElement(name = "domain")
  public String getDomainId() {
    return domainId;
  }

  /**
   * Set the ID of the domain that the entity is to be put
   *
   * @param domainId
   *          the name space ID
   */
  public void setDomainId(String domainId) {
    this.domainId = domainId;
  }

  // Note: domainId is deliberately left out of both hashCode and equals
  // below (both were Eclipse-generated over the other seven fields).
  @Override
  public int hashCode() {
    // generated by eclipse
    final int prime = 31;
    int result = 1;
    result = prime * result + ((entityId == null) ? 0 : entityId.hashCode());
    result =
        prime * result + ((entityType == null) ? 0 : entityType.hashCode());
    result = prime * result + ((events == null) ? 0 : events.hashCode());
    result = prime * result + ((otherInfo == null) ? 0 : otherInfo.hashCode());
    result =
        prime * result
            + ((primaryFilters == null) ? 0 : primaryFilters.hashCode());
    result =
        prime * result
            + ((relatedEntities == null) ? 0 : relatedEntities.hashCode());
    result = prime * result + ((startTime == null) ? 0 : startTime.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    // generated by eclipse
    if (this == obj)
      return true;
    if (obj == null)
      return false;
    if (getClass() != obj.getClass())
      return false;
    TimelineEntity other = (TimelineEntity) obj;
    if (entityId == null) {
      if (other.entityId != null)
        return false;
    } else if (!entityId.equals(other.entityId))
      return false;
    if (entityType == null) {
      if (other.entityType != null)
        return false;
    } else if (!entityType.equals(other.entityType))
      return false;
    if (events == null) {
      if (other.events != null)
        return false;
    } else if (!events.equals(other.events))
      return false;
    if (otherInfo == null) {
      if (other.otherInfo != null)
        return false;
    } else if (!otherInfo.equals(other.otherInfo))
      return false;
    if (primaryFilters == null) {
      if (other.primaryFilters != null)
        return false;
    } else if (!primaryFilters.equals(other.primaryFilters))
      return false;
    if (relatedEntities == null) {
      if (other.relatedEntities != null)
        return false;
    } else if (!relatedEntities.equals(other.relatedEntities))
      return false;
    if (startTime == null) {
      if (other.startTime != null)
        return false;
    } else if (!startTime.equals(other.startTime))
      return false;
    return true;
  }

  // Orders by entity type ascending, then start time descending (null start
  // times sort last), then entity Id ascending.
  @Override
  public int compareTo(TimelineEntity other) {
    int comparison = entityType.compareTo(other.entityType);
    if (comparison == 0) {
      long thisStartTime =
          startTime == null ? Long.MIN_VALUE : startTime;
      long otherStartTime =
          other.startTime == null ? Long.MIN_VALUE : other.startTime;
      if (thisStartTime > otherStartTime) {
        return -1;
      } else if (thisStartTime < otherStartTime) {
        return 1;
      } else {
        return entityId.compareTo(other.entityId);
      }
    } else {
      return comparison;
    }
  }

}
13,067
26.863539
81
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timeline/TimelineEntities.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records.timeline; import java.util.ArrayList; import java.util.List; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; /** * The class that hosts a list of timeline entities. 
*/
@XmlRootElement(name = "entities")
@XmlAccessorType(XmlAccessType.NONE)
@Public
@Evolving
public class TimelineEntities {

  // Backing list; initialized non-null but replaceable wholesale via setEntities.
  private List<TimelineEntity> entities = new ArrayList<TimelineEntity>();

  public TimelineEntities() {
  }

  /**
   * Get the list of entities.
   *
   * @return a list of entities
   */
  @XmlElement(name = "entities")
  public List<TimelineEntity> getEntities() {
    return entities;
  }

  /**
   * Add a single entity into the existing entity list.
   *
   * @param entity
   *          a single entity
   */
  public void addEntity(TimelineEntity entity) {
    entities.add(entity);
  }

  /**
   * Add a list of entities into the existing entity list.
   *
   * @param entities
   *          a list of entities
   */
  public void addEntities(List<TimelineEntity> entities) {
    this.entities.addAll(entities);
  }

  /**
   * Set the entity list to the given list of entities.
   *
   * @param entities
   *          a list of entities
   */
  public void setEntities(List<TimelineEntity> entities) {
    this.entities = entities;
  }
}
2,383
25.786517
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.util;
843
43.421053
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/Records.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.util; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; /** * Convenient API record utils */ @LimitedPrivate({ "MapReduce", "YARN" }) @Unstable public class Records { // The default record factory private static final RecordFactory factory = RecordFactoryProvider.getRecordFactory(null); public static <T> T newRecord(Class<T> cls) { return factory.newRecordInstance(cls); } }
1,456
36.358974
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/factories/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Private package org.apache.hadoop.yarn.factories; import org.apache.hadoop.classification.InterfaceAudience;
933
45.7
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/factories/RecordFactory.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.factories;

import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceStability.Unstable;

/**
 * Factory abstraction for instantiating implementations of YARN API
 * record classes.
 */
@LimitedPrivate({ "MapReduce", "YARN" })
@Unstable
public interface RecordFactory {

  /**
   * Create a new instance of the given record class.
   *
   * @param clazz the record class to instantiate
   * @return a new record instance
   */
  <T> T newRecordInstance(Class<T> clazz);
}
1,131
38.034483
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YarnException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.exceptions;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Stable;

/**
 * Base checked exception for failures reported by YARN servers; RPC-layer
 * failures, by contrast, surface as IOExceptions.
 */
@Public
@Stable
public class YarnException extends Exception {

  private static final long serialVersionUID = 1L;

  /** Construct with no detail message. */
  public YarnException() {
    super();
  }

  /**
   * @param message the detail message
   */
  public YarnException(String message) {
    super(message);
  }

  /**
   * @param message the detail message
   * @param cause the underlying cause
   */
  public YarnException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * @param cause the underlying cause
   */
  public YarnException(Throwable cause) {
    super(cause);
  }
}
1,506
29.14
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceBlacklistRequestException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.exceptions; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; /** * This exception is thrown when an application provides an invalid * {@link ResourceBlacklistRequest} specification for blacklisting of resources * in {@link ApplicationMasterProtocol#allocate(AllocateRequest)} API. * * Currently this exceptions is thrown when an application tries to * blacklist {@link ResourceRequest#ANY}. */ public class InvalidResourceBlacklistRequestException extends YarnException { private static final long serialVersionUID = 384957911L; public InvalidResourceBlacklistRequestException(Throwable cause) { super(cause); } public InvalidResourceBlacklistRequestException(String message) { super(message); } public InvalidResourceBlacklistRequestException(String message, Throwable cause) { super(message, cause); } }
1,892
36.117647
84
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Public package org.apache.hadoop.yarn.exceptions; import org.apache.hadoop.classification.InterfaceAudience;
933
45.7
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidContainerException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.exceptions;

import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;

/**
 * Thrown by a NodeManager to reject start-container requests made via
 * {@link ContainerManagementProtocol#startContainers(StartContainersRequest)}
 * for containers that were allocated by a previous instance of the RM.
 */
public class InvalidContainerException extends YarnException {

  private static final long serialVersionUID = 1L;

  /**
   * @param msg the detail message
   */
  public InvalidContainerException(String msg) {
    super(msg);
  }
}
1,421
36.421053
78
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.exceptions; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; /** * This exception is thrown when a resource requested via * {@link ResourceRequest} in the * {@link ApplicationMasterProtocol#allocate(AllocateRequest)} API is out of the * range of the configured lower and upper limits on resources. * */ public class InvalidResourceRequestException extends YarnException { private static final long serialVersionUID = 13498237L; public InvalidResourceRequestException(Throwable cause) { super(cause); } public InvalidResourceRequestException(String message) { super(message); } public InvalidResourceRequestException(String message, Throwable cause) { super(message, cause); } }
1,695
33.612245
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ApplicationMasterNotRegisteredException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.exceptions; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; /** * This exception is thrown when an Application Master tries to unregister by calling * {@link ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)} * API without first registering by calling * {@link ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)} * or after an RM restart. The ApplicationMaster is expected to call * {@link ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)} * and retry. */ public class ApplicationMasterNotRegisteredException extends YarnException { private static final long serialVersionUID = 13498238L; public ApplicationMasterNotRegisteredException(Throwable cause) { super(cause);} public ApplicationMasterNotRegisteredException(String message) { super(message); } public ApplicationMasterNotRegisteredException(String message, Throwable cause) { super(message, cause); } }
2,019
41.083333
96
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ApplicationIdNotProvidedException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.exceptions; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; /** * Exception to be thrown when Client submit an application without * providing {@link ApplicationId} in {@link ApplicationSubmissionContext}. */ @Public @Unstable public class ApplicationIdNotProvidedException extends YarnException{ private static final long serialVersionUID = 911754350L; public ApplicationIdNotProvidedException(Throwable cause) { super(cause); } public ApplicationIdNotProvidedException(String message) { super(message); } public ApplicationIdNotProvidedException(String message, Throwable cause) { super(message, cause); } }
1,684
34.104167
77
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YarnRuntimeException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.exceptions;

import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.classification.InterfaceStability.Unstable;

/**
 * Base unchecked exception for YARN.
 *
 * NOTE: every subclass of this exception that a remote service may throw
 * must provide a String-only constructor, so the exception can be unwrapped
 * on the client side.
 */
@LimitedPrivate({ "MapReduce", "YARN" })
@Unstable
public class YarnRuntimeException extends RuntimeException {

  private static final long serialVersionUID = -7153142425412203936L;

  /**
   * @param message the detail message
   */
  public YarnRuntimeException(String message) {
    super(message);
  }

  /**
   * @param message the detail message
   * @param cause the underlying cause
   */
  public YarnRuntimeException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * @param cause the underlying cause
   */
  public YarnRuntimeException(Throwable cause) {
    super(cause);
  }
}
1,603
38.121951
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ApplicationNotFoundException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.exceptions; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; /** * This exception is thrown on * {@link ApplicationClientProtocol#getApplicationReport * (GetApplicationReportRequest)} API * when the Application doesn't exist in RM and AHS */ @Public @Unstable public class ApplicationNotFoundException extends YarnException{ private static final long serialVersionUID = 8694408L; public ApplicationNotFoundException(Throwable cause) { super(cause); } public ApplicationNotFoundException(String message) { super(message); } public ApplicationNotFoundException(String message, Throwable cause) { super(message, cause); } }
1,729
32.921569
78
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/NMNotYetReadyException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.exceptions;

import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;

/**
 * Thrown by the
 * {@link ContainerManagementProtocol#startContainers(StartContainersRequest)}
 * API when a NodeManager has started from scratch but has not yet connected
 * with the RM.
 */
public class NMNotYetReadyException extends YarnException {

  private static final long serialVersionUID = 1L;

  /**
   * @param msg the detail message
   */
  public NMNotYetReadyException(String msg) {
    super(msg);
  }
}
1,362
35.837838
82
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ApplicationAttemptNotFoundException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.exceptions; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest; /** * This exception is thrown on * {@link ApplicationHistoryProtocol#getApplicationAttemptReport (GetApplicationAttemptReportRequest)} * API when the Application Attempt doesn't exist in Application History Server or * {@link ApplicationMasterProtocol#allocate(AllocateRequest)} if application * doesn't exist in RM. */ @Public @Unstable public class ApplicationAttemptNotFoundException extends YarnException { private static final long serialVersionUID = 8694508L; public ApplicationAttemptNotFoundException(Throwable cause) { super(cause); } public ApplicationAttemptNotFoundException(String message) { super(message); } public ApplicationAttemptNotFoundException(String message, Throwable cause) { super(message, cause); } }
2,029
37.301887
102
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/ContainerNotFoundException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.exceptions; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ApplicationHistoryProtocol; import org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest; /** * This exception is thrown on * {@link ApplicationHistoryProtocol#getContainerReport (GetContainerReportRequest)} * API when the container doesn't exist in AHS */ @Public @Unstable public class ContainerNotFoundException extends YarnException { private static final long serialVersionUID = 8694608L; public ContainerNotFoundException(Throwable cause) { super(cause); } public ContainerNotFoundException(String message) { super(message); } public ContainerNotFoundException(String message, Throwable cause) { super(message, cause); } }
1,700
33.714286
84
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidContainerReleaseException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.exceptions;

import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;

/**
 * Thrown when an ApplicationMaster uses the
 * {@link ApplicationMasterProtocol#allocate(AllocateRequest)} API to release
 * containers that do not belong to it.
 */
public class InvalidContainerReleaseException extends YarnException {

  private static final long serialVersionUID = 13498237L;

  /**
   * @param message the detail message
   */
  public InvalidContainerReleaseException(String message) {
    super(message);
  }

  /**
   * @param message the detail message
   * @param cause the underlying cause
   */
  public InvalidContainerReleaseException(String message, Throwable cause) {
    super(message, cause);
  }

  /**
   * @param cause the underlying cause
   */
  public InvalidContainerReleaseException(Throwable cause) {
    super(cause);
  }
}
1,590
33.586957
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidAuxServiceException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.exceptions;

import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;

/**
 * This exception is thrown by a NodeManager that is rejecting start-container
 * requests via
 * {@link ContainerManagementProtocol#startContainers(StartContainersRequest)}
 * when the requested auxiliary service does not exist.
 */
public class InvalidAuxServiceException extends YarnException {

  private static final long serialVersionUID = 1L;

  /**
   * @param msg the detail message
   */
  public InvalidAuxServiceException(String msg) {
    super(msg);
  }
}
1,396
35.763158
78
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidApplicationMasterRequestException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.exceptions; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; /** * This exception is thrown when an ApplicationMaster asks for resources by * calling {@link ApplicationMasterProtocol#allocate(AllocateRequest)} * without first registering by calling * {@link ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)} * or if it tries to register more than once. */ public class InvalidApplicationMasterRequestException extends YarnException { private static final long serialVersionUID = 1357686L; public InvalidApplicationMasterRequestException(Throwable cause) { super(cause); } public InvalidApplicationMasterRequestException(String message) { super(message); } public InvalidApplicationMasterRequestException(String message, Throwable cause) { super(message, cause); } }
1,849
36.755102
96
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Public package org.apache.hadoop.yarn.conf; import org.apache.hadoop.classification.InterfaceAudience;
929
41.272727
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.conf; import java.net.InetSocketAddress; import java.util.Arrays; import java.util.Collections; import java.util.List; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ApplicationConstants; @Public @Evolving public class YarnConfiguration extends Configuration { @Private public static final String CS_CONFIGURATION_FILE= "capacity-scheduler.xml"; @Private public static final String HADOOP_POLICY_CONFIGURATION_FILE = "hadoop-policy.xml"; @Private public static final String YARN_SITE_CONFIGURATION_FILE = "yarn-site.xml"; private static final String YARN_DEFAULT_CONFIGURATION_FILE = "yarn-default.xml"; @Private public static final String CORE_SITE_CONFIGURATION_FILE = "core-site.xml"; @Private 
public static final List<String> RM_CONFIGURATION_FILES =
    Collections.unmodifiableList(Arrays.asList(
        CS_CONFIGURATION_FILE,
        HADOOP_POLICY_CONFIGURATION_FILE,
        YARN_SITE_CONFIGURATION_FILE,
        CORE_SITE_CONFIGURATION_FILE));

// Maximum number of tags a single application may carry.
@Evolving
public static final int APPLICATION_MAX_TAGS = 10;

// Maximum length, in characters, of a single application tag.
@Evolving
public static final int APPLICATION_MAX_TAG_LENGTH = 100;

// Runs once on class initialization: register deprecated keys first so that
// values read from the default/site resources below are remapped correctly.
static {
  addDeprecatedKeys();
  Configuration.addDefaultResource(YARN_DEFAULT_CONFIGURATION_FILE);
  Configuration.addDefaultResource(YARN_SITE_CONFIGURATION_FILE);
}

/**
 * Maps legacy configuration keys to their current replacements so that
 * existing configuration files using the old names keep working.
 */
private static void addDeprecatedKeys() {
  Configuration.addDeprecations(new DeprecationDelta[] {
      new DeprecationDelta("yarn.client.max-nodemanagers-proxies",
          NM_CLIENT_MAX_NM_PROXIES)
  });
}

// Configurations

public static final String YARN_PREFIX = "yarn.";

/** Delay before deleting resource to ease debugging of NM issues */
public static final String DEBUG_NM_DELETE_DELAY_SEC =
    YarnConfiguration.NM_PREFIX + "delete.debug-delay-sec";

////////////////////////////////
// IPC Configs
////////////////////////////////
public static final String IPC_PREFIX = YARN_PREFIX + "ipc.";

/** Factory to create client IPC classes.*/
public static final String IPC_CLIENT_FACTORY_CLASS =
    IPC_PREFIX + "client.factory.class";
public static final String DEFAULT_IPC_CLIENT_FACTORY_CLASS =
    "org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl";

/** Factory to create server IPC classes.*/
public static final String IPC_SERVER_FACTORY_CLASS =
    IPC_PREFIX + "server.factory.class";
public static final String DEFAULT_IPC_SERVER_FACTORY_CLASS =
    "org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl";

/** Factory to create serializable records.*/
public static final String IPC_RECORD_FACTORY_CLASS =
    IPC_PREFIX + "record.factory.class";
public static final String DEFAULT_IPC_RECORD_FACTORY_CLASS =
    "org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl";

/** RPC class implementation*/
public static final String IPC_RPC_IMPL = IPC_PREFIX + "rpc.class";
public static
final String DEFAULT_IPC_RPC_IMPL =
    "org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC";

////////////////////////////////
// Resource Manager Configs
////////////////////////////////
public static final String RM_PREFIX = "yarn.resourcemanager.";

public static final String RM_CLUSTER_ID = RM_PREFIX + "cluster-id";

public static final String RM_HOSTNAME = RM_PREFIX + "hostname";

/** The address of the applications manager interface in the RM.*/
public static final String RM_ADDRESS = RM_PREFIX + "address";
public static final int DEFAULT_RM_PORT = 8032;
public static final String DEFAULT_RM_ADDRESS =
    "0.0.0.0:" + DEFAULT_RM_PORT;

/** The actual bind address for the RM.*/
public static final String RM_BIND_HOST = RM_PREFIX + "bind-host";

/** The number of threads used to handle applications manager requests.*/
public static final String RM_CLIENT_THREAD_COUNT =
    RM_PREFIX + "client.thread-count";
public static final int DEFAULT_RM_CLIENT_THREAD_COUNT = 50;

/** Number of threads used to launch/cleanup AM.*/
public static final String RM_AMLAUNCHER_THREAD_COUNT =
    RM_PREFIX + "amlauncher.thread-count";
public static final int DEFAULT_RM_AMLAUNCHER_THREAD_COUNT = 50;

/** Retry times to connect with NM. */
// NOTE(review): "RETIRES" in the constant name is a typo for "RETRIES"; it is
// kept as-is because the identifier is part of the public API.
public static final String RM_NODEMANAGER_CONNECT_RETIRES =
    RM_PREFIX + "nodemanager-connect-retries";
public static final int DEFAULT_RM_NODEMANAGER_CONNECT_RETIRES = 10;

/** The Kerberos principal for the resource manager.*/
public static final String RM_PRINCIPAL = RM_PREFIX + "principal";

/** The address of the scheduler interface.*/
public static final String RM_SCHEDULER_ADDRESS =
    RM_PREFIX + "scheduler.address";
public static final int DEFAULT_RM_SCHEDULER_PORT = 8030;
public static final String DEFAULT_RM_SCHEDULER_ADDRESS = "0.0.0.0:" +
    DEFAULT_RM_SCHEDULER_PORT;

/** Minimum request grant-able by the RM scheduler. */
public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_MB =
    YARN_PREFIX + "scheduler.minimum-allocation-mb";
public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB = 1024;
public static final String RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES =
    YARN_PREFIX + "scheduler.minimum-allocation-vcores";
public static final int DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES = 1;

/** Maximum request grant-able by the RM scheduler. */
public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_MB =
    YARN_PREFIX + "scheduler.maximum-allocation-mb";
public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB = 8192;
public static final String RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES =
    YARN_PREFIX + "scheduler.maximum-allocation-vcores";
public static final int DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES = 4;

/** Number of threads to handle scheduler interface.*/
public static final String RM_SCHEDULER_CLIENT_THREAD_COUNT =
    RM_PREFIX + "scheduler.client.thread-count";
public static final int DEFAULT_RM_SCHEDULER_CLIENT_THREAD_COUNT = 50;

/** If the port should be included or not in the node name. The node name
 * is used by the scheduler for resource requests allocation location
 * matching.
Typically this is just the hostname, using the port is needed * when using minicluster and specific NM are required.*/ public static final String RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME = YARN_PREFIX + "scheduler.include-port-in-node-name"; public static final boolean DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME = false; /** Enable Resource Manager webapp ui actions */ public static final String RM_WEBAPP_UI_ACTIONS_ENABLED = RM_PREFIX + "webapp.ui-actions.enabled"; public static final boolean DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED = true; /** Whether the RM should enable Reservation System */ public static final String RM_RESERVATION_SYSTEM_ENABLE = RM_PREFIX + "reservation-system.enable"; public static final boolean DEFAULT_RM_RESERVATION_SYSTEM_ENABLE = false; /** The class to use as the Reservation System. */ public static final String RM_RESERVATION_SYSTEM_CLASS = RM_PREFIX + "reservation-system.class"; /** The PlanFollower for the Reservation System. */ public static final String RM_RESERVATION_SYSTEM_PLAN_FOLLOWER = RM_PREFIX + "reservation-system.plan.follower"; /** The step size of the Reservation System. */ public static final String RM_RESERVATION_SYSTEM_PLAN_FOLLOWER_TIME_STEP = RM_PREFIX + "reservation-system.planfollower.time-step"; public static final long DEFAULT_RM_RESERVATION_SYSTEM_PLAN_FOLLOWER_TIME_STEP = 1000L; /** * Enable periodic monitor threads. * @see #RM_SCHEDULER_MONITOR_POLICIES */ public static final String RM_SCHEDULER_ENABLE_MONITORS = RM_PREFIX + "scheduler.monitor.enable"; public static final boolean DEFAULT_RM_SCHEDULER_ENABLE_MONITORS = false; /** List of SchedulingEditPolicy classes affecting the scheduler. 
*/ public static final String RM_SCHEDULER_MONITOR_POLICIES = RM_PREFIX + "scheduler.monitor.policies"; /** The address of the RM web application.*/ public static final String RM_WEBAPP_ADDRESS = RM_PREFIX + "webapp.address"; public static final int DEFAULT_RM_WEBAPP_PORT = 8088; public static final String DEFAULT_RM_WEBAPP_ADDRESS = "0.0.0.0:" + DEFAULT_RM_WEBAPP_PORT; /** The https address of the RM web application.*/ public static final String RM_WEBAPP_HTTPS_ADDRESS = RM_PREFIX + "webapp.https.address"; public static final boolean YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false; public static final String YARN_SSL_SERVER_RESOURCE_DEFAULT = "ssl-server.xml"; public static final int DEFAULT_RM_WEBAPP_HTTPS_PORT = 8090; public static final String DEFAULT_RM_WEBAPP_HTTPS_ADDRESS = "0.0.0.0:" + DEFAULT_RM_WEBAPP_HTTPS_PORT; public static final String RM_RESOURCE_TRACKER_ADDRESS = RM_PREFIX + "resource-tracker.address"; public static final int DEFAULT_RM_RESOURCE_TRACKER_PORT = 8031; public static final String DEFAULT_RM_RESOURCE_TRACKER_ADDRESS = "0.0.0.0:" + DEFAULT_RM_RESOURCE_TRACKER_PORT; /** The expiry interval for application master reporting.*/ public static final String RM_AM_EXPIRY_INTERVAL_MS = YARN_PREFIX + "am.liveness-monitor.expiry-interval-ms"; public static final int DEFAULT_RM_AM_EXPIRY_INTERVAL_MS = 600000; /** How long to wait until a node manager is considered dead.*/ public static final String RM_NM_EXPIRY_INTERVAL_MS = YARN_PREFIX + "nm.liveness-monitor.expiry-interval-ms"; public static final int DEFAULT_RM_NM_EXPIRY_INTERVAL_MS = 600000; /** Are acls enabled.*/ public static final String YARN_ACL_ENABLE = YARN_PREFIX + "acl.enable"; public static final boolean DEFAULT_YARN_ACL_ENABLE = false; /** ACL of who can be admin of YARN cluster.*/ public static final String YARN_ADMIN_ACL = YARN_PREFIX + "admin.acl"; public static final String DEFAULT_YARN_ADMIN_ACL = "*"; /** ACL used in case none is found. Allows nothing. 
*/ public static final String DEFAULT_YARN_APP_ACL = " "; /** * Enable/disable intermediate-data encryption at YARN level. For now, this * only is used by the FileSystemRMStateStore to setup right file-system * security attributes. */ @Private public static final String YARN_INTERMEDIATE_DATA_ENCRYPTION = YARN_PREFIX + "intermediate-data-encryption.enable"; @Private public static final boolean DEFAULT_YARN_INTERMEDIATE_DATA_ENCRYPTION = false; /** The address of the RM admin interface.*/ public static final String RM_ADMIN_ADDRESS = RM_PREFIX + "admin.address"; public static final int DEFAULT_RM_ADMIN_PORT = 8033; public static final String DEFAULT_RM_ADMIN_ADDRESS = "0.0.0.0:" + DEFAULT_RM_ADMIN_PORT; /**Number of threads used to handle RM admin interface.*/ public static final String RM_ADMIN_CLIENT_THREAD_COUNT = RM_PREFIX + "admin.client.thread-count"; public static final int DEFAULT_RM_ADMIN_CLIENT_THREAD_COUNT = 1; /** * The maximum number of application attempts. * It's a global setting for all application masters. */ public static final String RM_AM_MAX_ATTEMPTS = RM_PREFIX + "am.max-attempts"; public static final int DEFAULT_RM_AM_MAX_ATTEMPTS = 2; /** The keytab for the resource manager.*/ public static final String RM_KEYTAB = RM_PREFIX + "keytab"; /**The kerberos principal to be used for spnego filter for RM.*/ public static final String RM_WEBAPP_SPNEGO_USER_NAME_KEY = RM_PREFIX + "webapp.spnego-principal"; /**The kerberos keytab to be used for spnego filter for RM.*/ public static final String RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY = RM_PREFIX + "webapp.spnego-keytab-file"; /** * Flag to enable override of the default kerberos authentication filter with * the RM authentication filter to allow authentication using delegation * tokens(fallback to kerberos if the tokens are missing). Only applicable * when the http authentication type is kerberos. 
*/ public static final String RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER = RM_PREFIX + "webapp.delegation-token-auth-filter.enabled"; public static final boolean DEFAULT_RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER = true; /** How long to wait until a container is considered dead.*/ public static final String RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS = RM_PREFIX + "rm.container-allocation.expiry-interval-ms"; public static final int DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS = 600000; /** Path to file with nodes to include.*/ public static final String RM_NODES_INCLUDE_FILE_PATH = RM_PREFIX + "nodes.include-path"; public static final String DEFAULT_RM_NODES_INCLUDE_FILE_PATH = ""; /** Path to file with nodes to exclude.*/ public static final String RM_NODES_EXCLUDE_FILE_PATH = RM_PREFIX + "nodes.exclude-path"; public static final String DEFAULT_RM_NODES_EXCLUDE_FILE_PATH = ""; /** Number of threads to handle resource tracker calls.*/ public static final String RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT = RM_PREFIX + "resource-tracker.client.thread-count"; public static final int DEFAULT_RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT = 50; /** The class to use as the resource scheduler.*/ public static final String RM_SCHEDULER = RM_PREFIX + "scheduler.class"; public static final String DEFAULT_RM_SCHEDULER = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler"; /** RM set next Heartbeat interval for NM */ public static final String RM_NM_HEARTBEAT_INTERVAL_MS = RM_PREFIX + "nodemanagers.heartbeat-interval-ms"; public static final long DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS = 1000; /** Number of worker threads that write the history data. 
*/ public static final String RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE = RM_PREFIX + "history-writer.multi-threaded-dispatcher.pool-size"; public static final int DEFAULT_RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE = 10; /** * The setting that controls whether yarn system metrics is published on the * timeline server or not by RM. */ public static final String RM_SYSTEM_METRICS_PUBLISHER_ENABLED = RM_PREFIX + "system-metrics-publisher.enabled"; public static final boolean DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_ENABLED = false; public static final String RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE = RM_PREFIX + "system-metrics-publisher.dispatcher.pool-size"; public static final int DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE = 10; //RM delegation token related keys public static final String RM_DELEGATION_KEY_UPDATE_INTERVAL_KEY = RM_PREFIX + "delegation.key.update-interval"; public static final long RM_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT = 24*60*60*1000; // 1 day public static final String RM_DELEGATION_TOKEN_RENEW_INTERVAL_KEY = RM_PREFIX + "delegation.token.renew-interval"; public static final long RM_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT = 24*60*60*1000; // 1 day public static final String RM_DELEGATION_TOKEN_MAX_LIFETIME_KEY = RM_PREFIX + "delegation.token.max-lifetime"; public static final long RM_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT = 7*24*60*60*1000; // 7 days public static final String RECOVERY_ENABLED = RM_PREFIX + "recovery.enabled"; public static final boolean DEFAULT_RM_RECOVERY_ENABLED = false; public static final String YARN_FAIL_FAST = YARN_PREFIX + "fail-fast"; public static final boolean DEFAULT_YARN_FAIL_FAST = true; public static final String RM_FAIL_FAST = RM_PREFIX + "fail-fast"; @Private public static final String RM_WORK_PRESERVING_RECOVERY_ENABLED = RM_PREFIX + "work-preserving-recovery.enabled"; @Private public static final boolean DEFAULT_RM_WORK_PRESERVING_RECOVERY_ENABLED = true; public static 
final String RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS = RM_PREFIX + "work-preserving-recovery.scheduling-wait-ms"; public static final long DEFAULT_RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS = 10000; /** Zookeeper interaction configs */ public static final String RM_ZK_PREFIX = RM_PREFIX + "zk-"; public static final String RM_ZK_ADDRESS = RM_ZK_PREFIX + "address"; public static final String RM_ZK_NUM_RETRIES = RM_ZK_PREFIX + "num-retries"; public static final int DEFAULT_ZK_RM_NUM_RETRIES = 1000; public static final String RM_ZK_RETRY_INTERVAL_MS = RM_ZK_PREFIX + "retry-interval-ms"; public static final int DEFAULT_RM_ZK_RETRY_INTERVAL_MS = 1000; public static final String RM_ZK_TIMEOUT_MS = RM_ZK_PREFIX + "timeout-ms"; public static final int DEFAULT_RM_ZK_TIMEOUT_MS = 10000; public static final String RM_ZK_ACL = RM_ZK_PREFIX + "acl"; public static final String DEFAULT_RM_ZK_ACL = "world:anyone:rwcda"; public static final String RM_ZK_AUTH = RM_ZK_PREFIX + "auth"; public static final String ZK_STATE_STORE_PREFIX = RM_PREFIX + "zk-state-store."; /** Parent znode path under which ZKRMStateStore will create znodes */ public static final String ZK_RM_STATE_STORE_PARENT_PATH = ZK_STATE_STORE_PREFIX + "parent-path"; public static final String DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH = "/rmstore"; /** Root node ACLs for fencing */ public static final String ZK_RM_STATE_STORE_ROOT_NODE_ACL = ZK_STATE_STORE_PREFIX + "root-node.acl"; /** HA related configs */ public static final String RM_HA_PREFIX = RM_PREFIX + "ha."; public static final String RM_HA_ENABLED = RM_HA_PREFIX + "enabled"; public static final boolean DEFAULT_RM_HA_ENABLED = false; public static final String RM_HA_IDS = RM_HA_PREFIX + "rm-ids"; public static final String RM_HA_ID = RM_HA_PREFIX + "id"; /** Store the related configuration files in File System */ public static final String FS_BASED_RM_CONF_STORE = RM_PREFIX + "configuration.file-system-based-store"; public static final String 
DEFAULT_FS_BASED_RM_CONF_STORE = "/yarn/conf"; public static final String RM_CONFIGURATION_PROVIDER_CLASS = RM_PREFIX + "configuration.provider-class"; public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS = "org.apache.hadoop.yarn.LocalConfigurationProvider"; public static final String YARN_AUTHORIZATION_PROVIDER = YARN_PREFIX + "authorization-provider"; private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTP = Collections.unmodifiableList(Arrays.asList( RM_ADDRESS, RM_SCHEDULER_ADDRESS, RM_ADMIN_ADDRESS, RM_RESOURCE_TRACKER_ADDRESS, RM_WEBAPP_ADDRESS)); private static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS_HTTPS = Collections.unmodifiableList(Arrays.asList( RM_ADDRESS, RM_SCHEDULER_ADDRESS, RM_ADMIN_ADDRESS, RM_RESOURCE_TRACKER_ADDRESS, RM_WEBAPP_HTTPS_ADDRESS)); public static final String AUTO_FAILOVER_PREFIX = RM_HA_PREFIX + "automatic-failover."; public static final String AUTO_FAILOVER_ENABLED = AUTO_FAILOVER_PREFIX + "enabled"; public static final boolean DEFAULT_AUTO_FAILOVER_ENABLED = true; public static final String AUTO_FAILOVER_EMBEDDED = AUTO_FAILOVER_PREFIX + "embedded"; public static final boolean DEFAULT_AUTO_FAILOVER_EMBEDDED = true; public static final String AUTO_FAILOVER_ZK_BASE_PATH = AUTO_FAILOVER_PREFIX + "zk-base-path"; public static final String DEFAULT_AUTO_FAILOVER_ZK_BASE_PATH = "/yarn-leader-election"; public static final String CLIENT_FAILOVER_PREFIX = YARN_PREFIX + "client.failover-"; public static final String CLIENT_FAILOVER_PROXY_PROVIDER = CLIENT_FAILOVER_PREFIX + "proxy-provider"; public static final String DEFAULT_CLIENT_FAILOVER_PROXY_PROVIDER = "org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider"; public static final String CLIENT_FAILOVER_MAX_ATTEMPTS = CLIENT_FAILOVER_PREFIX + "max-attempts"; public static final String CLIENT_FAILOVER_SLEEPTIME_BASE_MS = CLIENT_FAILOVER_PREFIX + "sleep-base-ms"; public static final String CLIENT_FAILOVER_SLEEPTIME_MAX_MS = 
CLIENT_FAILOVER_PREFIX + "sleep-max-ms"; public static final String CLIENT_FAILOVER_RETRIES = CLIENT_FAILOVER_PREFIX + "retries"; public static final int DEFAULT_CLIENT_FAILOVER_RETRIES = 0; public static final String CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS = CLIENT_FAILOVER_PREFIX + "retries-on-socket-timeouts"; public static final int DEFAULT_CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS = 0; //////////////////////////////// // RM state store configs //////////////////////////////// /** The class to use as the persistent store.*/ public static final String RM_STORE = RM_PREFIX + "store.class"; /** URI for FileSystemRMStateStore */ public static final String FS_RM_STATE_STORE_URI = RM_PREFIX + "fs.state-store.uri"; public static final String FS_RM_STATE_STORE_RETRY_POLICY_SPEC = RM_PREFIX + "fs.state-store.retry-policy-spec"; public static final String DEFAULT_FS_RM_STATE_STORE_RETRY_POLICY_SPEC = "2000, 500"; public static final String FS_RM_STATE_STORE_NUM_RETRIES = RM_PREFIX + "fs.state-store.num-retries"; public static final int DEFAULT_FS_RM_STATE_STORE_NUM_RETRIES = 0; public static final String FS_RM_STATE_STORE_RETRY_INTERVAL_MS = RM_PREFIX + "fs.state-store.retry-interval-ms"; public static final long DEFAULT_FS_RM_STATE_STORE_RETRY_INTERVAL_MS = 1000L; public static final String RM_LEVELDB_STORE_PATH = RM_PREFIX + "leveldb-state-store.path"; /** The maximum number of completed applications RM keeps. 
*/ public static final String RM_MAX_COMPLETED_APPLICATIONS = RM_PREFIX + "max-completed-applications"; public static final int DEFAULT_RM_MAX_COMPLETED_APPLICATIONS = 10000; /** * The maximum number of completed applications RM state store keeps, by * default equals to DEFAULT_RM_MAX_COMPLETED_APPLICATIONS */ public static final String RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS = RM_PREFIX + "state-store.max-completed-applications"; public static final int DEFAULT_RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS = DEFAULT_RM_MAX_COMPLETED_APPLICATIONS; /** Default application name */ public static final String DEFAULT_APPLICATION_NAME = "N/A"; /** Default application type */ public static final String DEFAULT_APPLICATION_TYPE = "YARN"; /** Default application type length */ public static final int APPLICATION_TYPE_LENGTH = 20; /** Default queue name */ public static final String DEFAULT_QUEUE_NAME = "default"; /** * Buckets (in minutes) for the number of apps running in each queue. */ public static final String RM_METRICS_RUNTIME_BUCKETS = RM_PREFIX + "metrics.runtime.buckets"; /** * Default sizes of the runtime metric buckets in minutes. 
*/ public static final String DEFAULT_RM_METRICS_RUNTIME_BUCKETS = "60,300,1440"; public static final String RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = RM_PREFIX + "am-rm-tokens.master-key-rolling-interval-secs"; public static final long DEFAULT_RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = 24 * 60 * 60; public static final String RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = RM_PREFIX + "container-tokens.master-key-rolling-interval-secs"; public static final long DEFAULT_RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = 24 * 60 * 60; public static final String RM_NMTOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = RM_PREFIX + "nm-tokens.master-key-rolling-interval-secs"; public static final long DEFAULT_RM_NMTOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = 24 * 60 * 60; public static final String RM_NODEMANAGER_MINIMUM_VERSION = RM_PREFIX + "nodemanager.minimum.version"; public static final String DEFAULT_RM_NODEMANAGER_MINIMUM_VERSION = "NONE"; /** * RM proxy users' prefix */ public static final String RM_PROXY_USER_PREFIX = RM_PREFIX + "proxyuser."; //////////////////////////////// // Node Manager Configs //////////////////////////////// /** Prefix for all node manager configs.*/ public static final String NM_PREFIX = "yarn.nodemanager."; /** Environment variables that will be sent to containers.*/ public static final String NM_ADMIN_USER_ENV = NM_PREFIX + "admin-env"; public static final String DEFAULT_NM_ADMIN_USER_ENV = "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX"; /** Environment variables that containers may override rather than use NodeManager's default.*/ public static final String NM_ENV_WHITELIST = NM_PREFIX + "env-whitelist"; public static final String DEFAULT_NM_ENV_WHITELIST = StringUtils.join(",", Arrays.asList(ApplicationConstants.Environment.JAVA_HOME.key(), ApplicationConstants.Environment.HADOOP_COMMON_HOME.key(), ApplicationConstants.Environment.HADOOP_HDFS_HOME.key(), ApplicationConstants.Environment.HADOOP_CONF_DIR.key(), 
ApplicationConstants.Environment.CLASSPATH_PREPEND_DISTCACHE.key(), ApplicationConstants.Environment.HADOOP_YARN_HOME.key())); /** address of node manager IPC.*/ public static final String NM_ADDRESS = NM_PREFIX + "address"; public static final int DEFAULT_NM_PORT = 0; public static final String DEFAULT_NM_ADDRESS = "0.0.0.0:" + DEFAULT_NM_PORT; /** The actual bind address or the NM.*/ public static final String NM_BIND_HOST = NM_PREFIX + "bind-host"; /** who will execute(launch) the containers.*/ public static final String NM_CONTAINER_EXECUTOR = NM_PREFIX + "container-executor.class"; /** * Adjustment to make to the container os scheduling priority. * The valid values for this could vary depending on the platform. * On Linux, higher values mean run the containers at a less * favorable priority than the NM. * The value specified is an int. */ public static final String NM_CONTAINER_EXECUTOR_SCHED_PRIORITY = NM_PREFIX + "container-executor.os.sched.priority.adjustment"; public static final int DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY = 0; /** Number of threads container manager uses.*/ public static final String NM_CONTAINER_MGR_THREAD_COUNT = NM_PREFIX + "container-manager.thread-count"; public static final int DEFAULT_NM_CONTAINER_MGR_THREAD_COUNT = 20; /** Number of threads used in cleanup.*/ public static final String NM_DELETE_THREAD_COUNT = NM_PREFIX + "delete.thread-count"; public static final int DEFAULT_NM_DELETE_THREAD_COUNT = 4; /** Keytab for NM.*/ public static final String NM_KEYTAB = NM_PREFIX + "keytab"; /**List of directories to store localized files in.*/ public static final String NM_LOCAL_DIRS = NM_PREFIX + "local-dirs"; public static final String DEFAULT_NM_LOCAL_DIRS = "/tmp/nm-local-dir"; /** * Number of files in each localized directories * Avoid tuning this too low. 
*/ public static final String NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY = NM_PREFIX + "local-cache.max-files-per-directory"; public static final int DEFAULT_NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY = 8192; /** Address where the localizer IPC is.*/ public static final String NM_LOCALIZER_ADDRESS = NM_PREFIX + "localizer.address"; public static final int DEFAULT_NM_LOCALIZER_PORT = 8040; public static final String DEFAULT_NM_LOCALIZER_ADDRESS = "0.0.0.0:" + DEFAULT_NM_LOCALIZER_PORT; /** Interval in between cache cleanups.*/ public static final String NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS = NM_PREFIX + "localizer.cache.cleanup.interval-ms"; public static final long DEFAULT_NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS = 10 * 60 * 1000; /** * Target size of localizer cache in MB, per nodemanager. It is a target * retention size that only includes resources with PUBLIC and PRIVATE * visibility and excludes resources with APPLICATION visibility */ public static final String NM_LOCALIZER_CACHE_TARGET_SIZE_MB = NM_PREFIX + "localizer.cache.target-size-mb"; public static final long DEFAULT_NM_LOCALIZER_CACHE_TARGET_SIZE_MB = 10 * 1024; /** Number of threads to handle localization requests.*/ public static final String NM_LOCALIZER_CLIENT_THREAD_COUNT = NM_PREFIX + "localizer.client.thread-count"; public static final int DEFAULT_NM_LOCALIZER_CLIENT_THREAD_COUNT = 5; /** Number of threads to use for localization fetching.*/ public static final String NM_LOCALIZER_FETCH_THREAD_COUNT = NM_PREFIX + "localizer.fetch.thread-count"; public static final int DEFAULT_NM_LOCALIZER_FETCH_THREAD_COUNT = 4; /** Where to store container logs.*/ public static final String NM_LOG_DIRS = NM_PREFIX + "log-dirs"; public static final String DEFAULT_NM_LOG_DIRS = "/tmp/logs"; public static final String NM_RESOURCEMANAGER_MINIMUM_VERSION = NM_PREFIX + "resourcemanager.minimum.version"; public static final String DEFAULT_NM_RESOURCEMANAGER_MINIMUM_VERSION = "NONE"; /** Interval at which the delayed token 
removal thread runs */
public static final String RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS =
    RM_PREFIX + "delayed.delegation-token.removal-interval-ms";
// Fixed literal suffix: the lower-case 'l' originally used here is easily
// misread as the digit '1'; the value itself is unchanged (30 seconds).
public static final long
    DEFAULT_RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS = 30000L;

/** Delegation Token renewer thread count */
public static final String RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT =
    RM_PREFIX + "delegation-token-renewer.thread-count";
public static final int DEFAULT_RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT = 50;

public static final String RM_PROXY_USER_PRIVILEGES_ENABLED =
    RM_PREFIX + "proxy-user-privileges.enabled";
public static final boolean DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED = false;

/**
 * How many diagnostics/failure messages can be saved in RM for
 * log aggregation. It also defines the number of diagnostics/failure
 * messages that can be shown in the log aggregation web ui.
 */
public static final String RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY =
    RM_PREFIX + "max-log-aggregation-diagnostics-in-memory";
public static final int
    DEFAULT_RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY = 10;

/** Whether to enable log aggregation */
public static final String LOG_AGGREGATION_ENABLED =
    YARN_PREFIX + "log-aggregation-enable";
public static final boolean DEFAULT_LOG_AGGREGATION_ENABLED = false;

/**
 * How long to wait before deleting aggregated logs, -1 disables.
 * Be careful: set this too small and you will spam the name node.
 */
public static final String LOG_AGGREGATION_RETAIN_SECONDS =
    YARN_PREFIX + "log-aggregation.retain-seconds";
public static final long DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS = -1;

/**
 * How long to wait between aggregated log retention checks. If set to
 * a value {@literal <=} 0 then the value is computed as one-tenth of the
 * log retention setting. Be careful: set this too small and you will spam
 * the name node.
*/ public static final String LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS = YARN_PREFIX + "log-aggregation.retain-check-interval-seconds"; public static final long DEFAULT_LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS = -1; /** * How long for ResourceManager to wait for NodeManager to report its * log aggregation status. If waiting time of which the log aggregation status * is reported from NodeManager exceeds the configured value, RM will report * log aggregation status for this NodeManager as TIME_OUT */ public static final String LOG_AGGREGATION_STATUS_TIME_OUT_MS = YARN_PREFIX + "log-aggregation-status.time-out.ms"; public static final long DEFAULT_LOG_AGGREGATION_STATUS_TIME_OUT_MS = 10 * 60 * 1000; /** * Number of seconds to retain logs on the NodeManager. Only applicable if Log * aggregation is disabled */ public static final String NM_LOG_RETAIN_SECONDS = NM_PREFIX + "log.retain-seconds"; public static final long DEFAULT_NM_LOG_RETAIN_SECONDS = 3 * 60 * 60; /** * Define how often NMs wake up and upload log files */ public static final String NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS = NM_PREFIX + "log-aggregation.roll-monitoring-interval-seconds"; public static final long DEFAULT_NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS = -1; /** * Number of threads used in log cleanup. 
Only applicable if Log aggregation
 * is disabled */
public static final String NM_LOG_DELETION_THREADS_COUNT =
    NM_PREFIX + "log.deletion-threads-count";
public static final int DEFAULT_NM_LOG_DELETE_THREAD_COUNT = 4;

/** Where to aggregate logs to.*/
public static final String NM_REMOTE_APP_LOG_DIR =
    NM_PREFIX + "remote-app-log-dir";
public static final String DEFAULT_NM_REMOTE_APP_LOG_DIR = "/tmp/logs";

/**
 * The remote log dir will be created at
 * NM_REMOTE_APP_LOG_DIR/${user}/NM_REMOTE_APP_LOG_DIR_SUFFIX/${appId}
 */
public static final String NM_REMOTE_APP_LOG_DIR_SUFFIX =
    NM_PREFIX + "remote-app-log-dir-suffix";
public static final String DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX="logs";

public static final String YARN_LOG_SERVER_URL =
    YARN_PREFIX + "log.server.url";

public static final String YARN_TRACKING_URL_GENERATOR =
    YARN_PREFIX + "tracking.url.generator";

/** Amount of memory in MB that can be allocated for containers.*/
public static final String NM_PMEM_MB = NM_PREFIX + "resource.memory-mb";
public static final int DEFAULT_NM_PMEM_MB = 8 * 1024;

/** Amount of memory in MB that has been reserved for non-yarn use. */
public static final String NM_SYSTEM_RESERVED_PMEM_MB =
    NM_PREFIX + "resource.system-reserved-memory-mb";

/** Specifies whether physical memory check is enabled. */
public static final String NM_PMEM_CHECK_ENABLED =
    NM_PREFIX + "pmem-check-enabled";
public static final boolean DEFAULT_NM_PMEM_CHECK_ENABLED = true;

/** Specifies whether virtual memory check is enabled. */
public static final String NM_VMEM_CHECK_ENABLED =
    NM_PREFIX + "vmem-check-enabled";
public static final boolean DEFAULT_NM_VMEM_CHECK_ENABLED = true;

/** Conversion ratio for physical memory to virtual memory.
*/ public static final String NM_VMEM_PMEM_RATIO = NM_PREFIX + "vmem-pmem-ratio"; public static final float DEFAULT_NM_VMEM_PMEM_RATIO = 2.1f; /** Number of Virtual CPU Cores which can be allocated for containers.*/ public static final String NM_VCORES = NM_PREFIX + "resource.cpu-vcores"; public static final int DEFAULT_NM_VCORES = 8; /** Count logical processors(like hyperthreads) as cores. */ public static final String NM_COUNT_LOGICAL_PROCESSORS_AS_CORES = NM_PREFIX + "resource.count-logical-processors-as-cores"; public static final boolean DEFAULT_NM_COUNT_LOGICAL_PROCESSORS_AS_CORES = false; /** Multiplier to convert physical cores to vcores. */ public static final String NM_PCORES_VCORES_MULTIPLIER = NM_PREFIX + "resource.pcores-vcores-multiplier"; public static final float DEFAULT_NM_PCORES_VCORES_MULTIPLIER = 1.0f; /** Percentage of overall CPU which can be allocated for containers. */ public static final String NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT = NM_PREFIX + "resource.percentage-physical-cpu-limit"; public static final int DEFAULT_NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT = 100; /** Enable or disable node hardware capability detection. */ public static final String NM_ENABLE_HARDWARE_CAPABILITY_DETECTION = NM_PREFIX + "resource.detect-hardware-capabilities"; public static final boolean DEFAULT_NM_ENABLE_HARDWARE_CAPABILITY_DETECTION = false; /** * Prefix for disk configurations. Work in progress: This configuration * parameter may be changed/removed in the future. */ @Private public static final String NM_DISK_RESOURCE_PREFIX = NM_PREFIX + "resource.disk."; /** * This setting controls if resource handling for disk operations is enabled. * Work in progress: This configuration parameter may be changed/removed in * the future */ @Private public static final String NM_DISK_RESOURCE_ENABLED = NM_DISK_RESOURCE_PREFIX + "enabled"; /** Disk as a resource is disabled by default. 
**/ @Private public static final boolean DEFAULT_NM_DISK_RESOURCE_ENABLED = false; public static final String NM_NETWORK_RESOURCE_PREFIX = NM_PREFIX + "resource.network."; /** * This setting controls if resource handling for network bandwidth is * enabled. Work in progress: This configuration parameter may be * changed/removed in the future */ @Private public static final String NM_NETWORK_RESOURCE_ENABLED = NM_NETWORK_RESOURCE_PREFIX + "enabled"; /** Network as a resource is disabled by default. **/ @Private public static final boolean DEFAULT_NM_NETWORK_RESOURCE_ENABLED = false; /** * Specifies the interface to be used for applying network throttling rules. * Work in progress: This configuration parameter may be changed/removed in * the future */ @Private public static final String NM_NETWORK_RESOURCE_INTERFACE = NM_NETWORK_RESOURCE_PREFIX + "interface"; @Private public static final String DEFAULT_NM_NETWORK_RESOURCE_INTERFACE = "eth0"; /** * Specifies the total available outbound bandwidth on the node. Work in * progress: This configuration parameter may be changed/removed in the future */ @Private public static final String NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT = NM_NETWORK_RESOURCE_PREFIX + "outbound-bandwidth-mbit"; @Private public static final int DEFAULT_NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT = 1000; /** * Specifies the total outbound bandwidth available to YARN containers. * defaults to NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT if not specified. 
* Work in progress: This configuration parameter may be changed/removed in * the future */ @Private public static final String NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT = NM_NETWORK_RESOURCE_PREFIX + "outbound-bandwidth-yarn-mbit"; /** NM Webapp address.**/ public static final String NM_WEBAPP_ADDRESS = NM_PREFIX + "webapp.address"; public static final int DEFAULT_NM_WEBAPP_PORT = 8042; public static final String DEFAULT_NM_WEBAPP_ADDRESS = "0.0.0.0:" + DEFAULT_NM_WEBAPP_PORT; /** NM Webapp https address.**/ public static final String NM_WEBAPP_HTTPS_ADDRESS = NM_PREFIX + "webapp.https.address"; public static final int DEFAULT_NM_WEBAPP_HTTPS_PORT = 8044; public static final String DEFAULT_NM_WEBAPP_HTTPS_ADDRESS = "0.0.0.0:" + DEFAULT_NM_WEBAPP_HTTPS_PORT; /** How often to monitor containers.*/ public final static String NM_CONTAINER_MON_INTERVAL_MS = NM_PREFIX + "container-monitor.interval-ms"; public final static int DEFAULT_NM_CONTAINER_MON_INTERVAL_MS = 3000; /** Class that calculates containers current resource utilization.*/ public static final String NM_CONTAINER_MON_RESOURCE_CALCULATOR = NM_PREFIX + "container-monitor.resource-calculator.class"; /** Class that calculates process tree resource utilization.*/ public static final String NM_CONTAINER_MON_PROCESS_TREE = NM_PREFIX + "container-monitor.process-tree.class"; public static final String PROCFS_USE_SMAPS_BASED_RSS_ENABLED = NM_PREFIX + "container-monitor.procfs-tree.smaps-based-rss.enabled"; public static final boolean DEFAULT_PROCFS_USE_SMAPS_BASED_RSS_ENABLED = false; /** Enable/disable container metrics. */ @Private public static final String NM_CONTAINER_METRICS_ENABLE = NM_PREFIX + "container-metrics.enable"; @Private public static final boolean DEFAULT_NM_CONTAINER_METRICS_ENABLE = true; /** Container metrics flush period. -1 for flush on completion. 
*/ @Private public static final String NM_CONTAINER_METRICS_PERIOD_MS = NM_PREFIX + "container-metrics.period-ms"; @Private public static final int DEFAULT_NM_CONTAINER_METRICS_PERIOD_MS = -1; /** Prefix for all node manager disk health checker configs. */ private static final String NM_DISK_HEALTH_CHECK_PREFIX = "yarn.nodemanager.disk-health-checker."; /** * Enable/Disable disks' health checker. Default is true. An expert level * configuration property. */ public static final String NM_DISK_HEALTH_CHECK_ENABLE = NM_DISK_HEALTH_CHECK_PREFIX + "enable"; /** Frequency of running disks' health checker. */ public static final String NM_DISK_HEALTH_CHECK_INTERVAL_MS = NM_DISK_HEALTH_CHECK_PREFIX + "interval-ms"; /** By default, disks' health is checked every 2 minutes. */ public static final long DEFAULT_NM_DISK_HEALTH_CHECK_INTERVAL_MS = 2 * 60 * 1000; /** * The minimum fraction of number of disks to be healthy for the nodemanager * to launch new containers. This applies to nm-local-dirs and nm-log-dirs. */ public static final String NM_MIN_HEALTHY_DISKS_FRACTION = NM_DISK_HEALTH_CHECK_PREFIX + "min-healthy-disks"; /** * By default, at least 25% of disks are to be healthy to say that the node is * healthy in terms of disks. */ public static final float DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION = 0.25F; /** * The maximum percentage of disk space that can be used after which a disk is * marked as offline. Values can range from 0.0 to 100.0. If the value is * greater than or equal to 100, NM will check for full disk. This applies to * nm-local-dirs and nm-log-dirs. */ public static final String NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE = NM_DISK_HEALTH_CHECK_PREFIX + "max-disk-utilization-per-disk-percentage"; /** * By default, 90% of the disk can be used before it is marked as offline. */ public static final float DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE = 90.0F; /** * The minimum space that must be available on a local dir for it to be used. 
* This applies to nm-local-dirs and nm-log-dirs. */ public static final String NM_MIN_PER_DISK_FREE_SPACE_MB = NM_DISK_HEALTH_CHECK_PREFIX + "min-free-space-per-disk-mb"; /** * By default, all of the disk can be used before it is marked as offline. */ public static final long DEFAULT_NM_MIN_PER_DISK_FREE_SPACE_MB = 0; /** Frequency of running node health script.*/ public static final String NM_HEALTH_CHECK_INTERVAL_MS = NM_PREFIX + "health-checker.interval-ms"; public static final long DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS = 10 * 60 * 1000; /** Health check script time out period.*/ public static final String NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS = NM_PREFIX + "health-checker.script.timeout-ms"; public static final long DEFAULT_NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS = 2 * DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS; /** The health check script to run.*/ public static final String NM_HEALTH_CHECK_SCRIPT_PATH = NM_PREFIX + "health-checker.script.path"; /** The arguments to pass to the health check script.*/ public static final String NM_HEALTH_CHECK_SCRIPT_OPTS = NM_PREFIX + "health-checker.script.opts"; /** The JVM options used on forking ContainerLocalizer process by container executor. 
*/ public static final String NM_CONTAINER_LOCALIZER_JAVA_OPTS_KEY = NM_PREFIX + "container-localizer.java.opts"; public static final String NM_CONTAINER_LOCALIZER_JAVA_OPTS_DEFAULT = "-Xmx256m"; /** The Docker image name(For DockerContainerExecutor).*/ public static final String NM_DOCKER_CONTAINER_EXECUTOR_IMAGE_NAME = NM_PREFIX + "docker-container-executor.image-name"; /** The name of the docker executor (For DockerContainerExecutor).*/ public static final String NM_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME = NM_PREFIX + "docker-container-executor.exec-name"; /** The default docker executor (For DockerContainerExecutor).*/ public static final String NM_DEFAULT_DOCKER_CONTAINER_EXECUTOR_EXEC_NAME = "/usr/bin/docker"; /** The path to the Linux container executor.*/ public static final String NM_LINUX_CONTAINER_EXECUTOR_PATH = NM_PREFIX + "linux-container-executor.path"; /** * The UNIX group that the linux-container-executor should run as. * This is intended to be set as part of container-executor.cfg. */ public static final String NM_LINUX_CONTAINER_GROUP = NM_PREFIX + "linux-container-executor.group"; /** * If linux-container-executor should limit itself to one user * when running in non-secure mode. */ public static final String NM_NONSECURE_MODE_LIMIT_USERS= NM_PREFIX + "linux-container-executor.nonsecure-mode.limit-users"; public static final boolean DEFAULT_NM_NONSECURE_MODE_LIMIT_USERS = true; /** * The UNIX user that containers will run as when Linux-container-executor * is used in nonsecure mode (a use case for this is using cgroups). */ public static final String NM_NONSECURE_MODE_LOCAL_USER_KEY = NM_PREFIX + "linux-container-executor.nonsecure-mode.local-user"; public static final String DEFAULT_NM_NONSECURE_MODE_LOCAL_USER = "nobody"; /** * The allowed pattern for UNIX user names enforced by * Linux-container-executor when used in nonsecure mode (use case for this * is using cgroups). 
The default value is taken from /usr/sbin/adduser */ public static final String NM_NONSECURE_MODE_USER_PATTERN_KEY = NM_PREFIX + "linux-container-executor.nonsecure-mode.user-pattern"; public static final String DEFAULT_NM_NONSECURE_MODE_USER_PATTERN = "^[_.A-Za-z0-9][-@_.A-Za-z0-9]{0,255}?[$]?$"; /** The type of resource enforcement to use with the * linux container executor. */ public static final String NM_LINUX_CONTAINER_RESOURCES_HANDLER = NM_PREFIX + "linux-container-executor.resources-handler.class"; /** The path the linux container executor should use for cgroups */ public static final String NM_LINUX_CONTAINER_CGROUPS_HIERARCHY = NM_PREFIX + "linux-container-executor.cgroups.hierarchy"; /** Whether the linux container executor should mount cgroups if not found */ public static final String NM_LINUX_CONTAINER_CGROUPS_MOUNT = NM_PREFIX + "linux-container-executor.cgroups.mount"; /** Where the linux container executor should mount cgroups if not found */ public static final String NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH = NM_PREFIX + "linux-container-executor.cgroups.mount-path"; /** * Whether the apps should run in strict resource usage mode(not allowed to * use spare CPU) */ public static final String NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE = NM_PREFIX + "linux-container-executor.cgroups.strict-resource-usage"; public static final boolean DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE = false; /** * Interval of time the linux container executor should try cleaning up * cgroups entry when cleaning up a container. This is required due to what * it seems a race condition because the SIGTERM/SIGKILL is asynch. */ public static final String NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT = NM_PREFIX + "linux-container-executor.cgroups.delete-timeout-ms"; public static final long DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT = 1000; /** * Delay between attempts to remove linux cgroup. 
*/
  public static final String NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY =
      NM_PREFIX + "linux-container-executor.cgroups.delete-delay-ms";

  public static final long DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY =
      20;

  /**
   * Indicates if memory and CPU limits will be set for the Windows Job
   * Object for the containers launched by the default container executor.
   */
  public static final String NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED =
      NM_PREFIX + "windows-container.memory-limit.enabled";
  public static final boolean DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED = false;

  public static final String NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED =
      NM_PREFIX + "windows-container.cpu-limit.enabled";
  public static final boolean DEFAULT_NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED = false;

  /**
   * The Windows group that the windows-secure-container-executor
   * should run as.
   */
  public static final String NM_WINDOWS_SECURE_CONTAINER_GROUP =
      NM_PREFIX + "windows-secure-container-executor.group";

  /** T-file compression types used to compress aggregated logs.*/
  public static final String NM_LOG_AGG_COMPRESSION_TYPE =
      NM_PREFIX + "log-aggregation.compression-type";
  public static final String DEFAULT_NM_LOG_AGG_COMPRESSION_TYPE = "none";

  /** The kerberos principal for the node manager.*/
  public static final String NM_PRINCIPAL =
      NM_PREFIX + "principal";

  public static final String NM_AUX_SERVICES =
      NM_PREFIX + "aux-services";

  public static final String NM_AUX_SERVICE_FMT =
      NM_PREFIX + "aux-services.%s.class";

  public static final String NM_USER_HOME_DIR =
      NM_PREFIX + "user-home-dir";

  /**The kerberos principal to be used for spnego filter for NM.*/
  public static final String NM_WEBAPP_SPNEGO_USER_NAME_KEY =
      NM_PREFIX + "webapp.spnego-principal";

  /**The kerberos keytab to be used for spnego filter for NM.*/
  public static final String NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY =
      NM_PREFIX + "webapp.spnego-keytab-file";

  public static final String DEFAULT_NM_USER_HOME_DIR= "/home/";

  public static final String
NM_RECOVERY_PREFIX = NM_PREFIX + "recovery."; public static final String NM_RECOVERY_ENABLED = NM_RECOVERY_PREFIX + "enabled"; public static final boolean DEFAULT_NM_RECOVERY_ENABLED = false; public static final String NM_RECOVERY_DIR = NM_RECOVERY_PREFIX + "dir"; public static final String NM_RECOVERY_SUPERVISED = NM_RECOVERY_PREFIX + "supervised"; public static final boolean DEFAULT_NM_RECOVERY_SUPERVISED = false; //////////////////////////////// // Web Proxy Configs //////////////////////////////// public static final String PROXY_PREFIX = "yarn.web-proxy."; /** The kerberos principal for the proxy.*/ public static final String PROXY_PRINCIPAL = PROXY_PREFIX + "principal"; /** Keytab for Proxy.*/ public static final String PROXY_KEYTAB = PROXY_PREFIX + "keytab"; /** The address for the web proxy.*/ public static final String PROXY_ADDRESS = PROXY_PREFIX + "address"; public static final int DEFAULT_PROXY_PORT = 9099; public static final String DEFAULT_PROXY_ADDRESS = "0.0.0.0:" + DEFAULT_PROXY_PORT; /** * YARN Service Level Authorization */ public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL = "security.resourcetracker.protocol.acl"; public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONCLIENT_PROTOCOL = "security.applicationclient.protocol.acl"; public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCEMANAGER_ADMINISTRATION_PROTOCOL = "security.resourcemanager-administration.protocol.acl"; public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_PROTOCOL = "security.applicationmaster.protocol.acl"; public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL = "security.containermanagement.protocol.acl"; public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER = "security.resourcelocalizer.protocol.acl"; public static final String YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONHISTORY_PROTOCOL = 
"security.applicationhistory.protocol.acl"; /** No. of milliseconds to wait between sending a SIGTERM and SIGKILL * to a running container */ public static final String NM_SLEEP_DELAY_BEFORE_SIGKILL_MS = NM_PREFIX + "sleep-delay-before-sigkill.ms"; public static final long DEFAULT_NM_SLEEP_DELAY_BEFORE_SIGKILL_MS = 250; /** Max time to wait for a process to come up when trying to cleanup * container resources */ public static final String NM_PROCESS_KILL_WAIT_MS = NM_PREFIX + "process-kill-wait.ms"; public static final long DEFAULT_NM_PROCESS_KILL_WAIT_MS = 2000; /** Max time to wait to establish a connection to RM */ public static final String RESOURCEMANAGER_CONNECT_MAX_WAIT_MS = RM_PREFIX + "connect.max-wait.ms"; public static final long DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_MS = 15 * 60 * 1000; /** Time interval between each attempt to connect to RM */ public static final String RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS = RM_PREFIX + "connect.retry-interval.ms"; public static final long DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS = 30 * 1000; /** * CLASSPATH for YARN applications. A comma-separated list of CLASSPATH * entries */ public static final String YARN_APPLICATION_CLASSPATH = YARN_PREFIX + "application.classpath"; /** * Default platform-agnostic CLASSPATH for YARN applications. A * comma-separated list of CLASSPATH entries. The parameter expansion marker * will be replaced with real parameter expansion marker ('%' for Windows and * '$' for Linux) by NodeManager on container launch. For example: {{VAR}} * will be replaced as $VAR on Linux, and %VAR% on Windows. 
*/ @Public @Unstable public static final String[] DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH= { ApplicationConstants.Environment.HADOOP_CONF_DIR.$$(), ApplicationConstants.Environment.HADOOP_COMMON_HOME.$$() + "/share/hadoop/common/*", ApplicationConstants.Environment.HADOOP_COMMON_HOME.$$() + "/share/hadoop/common/lib/*", ApplicationConstants.Environment.HADOOP_HDFS_HOME.$$() + "/share/hadoop/hdfs/*", ApplicationConstants.Environment.HADOOP_HDFS_HOME.$$() + "/share/hadoop/hdfs/lib/*", ApplicationConstants.Environment.HADOOP_YARN_HOME.$$() + "/share/hadoop/yarn/*", ApplicationConstants.Environment.HADOOP_YARN_HOME.$$() + "/share/hadoop/yarn/lib/*" }; /** * <p> * Default platform-specific CLASSPATH for YARN applications. A * comma-separated list of CLASSPATH entries constructed based on the client * OS environment expansion syntax. * </p> * <p> * Note: Use {@link #DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH} for * cross-platform practice i.e. submit an application from a Windows client to * a Linux/Unix server or vice versa. * </p> */ public static final String[] DEFAULT_YARN_APPLICATION_CLASSPATH = { ApplicationConstants.Environment.HADOOP_CONF_DIR.$(), ApplicationConstants.Environment.HADOOP_COMMON_HOME.$() + "/share/hadoop/common/*", ApplicationConstants.Environment.HADOOP_COMMON_HOME.$() + "/share/hadoop/common/lib/*", ApplicationConstants.Environment.HADOOP_HDFS_HOME.$() + "/share/hadoop/hdfs/*", ApplicationConstants.Environment.HADOOP_HDFS_HOME.$() + "/share/hadoop/hdfs/lib/*", ApplicationConstants.Environment.HADOOP_YARN_HOME.$() + "/share/hadoop/yarn/*", ApplicationConstants.Environment.HADOOP_YARN_HOME.$() + "/share/hadoop/yarn/lib/*" }; /** Container temp directory */ public static final String DEFAULT_CONTAINER_TEMP_DIR = "./tmp"; public static final String IS_MINI_YARN_CLUSTER = YARN_PREFIX + "is.minicluster"; public static final String YARN_MC_PREFIX = YARN_PREFIX + "minicluster."; /** Whether to use fixed ports with the minicluster. 
*/ public static final String YARN_MINICLUSTER_FIXED_PORTS = YARN_MC_PREFIX + "fixed.ports"; /** * Default is false to be able to run tests concurrently without port * conflicts. */ public static final boolean DEFAULT_YARN_MINICLUSTER_FIXED_PORTS = false; /** * Whether the NM should use RPC to connect to the RM. Default is false. * Can be set to true only when using fixed ports. */ public static final String YARN_MINICLUSTER_USE_RPC = YARN_MC_PREFIX + "use-rpc"; public static final boolean DEFAULT_YARN_MINICLUSTER_USE_RPC = false; /** * Whether users are explicitly trying to control resource monitoring * configuration for the MiniYARNCluster. Disabled by default. */ public static final String YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING = YARN_MC_PREFIX + "control-resource-monitoring"; public static final boolean DEFAULT_YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING = false; /** Allow changing the memory for the NodeManager in the MiniYARNCluster */ public static final String YARN_MINICLUSTER_NM_PMEM_MB = YARN_MC_PREFIX + YarnConfiguration.NM_PMEM_MB; public static final int DEFAULT_YARN_MINICLUSTER_NM_PMEM_MB = 4 * 1024; /** The log directory for the containers */ public static final String YARN_APP_CONTAINER_LOG_DIR = YARN_PREFIX + "app.container.log.dir"; public static final String YARN_APP_CONTAINER_LOG_SIZE = YARN_PREFIX + "app.container.log.filesize"; public static final String YARN_APP_CONTAINER_LOG_BACKUPS = YARN_PREFIX + "app.container.log.backups"; //////////////////////////////// // Timeline Service Configs //////////////////////////////// public static final String TIMELINE_SERVICE_PREFIX = YARN_PREFIX + "timeline-service."; // mark app-history related configs @Private as application history is going // to be integrated into the timeline service @Private public static final String APPLICATION_HISTORY_PREFIX = TIMELINE_SERVICE_PREFIX + "generic-application-history."; /** * The setting that controls whether application history service is * enabled or not. 
*/ @Private public static final String APPLICATION_HISTORY_ENABLED = APPLICATION_HISTORY_PREFIX + "enabled"; @Private public static final boolean DEFAULT_APPLICATION_HISTORY_ENABLED = false; /** Application history store class */ @Private public static final String APPLICATION_HISTORY_STORE = APPLICATION_HISTORY_PREFIX + "store-class"; /** Save container meta-info in the application history store. */ @Private public static final String APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO = APPLICATION_HISTORY_PREFIX + "save-non-am-container-meta-info"; @Private public static final boolean DEFAULT_APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO = true; /** URI for FileSystemApplicationHistoryStore */ @Private public static final String FS_APPLICATION_HISTORY_STORE_URI = APPLICATION_HISTORY_PREFIX + "fs-history-store.uri"; /** T-file compression types used to compress history data.*/ @Private public static final String FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE = APPLICATION_HISTORY_PREFIX + "fs-history-store.compression-type"; @Private public static final String DEFAULT_FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE = "none"; /** The setting that controls whether timeline service is enabled or not. */ public static final String TIMELINE_SERVICE_ENABLED = TIMELINE_SERVICE_PREFIX + "enabled"; public static final boolean DEFAULT_TIMELINE_SERVICE_ENABLED = false; /** host:port address for timeline service RPC APIs. */ public static final String TIMELINE_SERVICE_ADDRESS = TIMELINE_SERVICE_PREFIX + "address"; public static final int DEFAULT_TIMELINE_SERVICE_PORT = 10200; public static final String DEFAULT_TIMELINE_SERVICE_ADDRESS = "0.0.0.0:" + DEFAULT_TIMELINE_SERVICE_PORT; /** The listening endpoint for the timeline service application.*/ public static final String TIMELINE_SERVICE_BIND_HOST = TIMELINE_SERVICE_PREFIX + "bind-host"; /** The number of threads to handle client RPC API requests. 
*/ public static final String TIMELINE_SERVICE_HANDLER_THREAD_COUNT = TIMELINE_SERVICE_PREFIX + "handler-thread-count"; public static final int DEFAULT_TIMELINE_SERVICE_CLIENT_THREAD_COUNT = 10; /** The address of the timeline service web application.*/ public static final String TIMELINE_SERVICE_WEBAPP_ADDRESS = TIMELINE_SERVICE_PREFIX + "webapp.address"; public static final int DEFAULT_TIMELINE_SERVICE_WEBAPP_PORT = 8188; public static final String DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS = "0.0.0.0:" + DEFAULT_TIMELINE_SERVICE_WEBAPP_PORT; /** The https address of the timeline service web application.*/ public static final String TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS = TIMELINE_SERVICE_PREFIX + "webapp.https.address"; public static final int DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_PORT = 8190; public static final String DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS = "0.0.0.0:" + DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_PORT; /** * Defines the max number of applications could be fetched using * REST API or application history protocol and shown in timeline * server web ui. */ public static final String APPLICATION_HISTORY_MAX_APPS = APPLICATION_HISTORY_PREFIX + "max-applications"; public static final long DEFAULT_APPLICATION_HISTORY_MAX_APPS = 10000; /** Timeline service store class */ public static final String TIMELINE_SERVICE_STORE = TIMELINE_SERVICE_PREFIX + "store-class"; /** Timeline service enable data age off */ public static final String TIMELINE_SERVICE_TTL_ENABLE = TIMELINE_SERVICE_PREFIX + "ttl-enable"; /** Timeline service length of time to retain data */ public static final String TIMELINE_SERVICE_TTL_MS = TIMELINE_SERVICE_PREFIX + "ttl-ms"; public static final long DEFAULT_TIMELINE_SERVICE_TTL_MS = 1000 * 60 * 60 * 24 * 7; /** Timeline service rolling period. Valid values are daily, half_daily, * quarter_daily, and hourly. 
*/ public static final String TIMELINE_SERVICE_ROLLING_PERIOD = TIMELINE_SERVICE_PREFIX + "rolling-period"; /** Roll a new database each hour. */ public static final String DEFAULT_TIMELINE_SERVICE_ROLLING_PERIOD = "hourly"; /** Implementation specific configuration prefix for Timeline Service * leveldb. */ public static final String TIMELINE_SERVICE_LEVELDB_PREFIX = TIMELINE_SERVICE_PREFIX + "leveldb-timeline-store."; /** Timeline service leveldb path */ public static final String TIMELINE_SERVICE_LEVELDB_PATH = TIMELINE_SERVICE_LEVELDB_PREFIX + "path"; /** Timeline service leveldb read cache (uncompressed blocks). This is * per rolling instance so should be tuned if using rolling leveldb * timeline store */ public static final String TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE = TIMELINE_SERVICE_LEVELDB_PREFIX + "read-cache-size"; /** Default leveldb read cache size if no configuration is specified. */ public static final long DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE = 100 * 1024 * 1024; /** Timeline service leveldb write buffer size. */ public static final String TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE = TIMELINE_SERVICE_LEVELDB_PREFIX + "write-buffer-size"; /** Default leveldb write buffer size if no configuration is specified. This * is per rolling instance so should be tuned if using rolling leveldb * timeline store. */ public static final int DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE = 16 * 1024 * 1024; /** Timeline service leveldb write batch size. This value can be tuned down * to reduce lock time for ttl eviction. 
*/
  public static final String TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE =
      TIMELINE_SERVICE_LEVELDB_PREFIX + "write-batch-size";

  /** Default leveldb write batch size if no configuration is specified. */
  public static final int DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE =
      10000;

  /** Timeline service leveldb start time read cache (number of entities) */
  public static final String
      TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE =
      TIMELINE_SERVICE_LEVELDB_PREFIX + "start-time-read-cache-size";

  public static final int
      DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE = 10000;

  /** Timeline service leveldb start time write cache (number of entities) */
  public static final String
      TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE =
      TIMELINE_SERVICE_LEVELDB_PREFIX + "start-time-write-cache-size";

  public static final int
      DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE = 10000;

  /** Timeline service leveldb interval to wait between deletion rounds */
  public static final String TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS =
      TIMELINE_SERVICE_LEVELDB_PREFIX + "ttl-interval-ms";

  public static final long DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS =
      1000 * 60 * 5;

  /** Timeline service leveldb number of concurrent open files. Tuned this
   * configuration to stay within system limits. This is per rolling instance
   * so should be tuned if using rolling leveldb timeline store. */
  public static final String TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES =
      TIMELINE_SERVICE_LEVELDB_PREFIX + "max-open-files";

  /** Default leveldb max open files if no configuration is specified.
*/ public static final int DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES = 1000; /** The Kerberos principal for the timeline server.*/ public static final String TIMELINE_SERVICE_PRINCIPAL = TIMELINE_SERVICE_PREFIX + "principal"; /** The Kerberos keytab for the timeline server.*/ public static final String TIMELINE_SERVICE_KEYTAB = TIMELINE_SERVICE_PREFIX + "keytab"; /** Enables cross origin support for timeline server.*/ public static final String TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED = TIMELINE_SERVICE_PREFIX + "http-cross-origin.enabled"; /** Default value for cross origin support for timeline server.*/ public static final boolean TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT = false; /** Timeline client settings */ public static final String TIMELINE_SERVICE_CLIENT_PREFIX = TIMELINE_SERVICE_PREFIX + "client."; /** Timeline client call, max retries (-1 means no limit) */ public static final String TIMELINE_SERVICE_CLIENT_MAX_RETRIES = TIMELINE_SERVICE_CLIENT_PREFIX + "max-retries"; public static final int DEFAULT_TIMELINE_SERVICE_CLIENT_MAX_RETRIES = 30; /** Timeline client call, retry interval */ public static final String TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS = TIMELINE_SERVICE_CLIENT_PREFIX + "retry-interval-ms"; public static final long DEFAULT_TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS = 1000; /** Timeline client policy for whether connections are fatal */ public static final String TIMELINE_SERVICE_CLIENT_BEST_EFFORT = TIMELINE_SERVICE_CLIENT_PREFIX + "best-effort"; public static final boolean DEFAULT_TIMELINE_SERVICE_CLIENT_BEST_EFFORT = false; /** Flag to enable recovery of timeline service */ public static final String TIMELINE_SERVICE_RECOVERY_ENABLED = TIMELINE_SERVICE_PREFIX + "recovery.enabled"; public static final boolean DEFAULT_TIMELINE_SERVICE_RECOVERY_ENABLED = false; /** Timeline service state store class */ public static final String TIMELINE_SERVICE_STATE_STORE_CLASS = TIMELINE_SERVICE_PREFIX + "state-store-class"; public 
static final String TIMELINE_SERVICE_LEVELDB_STATE_STORE_PREFIX = TIMELINE_SERVICE_PREFIX + "leveldb-state-store."; /** Timeline service state store leveldb path */ public static final String TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH = TIMELINE_SERVICE_LEVELDB_STATE_STORE_PREFIX + "path"; // Timeline delegation token related keys public static final String TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL = TIMELINE_SERVICE_PREFIX + "delegation.key.update-interval"; public static final long DEFAULT_TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL = 24*60*60*1000; // 1 day public static final String TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL = TIMELINE_SERVICE_PREFIX + "delegation.token.renew-interval"; public static final long DEFAULT_TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL = 24*60*60*1000; // 1 day public static final String TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME = TIMELINE_SERVICE_PREFIX + "delegation.token.max-lifetime"; public static final long DEFAULT_TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME = 7*24*60*60*1000; // 7 days // /////////////////////////////// // Shared Cache Configs // /////////////////////////////// public static final String SHARED_CACHE_PREFIX = "yarn.sharedcache."; // common configs /** whether the shared cache is enabled/disabled */ public static final String SHARED_CACHE_ENABLED = SHARED_CACHE_PREFIX + "enabled"; public static final boolean DEFAULT_SHARED_CACHE_ENABLED = false; /** The config key for the shared cache root directory. */ public static final String SHARED_CACHE_ROOT = SHARED_CACHE_PREFIX + "root-dir"; public static final String DEFAULT_SHARED_CACHE_ROOT = "/sharedcache"; /** The config key for the level of nested directories before getting to the * checksum directory. 
*/ public static final String SHARED_CACHE_NESTED_LEVEL = SHARED_CACHE_PREFIX + "nested-level"; public static final int DEFAULT_SHARED_CACHE_NESTED_LEVEL = 3; // Shared Cache Manager Configs public static final String SCM_STORE_PREFIX = SHARED_CACHE_PREFIX + "store."; public static final String SCM_STORE_CLASS = SCM_STORE_PREFIX + "class"; public static final String DEFAULT_SCM_STORE_CLASS = "org.apache.hadoop.yarn.server.sharedcachemanager.store.InMemorySCMStore"; public static final String SCM_APP_CHECKER_CLASS = SHARED_CACHE_PREFIX + "app-checker.class"; public static final String DEFAULT_SCM_APP_CHECKER_CLASS = "org.apache.hadoop.yarn.server.sharedcachemanager.RemoteAppChecker"; /** The address of the SCM admin interface. */ public static final String SCM_ADMIN_ADDRESS = SHARED_CACHE_PREFIX + "admin.address"; public static final int DEFAULT_SCM_ADMIN_PORT = 8047; public static final String DEFAULT_SCM_ADMIN_ADDRESS = "0.0.0.0:" + DEFAULT_SCM_ADMIN_PORT; /** Number of threads used to handle SCM admin interface. */ public static final String SCM_ADMIN_CLIENT_THREAD_COUNT = SHARED_CACHE_PREFIX + "admin.thread-count"; public static final int DEFAULT_SCM_ADMIN_CLIENT_THREAD_COUNT = 1; /** The address of the SCM web application. */ public static final String SCM_WEBAPP_ADDRESS = SHARED_CACHE_PREFIX + "webapp.address"; public static final int DEFAULT_SCM_WEBAPP_PORT = 8788; public static final String DEFAULT_SCM_WEBAPP_ADDRESS = "0.0.0.0:" + DEFAULT_SCM_WEBAPP_PORT; // In-memory SCM store configuration public static final String IN_MEMORY_STORE_PREFIX = SCM_STORE_PREFIX + "in-memory."; /** * A resource in the InMemorySCMStore is considered stale if the time since * the last reference exceeds the staleness period. This value is specified in * minutes. 
*/ public static final String IN_MEMORY_STALENESS_PERIOD_MINS = IN_MEMORY_STORE_PREFIX + "staleness-period-mins"; public static final int DEFAULT_IN_MEMORY_STALENESS_PERIOD_MINS = 7 * 24 * 60; /** * Initial delay before the in-memory store runs its first check to remove * dead initial applications. Specified in minutes. */ public static final String IN_MEMORY_INITIAL_DELAY_MINS = IN_MEMORY_STORE_PREFIX + "initial-delay-mins"; public static final int DEFAULT_IN_MEMORY_INITIAL_DELAY_MINS = 10; /** * The frequency at which the in-memory store checks to remove dead initial * applications. Specified in minutes. */ public static final String IN_MEMORY_CHECK_PERIOD_MINS = IN_MEMORY_STORE_PREFIX + "check-period-mins"; public static final int DEFAULT_IN_MEMORY_CHECK_PERIOD_MINS = 12 * 60; // SCM Cleaner service configuration private static final String SCM_CLEANER_PREFIX = SHARED_CACHE_PREFIX + "cleaner."; /** * The frequency at which a cleaner task runs. Specified in minutes. */ public static final String SCM_CLEANER_PERIOD_MINS = SCM_CLEANER_PREFIX + "period-mins"; public static final int DEFAULT_SCM_CLEANER_PERIOD_MINS = 24 * 60; /** * Initial delay before the first cleaner task is scheduled. Specified in * minutes. */ public static final String SCM_CLEANER_INITIAL_DELAY_MINS = SCM_CLEANER_PREFIX + "initial-delay-mins"; public static final int DEFAULT_SCM_CLEANER_INITIAL_DELAY_MINS = 10; /** * The time to sleep between processing each shared cache resource. Specified * in milliseconds. */ public static final String SCM_CLEANER_RESOURCE_SLEEP_MS = SCM_CLEANER_PREFIX + "resource-sleep-ms"; public static final long DEFAULT_SCM_CLEANER_RESOURCE_SLEEP_MS = 0L; /** The address of the node manager interface in the SCM. 
*/ public static final String SCM_UPLOADER_SERVER_ADDRESS = SHARED_CACHE_PREFIX + "uploader.server.address"; public static final int DEFAULT_SCM_UPLOADER_SERVER_PORT = 8046; public static final String DEFAULT_SCM_UPLOADER_SERVER_ADDRESS = "0.0.0.0:" + DEFAULT_SCM_UPLOADER_SERVER_PORT; /** * The number of SCM threads used to handle notify requests from the node * manager. */ public static final String SCM_UPLOADER_SERVER_THREAD_COUNT = SHARED_CACHE_PREFIX + "uploader.server.thread-count"; public static final int DEFAULT_SCM_UPLOADER_SERVER_THREAD_COUNT = 50; /** The address of the client interface in the SCM. */ public static final String SCM_CLIENT_SERVER_ADDRESS = SHARED_CACHE_PREFIX + "client-server.address"; public static final int DEFAULT_SCM_CLIENT_SERVER_PORT = 8045; public static final String DEFAULT_SCM_CLIENT_SERVER_ADDRESS = "0.0.0.0:" + DEFAULT_SCM_CLIENT_SERVER_PORT; /** The number of threads used to handle shared cache manager requests. */ public static final String SCM_CLIENT_SERVER_THREAD_COUNT = SHARED_CACHE_PREFIX + "client-server.thread-count"; public static final int DEFAULT_SCM_CLIENT_SERVER_THREAD_COUNT = 50; /** the checksum algorithm implementation **/ public static final String SHARED_CACHE_CHECKSUM_ALGO_IMPL = SHARED_CACHE_PREFIX + "checksum.algo.impl"; public static final String DEFAULT_SHARED_CACHE_CHECKSUM_ALGO_IMPL = "org.apache.hadoop.yarn.sharedcache.ChecksumSHA256Impl"; // node manager (uploader) configs /** * The replication factor for the node manager uploader for the shared cache. 
*/ public static final String SHARED_CACHE_NM_UPLOADER_REPLICATION_FACTOR = SHARED_CACHE_PREFIX + "nm.uploader.replication.factor"; public static final int DEFAULT_SHARED_CACHE_NM_UPLOADER_REPLICATION_FACTOR = 10; public static final String SHARED_CACHE_NM_UPLOADER_THREAD_COUNT = SHARED_CACHE_PREFIX + "nm.uploader.thread-count"; public static final int DEFAULT_SHARED_CACHE_NM_UPLOADER_THREAD_COUNT = 20; //////////////////////////////// // Other Configs //////////////////////////////// /** * Use YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS instead. * The interval of the yarn client's querying application state after * application submission. The unit is millisecond. */ @Deprecated public static final String YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS = YARN_PREFIX + "client.app-submission.poll-interval"; /** * The interval that the yarn client library uses to poll the completion * status of the asynchronous API of application client protocol. */ public static final String YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS = YARN_PREFIX + "client.application-client-protocol.poll-interval-ms"; public static final long DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS = 200; /** * The duration that the yarn client library waits, cumulatively across polls, * for an expected state change to occur. Defaults to -1, which indicates no * limit. 
*/ public static final String YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS = YARN_PREFIX + "client.application-client-protocol.poll-timeout-ms"; public static final long DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS = -1; /** * Max number of threads in NMClientAsync to process container management * events */ public static final String NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE = YARN_PREFIX + "client.nodemanager-client-async.thread-pool-max-size"; public static final int DEFAULT_NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE = 500; /** * Maximum number of proxy connections to cache for node managers. If set * to a value greater than zero then the cache is enabled and the NMClient * and MRAppMaster will cache the specified number of node manager proxies. * There will be at max one proxy per node manager. Ex. configuring it to a * value of 5 will make sure that client will at max have 5 proxies cached * with 5 different node managers. These connections for these proxies will * be timed out if idle for more than the system wide idle timeout period. * Note that this could cause issues on large clusters as many connections * could linger simultaneously and lead to a large number of connection * threads. The token used for authentication will be used only at * connection creation time. If a new token is received then the earlier * connection should be closed in order to use the new token. This and * {@link YarnConfiguration#NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE} are related * and should be in sync (no need for them to be equal). * If the value of this property is zero then the connection cache is * disabled and connections will use a zero idle timeout to prevent too * many connection threads on large clusters. 
*/ public static final String NM_CLIENT_MAX_NM_PROXIES = YARN_PREFIX + "client.max-cached-nodemanagers-proxies"; public static final int DEFAULT_NM_CLIENT_MAX_NM_PROXIES = 0; /** Max time to wait to establish a connection to NM */ public static final String CLIENT_NM_CONNECT_MAX_WAIT_MS = YARN_PREFIX + "client.nodemanager-connect.max-wait-ms"; public static final long DEFAULT_CLIENT_NM_CONNECT_MAX_WAIT_MS = 3 * 60 * 1000; /** Time interval between each attempt to connect to NM */ public static final String CLIENT_NM_CONNECT_RETRY_INTERVAL_MS = YARN_PREFIX + "client.nodemanager-connect.retry-interval-ms"; public static final long DEFAULT_CLIENT_NM_CONNECT_RETRY_INTERVAL_MS = 10 * 1000; public static final String YARN_HTTP_POLICY_KEY = YARN_PREFIX + "http.policy"; public static final String YARN_HTTP_POLICY_DEFAULT = HttpConfig.Policy.HTTP_ONLY .name(); /** * Node-labels configurations */ public static final String NODE_LABELS_PREFIX = YARN_PREFIX + "node-labels."; /** URI for NodeLabelManager */ public static final String FS_NODE_LABELS_STORE_ROOT_DIR = NODE_LABELS_PREFIX + "fs-store.root-dir"; public static final String FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC = NODE_LABELS_PREFIX + "fs-store.retry-policy-spec"; public static final String DEFAULT_FS_NODE_LABELS_STORE_RETRY_POLICY_SPEC = "2000, 500"; /** * Flag to indicate if the node labels feature enabled, by default it's * disabled */ public static final String NODE_LABELS_ENABLED = NODE_LABELS_PREFIX + "enabled"; public static final boolean DEFAULT_NODE_LABELS_ENABLED = false; public static final String NODELABEL_CONFIGURATION_TYPE = NODE_LABELS_PREFIX + "configuration-type"; public static final String CENTALIZED_NODELABEL_CONFIGURATION_TYPE = "centralized"; public static final String DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE = "distributed"; public static final String DEFAULT_NODELABEL_CONFIGURATION_TYPE = CENTALIZED_NODELABEL_CONFIGURATION_TYPE; public static final String MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY = 
YARN_PREFIX + "cluster.max-application-priority"; public static final int DEFAULT_CLUSTER_LEVEL_APPLICATION_PRIORITY = 0; @Private public static boolean isDistributedNodeLabelConfiguration(Configuration conf) { return DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE.equals(conf.get( NODELABEL_CONFIGURATION_TYPE, DEFAULT_NODELABEL_CONFIGURATION_TYPE)); } public YarnConfiguration() { super(); } public YarnConfiguration(Configuration conf) { super(conf); if (! (conf instanceof YarnConfiguration)) { this.reloadConfiguration(); } } @Private public static List<String> getServiceAddressConfKeys(Configuration conf) { return useHttps(conf) ? RM_SERVICES_ADDRESS_CONF_KEYS_HTTPS : RM_SERVICES_ADDRESS_CONF_KEYS_HTTP; } /** * Get the socket address for <code>name</code> property as a * <code>InetSocketAddress</code>. On a HA cluster, * this fetches the address corresponding to the RM identified by * {@link #RM_HA_ID}. * @param name property name. * @param defaultAddress the default value * @param defaultPort the default port * @return InetSocketAddress */ @Override public InetSocketAddress getSocketAddr( String name, String defaultAddress, int defaultPort) { String address; if (HAUtil.isHAEnabled(this) && getServiceAddressConfKeys(this).contains(name)) { address = HAUtil.getConfValueForRMInstance(name, defaultAddress, this); } else { address = get(name, defaultAddress); } return NetUtils.createSocketAddr(address, defaultPort, name); } @Override public InetSocketAddress updateConnectAddr(String name, InetSocketAddress addr) { String prefix = name; if (HAUtil.isHAEnabled(this) && getServiceAddressConfKeys(this).contains(name)) { prefix = HAUtil.addSuffix(prefix, HAUtil.getRMHAId(this)); } return super.updateConnectAddr(prefix, addr); } @Private public static int getRMDefaultPortNumber(String addressPrefix, Configuration conf) { if (addressPrefix.equals(YarnConfiguration.RM_ADDRESS)) { return YarnConfiguration.DEFAULT_RM_PORT; } else if 
(addressPrefix.equals(YarnConfiguration.RM_SCHEDULER_ADDRESS)) { return YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT; } else if (addressPrefix.equals(YarnConfiguration.RM_WEBAPP_ADDRESS)) { return YarnConfiguration.DEFAULT_RM_WEBAPP_PORT; } else if (addressPrefix.equals(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS)) { return YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT; } else if (addressPrefix .equals(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS)) { return YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT; } else if (addressPrefix.equals(YarnConfiguration.RM_ADMIN_ADDRESS)) { return YarnConfiguration.DEFAULT_RM_ADMIN_PORT; } else { throw new HadoopIllegalArgumentException( "Invalid RM RPC address Prefix: " + addressPrefix + ". The valid value should be one of " + getServiceAddressConfKeys(conf)); } } public static boolean useHttps(Configuration conf) { return HttpConfig.Policy.HTTPS_ONLY == HttpConfig.Policy.fromString(conf .get(YARN_HTTP_POLICY_KEY, YARN_HTTP_POLICY_DEFAULT)); } public static boolean shouldRMFailFast(Configuration conf) { return conf.getBoolean(YarnConfiguration.RM_FAIL_FAST, conf.getBoolean(YarnConfiguration.YARN_FAIL_FAST, YarnConfiguration.DEFAULT_YARN_FAIL_FAST)); } @Private public static String getClusterId(Configuration conf) { String clusterId = conf.get(YarnConfiguration.RM_CLUSTER_ID); if (clusterId == null) { throw new HadoopIllegalArgumentException("Configuration doesn't specify " + YarnConfiguration.RM_CLUSTER_ID); } return clusterId; } /* For debugging. mp configurations to system output as XML format. */ public static void main(String[] args) throws Exception { new YarnConfiguration(new Configuration()).writeXml(System.out); } }
85,920
40.79037
97
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.conf;

import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

import java.net.InetSocketAddress;
import java.util.Collection;

/**
 * Utility methods for Resource Manager High Availability (HA)
 * configuration: querying whether HA and automatic failover are enabled,
 * resolving the local RM-id, and verifying/normalizing the per-RM-id
 * service addresses inside a {@link Configuration}.
 */
@InterfaceAudience.Private
public class HAUtil {
  // final: the logger is created once per class load and never reassigned.
  private static final Log LOG = LogFactory.getLog(HAUtil.class);

  // Prepended to every message thrown via throwBadConfigurationException.
  public static final String BAD_CONFIG_MESSAGE_PREFIX =
    "Invalid configuration! ";

  private HAUtil() { /* Hidden constructor */ }

  // Wraps a configuration-error message into the runtime exception type that
  // callers of verifyAndSetConfiguration() expect.
  private static void throwBadConfigurationException(String msg) {
    throw new YarnRuntimeException(BAD_CONFIG_MESSAGE_PREFIX + msg);
  }

  /**
   * Returns true if Resource Manager HA is configured.
   *
   * @param conf Configuration
   * @return true if HA is configured in the configuration; else false.
   */
  public static boolean isHAEnabled(Configuration conf) {
    return conf.getBoolean(YarnConfiguration.RM_HA_ENABLED,
        YarnConfiguration.DEFAULT_RM_HA_ENABLED);
  }

  /**
   * Returns true if automatic RM failover is enabled in the configuration.
   *
   * @param conf Configuration
   * @return true if automatic failover is enabled; else false.
   */
  public static boolean isAutomaticFailoverEnabled(Configuration conf) {
    return conf.getBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED,
        YarnConfiguration.DEFAULT_AUTO_FAILOVER_ENABLED);
  }

  /**
   * Returns true only when automatic failover is both enabled and configured
   * to run embedded inside the RM.
   *
   * @param conf Configuration
   * @return true if automatic failover is enabled and embedded.
   */
  public static boolean isAutomaticFailoverEnabledAndEmbedded(
      Configuration conf) {
    return isAutomaticFailoverEnabled(conf) &&
        isAutomaticFailoverEmbedded(conf);
  }

  /**
   * Returns the value of the embedded-failover flag. Note this does not
   * check whether automatic failover itself is enabled.
   *
   * @param conf Configuration
   * @return true if the embedded failover flag is set.
   */
  public static boolean isAutomaticFailoverEmbedded(Configuration conf) {
    return conf.getBoolean(YarnConfiguration.AUTO_FAILOVER_EMBEDDED,
        YarnConfiguration.DEFAULT_AUTO_FAILOVER_EMBEDDED);
  }

  /**
   * Verify configuration for Resource Manager HA.
   * Mutates {@code conf}: normalizes the RM-id list, records the local
   * RM-id, and copies per-instance service addresses onto the base keys.
   *
   * @param conf Configuration
   * @throws YarnRuntimeException if any required HA setting is missing or
   *         invalid
   */
  public static void verifyAndSetConfiguration(Configuration conf)
    throws YarnRuntimeException {
    verifyAndSetRMHAIdsList(conf);
    verifyAndSetCurrentRMHAId(conf);
    verifyAndSetAllServiceAddresses(conf);
  }

  /**
   * Verify configuration that there are at least two RM-ids
   * and RPC addresses are specified for each RM-id.
   * Then set the RM-ids (re-written as a trimmed, comma-joined list).
   */
  private static void verifyAndSetRMHAIdsList(Configuration conf) {
    Collection<String> ids =
      conf.getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS);
    if (ids.size() < 2) {
      throwBadConfigurationException(
        getInvalidValueMessage(YarnConfiguration.RM_HA_IDS,
          conf.get(YarnConfiguration.RM_HA_IDS) +
          "\nHA mode requires atleast two RMs"));
    }

    StringBuilder setValue = new StringBuilder();
    for (String id: ids) {
      // verify the RM service addresses configurations for every RMIds
      for (String prefix : YarnConfiguration.getServiceAddressConfKeys(conf)) {
        checkAndSetRMRPCAddress(prefix, id, conf);
      }
      setValue.append(id);
      setValue.append(",");
    }
    // Drop the trailing comma before writing the normalized list back.
    conf.set(YarnConfiguration.RM_HA_IDS,
      setValue.substring(0, setValue.length() - 1));
  }

  // Resolves the local RM-id (explicit or by matching a local address),
  // validates it against the configured id list, and stores it in conf.
  private static void verifyAndSetCurrentRMHAId(Configuration conf) {
    String rmId = getRMHAId(conf);
    if (rmId == null) {
      StringBuilder msg = new StringBuilder();
      msg.append("Can not find valid RM_HA_ID. None of ");
      for (String id : conf
          .getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS)) {
        msg.append(addSuffix(YarnConfiguration.RM_ADDRESS, id) + " ");
      }
      msg.append(" are matching" +
          " the local address OR " + YarnConfiguration.RM_HA_ID + " is not" +
          " specified in HA Configuration");
      throwBadConfigurationException(msg.toString());
    } else {
      Collection<String> ids = getRMHAIds(conf);
      if (!ids.contains(rmId)) {
        throwBadConfigurationException(
          getRMHAIdNeedToBeIncludedMessage(ids.toString(), rmId));
      }
    }
    conf.set(YarnConfiguration.RM_HA_ID, rmId);
  }

  // Copies the per-RM-instance value for the given key onto the base key,
  // translating the two possible IllegalArgumentException sources into
  // distinct configuration-error messages.
  private static void verifyAndSetConfValue(String prefix, Configuration conf) {
    String confKey = null;
    String confValue = null;
    try {
      confKey = getConfKeyForRMInstance(prefix, conf);
      confValue = getConfValueForRMInstance(prefix, conf);
      conf.set(prefix, confValue);
    } catch (YarnRuntimeException yre) {
      // Error at getRMHAId()
      throw yre;
    } catch (IllegalArgumentException iae) {
      String errmsg;
      if (confKey == null) {
        // Error at addSuffix
        errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID,
          getRMHAId(conf));
      } else {
        // Error at Configuration#set.
        errmsg = getNeedToSetValueMessage(confKey);
      }
      throwBadConfigurationException(errmsg);
    }
  }

  /**
   * Copies every per-RM-instance service-address value onto its base key.
   *
   * @param conf Configuration to verify and mutate
   */
  public static void verifyAndSetAllServiceAddresses(Configuration conf) {
    for (String confKey : YarnConfiguration.getServiceAddressConfKeys(conf)) {
     verifyAndSetConfValue(confKey, conf);
    }
  }

  /**
   * @param conf Configuration. Please use getRMHAIds to check.
   * @return RM Ids on success
   */
  public static Collection<String> getRMHAIds(Configuration conf) {
    return  conf.getStringCollection(YarnConfiguration.RM_HA_IDS);
  }

  /**
   * @param conf Configuration. Please use verifyAndSetRMHAId to check.
   * @return RM Id on success
   */
  public static String getRMHAId(Configuration conf) {
    int found = 0;
    String currentRMId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
    if(currentRMId == null) {
      // No explicit id: pick the id whose configured RM address resolves to
      // a local network address on this host.
      for(String rmId : getRMHAIds(conf)) {
        String key = addSuffix(YarnConfiguration.RM_ADDRESS, rmId);
        String addr = conf.get(key);
        if (addr == null) {
          continue;
        }
        InetSocketAddress s;
        try {
          s = NetUtils.createSocketAddr(addr);
        } catch (Exception e) {
          // Malformed address: log and keep scanning the remaining ids.
          LOG.warn("Exception in creating socket address " + addr, e);
          continue;
        }
        if (!s.isUnresolved() && NetUtils.isLocalAddress(s.getAddress())) {
          currentRMId = rmId.trim();
          found++;
        }
      }
    }
    if (found > 1) { // Only one address must match the local address
      String msg = "The HA Configuration has multiple addresses that match "
          + "local node's address.";
      throw new HadoopIllegalArgumentException(msg);
    }
    return currentRMId;
  }

  @VisibleForTesting
  static String getNeedToSetValueMessage(String confKey) {
    return confKey + " needs to be set in a HA configuration.";
  }

  @VisibleForTesting
  static String getInvalidValueMessage(String confKey,
                                       String invalidValue) {
    return "Invalid value of "  + confKey +". "
      + "Current value is " + invalidValue;
  }

  @VisibleForTesting
  static String getRMHAIdNeedToBeIncludedMessage(String ids,
                                                 String rmId) {
    return YarnConfiguration.RM_HA_IDS + "("
      + ids +  ") need to contain " + YarnConfiguration.RM_HA_ID + "("
      + rmId + ") in a HA configuration.";
  }

  @VisibleForTesting
  static String getRMHAIdsWarningMessage(String ids) {
    // ids is already a String; the former ids.toString() call was a no-op.
    return  "Resource Manager HA is enabled, but " +
      YarnConfiguration.RM_HA_IDS + " has only one id(" +
      ids + ")";
  }

  // Returns the per-RM-instance key for service-address prefixes (also
  // ensuring the RPC address is populated); other prefixes pass through.
  @InterfaceAudience.Private
  @VisibleForTesting
  static String getConfKeyForRMInstance(String prefix, Configuration conf) {
    if (!YarnConfiguration.getServiceAddressConfKeys(conf).contains(prefix)) {
      return prefix;
    } else {
      String RMId = getRMHAId(conf);
      checkAndSetRMRPCAddress(prefix, RMId, conf);
      return addSuffix(prefix, RMId);
    }
  }

  /**
   * Looks up the per-RM-instance value for the given key.
   *
   * @param prefix base configuration key
   * @param conf Configuration
   * @return trimmed value of the instance-specific key, or null if unset
   */
  public static String getConfValueForRMInstance(String prefix,
                                                 Configuration conf) {
    String confKey = getConfKeyForRMInstance(prefix, conf);
    String retVal = conf.getTrimmed(confKey);
    if (LOG.isTraceEnabled()) {
      LOG.trace("getConfValueForRMInstance: prefix = " + prefix +
          "; confKey being looked up = " + confKey +
          "; value being set to = " + retVal);
    }
    return retVal;
  }

  /**
   * Looks up the per-RM-instance value for the given key, falling back to
   * the supplied default when unset.
   *
   * @param prefix base configuration key
   * @param defaultValue value returned when the instance key is unset
   * @param conf Configuration
   * @return the instance-specific value or {@code defaultValue}
   */
  public static String getConfValueForRMInstance(
      String prefix, String defaultValue, Configuration conf) {
    String value = getConfValueForRMInstance(prefix, conf);
    return (value == null) ? defaultValue : value;
  }

  /** Add non empty and non null suffix to a key */
  public static String addSuffix(String key, String suffix) {
    if (suffix == null || suffix.isEmpty()) {
      return key;
    }
    if (suffix.startsWith(".")) {
      throw new IllegalArgumentException("suffix '" + suffix + "' should not "
          + "already have '.' prepended.");
    }
    return key + "." + suffix;
  }

  // Ensures <prefix>.<RMId> has an address: when missing, derives it from
  // yarn.resourcemanager.hostname.<RMId> plus the service's default port, or
  // fails with a configuration error when neither is set.
  private static void checkAndSetRMRPCAddress(String prefix, String RMId,
      Configuration conf) {
    String rpcAddressConfKey = null;
    try {
      rpcAddressConfKey = addSuffix(prefix, RMId);
      if (conf.getTrimmed(rpcAddressConfKey) == null) {
        String hostNameConfKey = addSuffix(YarnConfiguration.RM_HOSTNAME, RMId);
        String confVal = conf.getTrimmed(hostNameConfKey);
        if (confVal == null) {
          throwBadConfigurationException(getNeedToSetValueMessage(
              hostNameConfKey + " or " + addSuffix(prefix, RMId)));
        } else {
          conf.set(addSuffix(prefix, RMId), confVal + ":"
              + YarnConfiguration.getRMDefaultPortNumber(prefix, conf));
        }
      }
    } catch (IllegalArgumentException iae) {
      String errmsg = iae.getMessage();
      if (rpcAddressConfKey == null) {
        // Error at addSuffix
        errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID, RMId);
      }
      throwBadConfigurationException(errmsg);
    }
  }
}
10,676
34.121711
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/ConfigurationProvider.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.conf;

import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.exceptions.YarnException;

/**
 * Base class to implement ConfigurationProvider.
 * Real ConfigurationProvider implementations need to derive from it and
 * implement load methods to actually load the configuration.
 */
@Private
@Unstable
public abstract class ConfigurationProvider {

  /**
   * Initializes the provider by delegating to {@link #initInternal}.
   *
   * @param bootstrapConf the bootstrap Configuration
   * @throws Exception if initialization fails
   */
  public void init(Configuration bootstrapConf) throws Exception {
    initInternal(bootstrapConf);
  }

  /**
   * Closes the provider by delegating to {@link #closeInternal}.
   *
   * @throws Exception if shutdown fails
   */
  public void close() throws Exception {
    closeInternal();
  }

  /**
   * Opens an InputStream at the indicated file
   * @param bootstrapConf Configuration
   * @param name The configuration file name
   * @return an InputStream over the named configuration file
   * @throws YarnException
   * @throws IOException
   */
  public abstract InputStream getConfigurationInputStream(
      Configuration bootstrapConf, String name) throws YarnException,
      IOException;

  /**
   * Derived classes initialize themselves using this method.
   */
  public abstract void initInternal(Configuration bootstrapConf)
      throws Exception;

  /**
   * Derived classes close themselves using this method.
   */
  public abstract void closeInternal() throws Exception;
}
2,237
31.434783
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/ConfigurationProviderFactory.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.conf; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @Private @Unstable /** * Factory for {@link ConfigurationProvider} implementations. */ public class ConfigurationProviderFactory { /** * Creates an instance of {@link ConfigurationProvider} using given * configuration. * @param bootstrapConf * @return configurationProvider */ @SuppressWarnings("unchecked") public static ConfigurationProvider getConfigurationProvider(Configuration bootstrapConf) { Class<? extends ConfigurationProvider> defaultProviderClass; try { defaultProviderClass = (Class<? 
extends ConfigurationProvider>) Class.forName( YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS); } catch (Exception e) { throw new YarnRuntimeException( "Invalid default configuration provider class" + YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS, e); } ConfigurationProvider configurationProvider = ReflectionUtils.newInstance(bootstrapConf.getClass( YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS, defaultProviderClass, ConfigurationProvider.class), bootstrapConf); return configurationProvider; } }
2,324
37.75
78
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerType.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api; /** * Container property encoding allocation and execution semantics. * * <p> * The container types are the following: * <ul> * <li>{@link #APPLICATION_MASTER} * <li>{@link #TASK} * </ul> * </p> */ public enum ContainerType { APPLICATION_MASTER, TASK }
1,117
31.882353
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerTerminationContext.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.api;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;

/**
 * Termination context for {@link AuxiliaryService} when stopping a
 * container.
 */
@Public
@Evolving
public class ContainerTerminationContext extends ContainerContext {

  /**
   * Builds a termination context without an explicit container type.
   * All values are stored by the {@code ContainerContext} superclass.
   *
   * @param user user-name of the application submitter
   * @param containerId id of the container being stopped
   * @param resource resource associated with the container
   */
  @Private
  @Unstable
  public ContainerTerminationContext(String user, ContainerId containerId,
      Resource resource) {
    super(user, containerId, resource);
  }

  /**
   * Builds a termination context that also carries the container's type.
   *
   * @param user user-name of the application submitter
   * @param containerId id of the container being stopped
   * @param resource resource associated with the container
   * @param containerType whether the container is an AM or a task container
   */
  @Private
  @Unstable
  public ContainerTerminationContext(String user, ContainerId containerId,
      Resource resource, ContainerType containerType) {
    super(user, containerId, resource, containerType);
  }
}
1,782
33.288462
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationInitializationContext.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api; import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; /** * Initialization context for {@link AuxiliaryService} when starting an * application. 
*/ @Public @Evolving public class ApplicationInitializationContext { private final String user; private final ApplicationId applicationId; private ByteBuffer appDataForService; @Private @Unstable public ApplicationInitializationContext(String user, ApplicationId applicationId, ByteBuffer appDataForService) { this.user = user; this.applicationId = applicationId; this.appDataForService = appDataForService; } /** * Get the user-name of the application-submitter * * @return user-name */ public String getUser() { return this.user; } /** * Get {@link ApplicationId} of the application * * @return applications ID */ public ApplicationId getApplicationId() { return this.applicationId; } /** * Get the data sent to the NodeManager via * {@link ContainerManagementProtocol#startContainers(StartContainersRequest)} * as part of {@link ContainerLaunchContext#getServiceData()} * * @return the servicesData for this application. */ public ByteBuffer getApplicationDataForService() { return this.appDataForService; } }
2,614
31.283951
83
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/SCMAdminProtocolPB.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.ipc.ProtocolInfo; import org.apache.hadoop.yarn.proto.SCMAdminProtocol.SCMAdminProtocolService; @Private @Unstable @ProtocolInfo(protocolName = "org.apache.hadoop.yarn.server.api.SCMAdminProtocolPB", protocolVersion = 1) public interface SCMAdminProtocolPB extends SCMAdminProtocolService.BlockingInterface { }
1,332
40.65625
84
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerInitializationContext.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; /** * Initialization context for {@link AuxiliaryService} when starting a * container. * */ @Public @Evolving public class ContainerInitializationContext extends ContainerContext { @Private @Unstable public ContainerInitializationContext(String user, ContainerId containerId, Resource resource) { super(user, containerId, resource); } @Private @Unstable public ContainerInitializationContext(String user, ContainerId containerId, Resource resource, ContainerType containerType) { super(user, containerId, resource, containerType); } }
1,794
33.519231
77
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/SCMAdminProtocol.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskResponse; import org.apache.hadoop.yarn.exceptions.YarnException; /** * <p> * The protocol between administrators and the <code>SharedCacheManager</code> * </p> */ @Public @Unstable public interface SCMAdminProtocol { /** * <p> * The method used by administrators to ask SCM to run cleaner task right away * </p> * * @param request request <code>SharedCacheManager</code> to run a cleaner task * @return <code>SharedCacheManager</code> returns an empty response * on success and throws an exception on rejecting the request * @throws YarnException * @throws IOException */ @Public @Unstable public RunSharedCacheCleanerTaskResponse runCleanerTask( RunSharedCacheCleanerTaskRequest request) throws YarnException, IOException; }
1,976
35.611111
91
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerContext.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Resource; /** * Base context class for {@link AuxiliaryService} initializing and stopping a * container. */ @Public @Evolving public class ContainerContext { private final String user; private final ContainerId containerId; private final Resource resource; private final ContainerType containerType; @Private @Unstable public ContainerContext(String user, ContainerId containerId, Resource resource) { this(user, containerId, resource, ContainerType.TASK); } @Private @Unstable public ContainerContext(String user, ContainerId containerId, Resource resource, ContainerType containerType) { this.user = user; this.containerId = containerId; this.resource = resource; this.containerType = containerType; } /** * Get user of the container being initialized or stopped. 
* * @return the user */ public String getUser() { return user; } /** * Get {@link ContainerId} of the container being initialized or stopped. * * @return the container ID */ public ContainerId getContainerId() { return containerId; } /** * Get {@link Resource} the resource capability allocated to the container * being initialized or stopped. * * @return the resource capability. */ public Resource getResource() { return resource; } /** * Get {@link ContainerType} the type of the container * being initialized or stopped. * * @return the type of the container */ public ContainerType getContainerType() { return containerType; } }
2,764
28.105263
78
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationTerminationContext.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationId; /** * Initialization context for {@link AuxiliaryService} when stopping an * application. * */ @Public @Evolving public class ApplicationTerminationContext { private final ApplicationId applicationId; @Private @Unstable public ApplicationTerminationContext(ApplicationId applicationId) { this.applicationId = applicationId; } /** * Get {@link ApplicationId} of the application being stopped. * * @return applications ID */ public ApplicationId getApplicationId() { return this.applicationId; } }
1,705
31.807692
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.io.retry.Idempotent; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.tools.GetUserMappingsProtocol; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.ResourceOption; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; import 
org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest; import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse; @Private public interface ResourceManagerAdministrationProtocol extends GetUserMappingsProtocol { @Private @Idempotent public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request) throws StandbyException, YarnException, IOException; @Private @Idempotent public RefreshNodesResponse refreshNodes(RefreshNodesRequest request) throws StandbyException, YarnException, IOException; @Private @Idempotent public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration( RefreshSuperUserGroupsConfigurationRequest request) throws StandbyException, YarnException, IOException; @Private @Idempotent public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings( RefreshUserToGroupsMappingsRequest request) throws 
StandbyException, YarnException, IOException; @Private @Idempotent public RefreshAdminAclsResponse refreshAdminAcls( RefreshAdminAclsRequest request) throws YarnException, IOException; @Private @Idempotent public RefreshServiceAclsResponse refreshServiceAcls( RefreshServiceAclsRequest request) throws YarnException, IOException; /** * <p>The interface used by admin to update nodes' resources to the * <code>ResourceManager</code> </p>. * * <p>The admin client is required to provide details such as a map from * {@link NodeId} to {@link ResourceOption} required to update resources on * a list of <code>RMNode</code> in <code>ResourceManager</code> etc. * via the {@link UpdateNodeResourceRequest}.</p> * * @param request request to update resource for a node in cluster. * @return (empty) response on accepting update. * @throws YarnException * @throws IOException */ @Private @Idempotent public UpdateNodeResourceResponse updateNodeResource( UpdateNodeResourceRequest request) throws YarnException, IOException; @Private @Idempotent public AddToClusterNodeLabelsResponse addToClusterNodeLabels( AddToClusterNodeLabelsRequest request) throws YarnException, IOException; @Private @Idempotent public RemoveFromClusterNodeLabelsResponse removeFromClusterNodeLabels( RemoveFromClusterNodeLabelsRequest request) throws YarnException, IOException; @Private @Idempotent public ReplaceLabelsOnNodeResponse replaceLabelsOnNode( ReplaceLabelsOnNodeRequest request) throws YarnException, IOException; @Private @Idempotent public CheckForDecommissioningNodesResponse checkForDecommissioningNodes( CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest) throws YarnException, IOException; }
5,837
43.227273
101
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/AuxiliaryService.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api; import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.fs.Path; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse; import org.apache.hadoop.yarn.conf.YarnConfiguration; /** * A generic service that will be started by the NodeManager. This is a service * that administrators have to configure on each node by setting * {@link YarnConfiguration#NM_AUX_SERVICES}. * */ @Public @Evolving public abstract class AuxiliaryService extends AbstractService { private Path recoveryPath = null; protected AuxiliaryService(String name) { super(name); } /** * Get the path specific to this auxiliary service to use for recovery. * * @return state storage path or null if recovery is not enabled */ protected Path getRecoveryPath() { return recoveryPath; } /** * A new application is started on this NodeManager. 
This is a signal to * this {@link AuxiliaryService} about the application initialization. * * @param initAppContext context for the application's initialization */ public abstract void initializeApplication( ApplicationInitializationContext initAppContext); /** * An application is finishing on this NodeManager. This is a signal to this * {@link AuxiliaryService} about the same. * * @param stopAppContext context for the application termination */ public abstract void stopApplication( ApplicationTerminationContext stopAppContext); /** * Retrieve meta-data for this {@link AuxiliaryService}. Applications using * this {@link AuxiliaryService} SHOULD know the format of the meta-data - * ideally each service should provide a method to parse out the information * to the applications. One example of meta-data is contact information so * that applications can access the service remotely. This will only be called * after the service's {@link #start()} method has finished. the result may be * cached. * * <p> * The information is passed along to applications via * {@link StartContainersResponse#getAllServicesMetaData()} that is returned by * {@link ContainerManagementProtocol#startContainers(StartContainersRequest)} * </p> * * @return meta-data for this service that should be made available to * applications. */ public abstract ByteBuffer getMetaData(); /** * A new container is started on this NodeManager. This is a signal to * this {@link AuxiliaryService} about the container initialization. * This method is called when the NodeManager receives the container launch * command from the ApplicationMaster and before the container process is * launched. * * @param initContainerContext context for the container's initialization */ public void initializeContainer(ContainerInitializationContext initContainerContext) { } /** * A container is finishing on this NodeManager. This is a signal to this * {@link AuxiliaryService} about the same. 
* * @param stopContainerContext context for the container termination */ public void stopContainer(ContainerTerminationContext stopContainerContext) { } /** * Set the path for this auxiliary service to use for storing state * that will be used during recovery. * * @param recoveryPath where recoverable state should be stored */ public void setRecoveryPath(Path recoveryPath) { this.recoveryPath = recoveryPath; } }
4,595
35.188976
81
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Private package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience;
952
42.318182
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveFromClusterNodeLabelsResponse.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.api.protocolrecords;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.util.Records;

/** Empty response record for a remove-from-cluster-node-labels request. */
@Public
@Evolving
public abstract class RemoveFromClusterNodeLabelsResponse {

  /** @return a new, empty response record */
  public static RemoveFromClusterNodeLabelsResponse newInstance() {
    return Records.newRecord(RemoveFromClusterNodeLabelsResponse.class);
  }
}
1,273
38.8125
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeRequest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import java.util.Map; import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.util.Records; @Public @Evolving public abstract class ReplaceLabelsOnNodeRequest { public static ReplaceLabelsOnNodeRequest newInstance( Map<NodeId, Set<String>> map) { ReplaceLabelsOnNodeRequest request = Records.newRecord(ReplaceLabelsOnNodeRequest.class); request.setNodeToLabels(map); return request; } @Public @Evolving public abstract void setNodeToLabels(Map<NodeId, Set<String>> map); @Public @Evolving public abstract Map<NodeId, Set<String>> getNodeToLabels(); }
1,646
33.3125
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshAdminAclsResponse.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; @Private @Stable public abstract class RefreshAdminAclsResponse { @Private @Unstable public static RefreshAdminAclsResponse newInstance() { RefreshAdminAclsResponse response = Records.newRecord(RefreshAdminAclsResponse.class); return response; } }
1,372
36.108108
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddToClusterNodeLabelsRequest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.util.Records; @Public @Unstable public abstract class AddToClusterNodeLabelsRequest { @Public @Unstable public static AddToClusterNodeLabelsRequest newInstance( List<NodeLabel> NodeLabels) { AddToClusterNodeLabelsRequest request = Records.newRecord(AddToClusterNodeLabelsRequest.class); request.setNodeLabels(NodeLabels); return request; } @Public @Unstable public abstract void setNodeLabels(List<NodeLabel> NodeLabels); @Public @Unstable public abstract List<NodeLabel> getNodeLabels(); }
1,648
32.653061
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshQueuesResponse.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; @Private @Stable public abstract class RefreshQueuesResponse { @Private @Unstable public static RefreshQueuesResponse newInstance() { RefreshQueuesResponse response = Records.newRecord(RefreshQueuesResponse.class); return response; } }
1,360
35.783784
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/AddToClusterNodeLabelsResponse.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.api.protocolrecords;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.util.Records;

/**
 * The response sent after node labels have been added to the cluster.
 * Carries no payload.
 */
@Public
@Evolving
public abstract class AddToClusterNodeLabelsResponse {
  /**
   * Create an empty response.
   *
   * @return a new {@link AddToClusterNodeLabelsResponse}
   */
  public static AddToClusterNodeLabelsResponse newInstance() {
    AddToClusterNodeLabelsResponse response =
        Records.newRecord(AddToClusterNodeLabelsResponse.class);
    return response;
  }
}
1,258
38.34375
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesResponse.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; @Private @Stable public abstract class RefreshNodesResponse { @Private @Unstable public static RefreshNodesResponse newInstance() { RefreshNodesResponse response = Records.newRecord(RefreshNodesResponse.class); return response; } }
1,356
35.675676
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshSuperUserGroupsConfigurationResponse.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; @Private @Stable public abstract class RefreshSuperUserGroupsConfigurationResponse { @Private @Unstable public static RefreshSuperUserGroupsConfigurationResponse newInstance() { RefreshSuperUserGroupsConfigurationResponse response = Records.newRecord(RefreshSuperUserGroupsConfigurationResponse.class); return response; } }
1,448
38.162162
77
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesRequest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.DecommissionType; import org.apache.hadoop.yarn.util.Records; @Private @Unstable public abstract class RefreshNodesRequest { @Private @Stable public static RefreshNodesRequest newInstance() { RefreshNodesRequest request = Records.newRecord(RefreshNodesRequest.class); return request; } @Private @Unstable public static RefreshNodesRequest newInstance( DecommissionType decommissionType) { RefreshNodesRequest request = Records.newRecord(RefreshNodesRequest.class); request.setDecommissionType(decommissionType); return request; } /** * Set the DecommissionType * * @param decommissionType */ public abstract void setDecommissionType(DecommissionType decommissionType); /** * Get the DecommissionType * * @return decommissionType */ public abstract DecommissionType getDecommissionType(); }
1,967
31.8
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RunSharedCacheCleanerTaskResponse.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.api.protocolrecords;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;

/**
 * The response returned by the <code>SharedCacheManager</code> when asked to
 * run the cleaner service. The only payload is an accepted/rejected flag.
 */
@Public
@Unstable
public abstract class RunSharedCacheCleanerTaskResponse {

  /**
   * Whether the shared cache manager accepted the request. The request is
   * rejected when a cleaner task is already running.
   *
   * @return true if the request was accepted, false otherwise
   */
  @Public
  @Unstable
  public abstract boolean getAccepted();

  /**
   * Record whether the shared cache manager accepted the request. The request
   * is rejected when a cleaner task is already running.
   *
   * @param b true if the request was accepted, false otherwise
   */
  @Public
  @Unstable
  public abstract void setAccepted(boolean b);
}
1,887
31
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshServiceAclsResponse.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; @Private @Stable public abstract class RefreshServiceAclsResponse { @Private @Unstable public static RefreshServiceAclsResponse newInstance() { RefreshServiceAclsResponse response = Records.newRecord(RefreshServiceAclsResponse.class); return response; } }
1,380
36.324324
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshSuperUserGroupsConfigurationRequest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.util.Records; @Private @Stable public abstract class RefreshSuperUserGroupsConfigurationRequest { @Public @Stable public static RefreshSuperUserGroupsConfigurationRequest newInstance() { RefreshSuperUserGroupsConfigurationRequest request = Records.newRecord(RefreshSuperUserGroupsConfigurationRequest.class); return request; } }
1,436
37.837838
76
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateNodeResourceResponse.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol; import org.apache.hadoop.yarn.util.Records; /** * <p>The response sent by the <code>ResourceManager</code> to Admin client on * node resource change.</p> * * <p>Currently, this is empty.</p> * * @see ResourceManagerAdministrationProtocol#updateNodeResource( * UpdateNodeResourceRequest) */ @Private @Evolving public abstract class UpdateNodeResourceResponse { public static UpdateNodeResourceResponse newInstance(){ UpdateNodeResourceResponse response = Records.newRecord(UpdateNodeResourceResponse.class); return response; } }
1,649
36.5
79
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshUserToGroupsMappingsRequest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.util.Records; @Private @Stable public abstract class RefreshUserToGroupsMappingsRequest { @Public @Stable public static RefreshUserToGroupsMappingsRequest newInstance() { RefreshUserToGroupsMappingsRequest request = Records.newRecord(RefreshUserToGroupsMappingsRequest.class); return request; } }
1,404
36.972973
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshQueuesRequest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.util.Records; @Private @Stable public abstract class RefreshQueuesRequest { @Public @Stable public static RefreshQueuesRequest newInstance() { RefreshQueuesRequest request = Records.newRecord(RefreshQueuesRequest.class); return request; } }
1,348
35.459459
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshAdminAclsRequest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.util.Records; @Private @Stable public abstract class RefreshAdminAclsRequest { @Public @Stable public static RefreshAdminAclsRequest newInstance() { RefreshAdminAclsRequest request = Records.newRecord(RefreshAdminAclsRequest.class); return request; } }
1,360
35.783784
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshServiceAclsRequest.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.util.Records; @Private @Stable public abstract class RefreshServiceAclsRequest { @Public @Stable public static RefreshServiceAclsRequest newInstance() { RefreshServiceAclsRequest request = Records.newRecord(RefreshServiceAclsRequest.class); return request; } }
1,368
36
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/CheckForDecommissioningNodesRequest.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.api.protocolrecords;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records;

/**
 * The request sent by the admin client to query which nodes are currently
 * decommissioning. Carries no payload.
 */
@Private
@Unstable
public abstract class CheckForDecommissioningNodesRequest {
  /**
   * Create an empty request.
   *
   * @return a new {@link CheckForDecommissioningNodesRequest}
   */
  @Private
  @Unstable
  public static CheckForDecommissioningNodesRequest newInstance() {
    return Records.newRecord(CheckForDecommissioningNodesRequest.class);
  }
}
1,366
36.972222
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateNodeResourceRequest.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.api.protocolrecords;

import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
import org.apache.hadoop.yarn.util.Records;

/**
 * <p>The request sent by the admin client to change the resources of a list
 * of nodes in the <code>ResourceManager</code>.</p>
 *
 * <p>The request carries a map from {@link NodeId} to {@link ResourceOption}
 * used to update the RMNodes' resources in the
 * <code>ResourceManager</code>.</p>
 *
 * @see ResourceManagerAdministrationProtocol#updateNodeResource(
 * UpdateNodeResourceRequest)
 */
@Public
@Evolving
public abstract class UpdateNodeResourceRequest {
  /**
   * Create a request carrying the given per-node resource updates.
   *
   * @param nodeResourceMap the map of {@code <NodeId, ResourceOption>}
   * @return a new {@link UpdateNodeResourceRequest}
   */
  @Public
  @Evolving
  public static UpdateNodeResourceRequest newInstance(
      Map<NodeId, ResourceOption> nodeResourceMap) {
    UpdateNodeResourceRequest request =
        Records.newRecord(UpdateNodeResourceRequest.class);
    request.setNodeResourceMap(nodeResourceMap);
    return request;
  }

  /**
   * Get the map from <code>NodeId</code> to <code>ResourceOption</code>.
   *
   * @return the map of {@code <NodeId, ResourceOption>}
   */
  @Public
  @Evolving
  public abstract Map<NodeId, ResourceOption> getNodeResourceMap();

  /**
   * Set the map from <code>NodeId</code> to <code>ResourceOption</code>.
   *
   * @param nodeResourceMap the map of {@code <NodeId, ResourceOption>}
   */
  @Public
  @Evolving
  public abstract void setNodeResourceMap(
      Map<NodeId, ResourceOption> nodeResourceMap);
}
2,543
34.333333
87
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RemoveFromClusterNodeLabelsRequest.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.api.protocolrecords;

import java.util.Set;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.util.Records;

/**
 * The request sent by the admin client to remove node labels from the
 * cluster-level node-label collection.
 */
@Public
@Evolving
public abstract class RemoveFromClusterNodeLabelsRequest {
  /**
   * Create a request carrying the given label names.
   *
   * @param labels names of the labels to remove
   * @return a new {@link RemoveFromClusterNodeLabelsRequest}
   */
  public static RemoveFromClusterNodeLabelsRequest newInstance(
      Set<String> labels) {
    RemoveFromClusterNodeLabelsRequest request =
        Records.newRecord(RemoveFromClusterNodeLabelsRequest.class);
    request.setNodeLabels(labels);
    return request;
  }

  /**
   * Set the label names to remove.
   *
   * @param labels names of the labels to remove
   */
  @Public
  @Evolving
  public abstract void setNodeLabels(Set<String> labels);

  /**
   * Get the label names carried by this request.
   *
   * @return names of the labels to remove
   */
  @Public
  @Evolving
  public abstract Set<String> getNodeLabels();
}
1,570
33.152174
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshUserToGroupsMappingsResponse.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.api.protocolrecords; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.Records; @Private @Stable public abstract class RefreshUserToGroupsMappingsResponse { @Private @Unstable public static RefreshUserToGroupsMappingsResponse newInstance() { RefreshUserToGroupsMappingsResponse response = Records.newRecord(RefreshUserToGroupsMappingsResponse.class); return response; } }
1,416
37.297297
74
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/ReplaceLabelsOnNodeResponse.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.api.protocolrecords;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.util.Records;

/**
 * The response sent after labels on a node have been replaced. Carries no
 * payload.
 */
@Public
@Evolving
public abstract class ReplaceLabelsOnNodeResponse {
  /**
   * Create an empty response.
   *
   * @return a new {@link ReplaceLabelsOnNodeResponse}
   */
  public static ReplaceLabelsOnNodeResponse newInstance() {
    ReplaceLabelsOnNodeResponse response =
        Records.newRecord(ReplaceLabelsOnNodeResponse.class);
    return response;
  }
}
1,249
38.0625
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RunSharedCacheCleanerTaskRequest.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.api.protocolrecords;

import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;

/**
 * The request from the admin client asking the
 * <code>SharedCacheManager</code> to run the cleaner service immediately.
 * Currently carries no payload.
 */
@Public
@Unstable
public abstract class RunSharedCacheCleanerTaskRequest {
}
1,259
32.157895
75
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/CheckForDecommissioningNodesResponse.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.api.protocolrecords;

import java.util.Set;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.util.Records;

/**
 * The response carrying the set of nodes that are currently
 * decommissioning.
 */
@Private
@Unstable
public abstract class CheckForDecommissioningNodesResponse {
  /**
   * Create a response carrying the given decommissioning nodes.
   *
   * @param decommissioningNodes nodes currently decommissioning
   * @return a new {@link CheckForDecommissioningNodesResponse}
   */
  @Private
  @Unstable
  public static CheckForDecommissioningNodesResponse newInstance(
      Set<NodeId> decommissioningNodes) {
    CheckForDecommissioningNodesResponse response =
        Records.newRecord(CheckForDecommissioningNodesResponse.class);
    response.setDecommissioningNodes(decommissioningNodes);
    return response;
  }

  /**
   * Set the decommissioning nodes.
   *
   * @param decommissioningNodes nodes currently decommissioning
   */
  public abstract void setDecommissioningNodes(Set<NodeId> decommissioningNodes);

  /**
   * Get the decommissioning nodes.
   *
   * @return nodes currently decommissioning
   */
  public abstract Set<NodeId> getDecommissioningNodes();
}
1,684
37.295455
81
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShellWithNodeLabels.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.applications.distributedshell;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import com.google.common.collect.ImmutableMap;

/**
 * End-to-end test that a distributed-shell job submitted with
 * {@code --node_label_expression x} only gets containers on the NodeManager
 * that carries label "x". Reuses {@link TestDistributedShell}'s mini-cluster
 * setup with two NodeManagers.
 */
public class TestDistributedShellWithNodeLabels {
  private static final Log LOG =
      LogFactory.getLog(TestDistributedShellWithNodeLabels.class);

  // Two NMs so one can be labeled "x" and the other left unlabeled.
  static final int NUM_NMS = 2;

  // Delegate that owns the mini YARN cluster and its configuration.
  TestDistributedShell distShellTest;

  /** Spins up a fresh two-NM mini cluster before each test. */
  @Before
  public void setup() throws Exception {
    distShellTest = new TestDistributedShell();
    distShellTest.setupInternal(NUM_NMS);
  }

  /**
   * Registers cluster node label "x", opens the root and default capacity
   * scheduler queues to it, and attaches the label to the second NM only.
   *
   * @throws IOException if the label manager rejects the update
   */
  private void initializeNodeLabels() throws IOException {
    RMContext rmContext =
        distShellTest.yarnCluster.getResourceManager(0).getRMContext();

    // Setup node labels
    RMNodeLabelsManager labelsMgr = rmContext.getNodeLabelManager();
    Set<String> labels = new HashSet<String>();
    labels.add("x");
    labelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(labels);

    // Setup queue access to node labels (100% of label-x capacity on the
    // default queue so the job is not starved).
    distShellTest.conf.set(
        "yarn.scheduler.capacity.root.accessible-node-labels", "x");
    distShellTest.conf.set(
        "yarn.scheduler.capacity.root.accessible-node-labels.x.capacity", "100");
    distShellTest.conf.set(
        "yarn.scheduler.capacity.root.default.accessible-node-labels", "x");
    distShellTest.conf.set(
        "yarn.scheduler.capacity.root.default.accessible-node-labels.x.capacity",
        "100");

    // Reinitialize so the scheduler picks up the queue/label configuration.
    rmContext.getScheduler().reinitialize(distShellTest.conf, rmContext);

    // Fetch node-ids from yarn cluster
    NodeId[] nodeIds = new NodeId[NUM_NMS];
    for (int i = 0; i < NUM_NMS; i++) {
      NodeManager mgr = distShellTest.yarnCluster.getNodeManager(i);
      nodeIds[i] = mgr.getNMContext().getNodeId();
    }

    // Set label x to NM[1]; NM[0] stays unlabeled.
    labelsMgr.addLabelsToNode(ImmutableMap.of(nodeIds[1], labels));
  }

  /**
   * Submits a 4-container sleep job restricted to label "x" while a sampler
   * thread records per-NM container counts, then asserts all containers
   * landed on the labeled NM and none on the unlabeled one.
   */
  @Test(timeout=90000)
  public void testDSShellWithNodeLabelExpression() throws Exception {
    initializeNodeLabels();

    // Start NMContainerMonitor sampling in the background.
    NMContainerMonitor mon = new NMContainerMonitor();
    Thread t = new Thread(mon);
    t.start();

    // Submit a job which will sleep for 60 sec
    String[] args = {
        "--jar",
        TestDistributedShell.APPMASTER_JAR,
        "--num_containers",
        "4",
        "--shell_command",
        "sleep",
        "--shell_args",
        "15",
        "--master_memory",
        "512",
        "--master_vcores",
        "2",
        "--container_memory",
        "128",
        "--container_vcores",
        "1",
        "--node_label_expression",
        "x"
    };

    LOG.info("Initializing DS Client");
    final Client client =
        new Client(new Configuration(distShellTest.yarnCluster.getConfig()));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    boolean result = client.run();
    LOG.info("Client run completed. Result=" + result);

    // Stop the sampler; its run() exits on InterruptedException.
    t.interrupt();

    // Check maximum number of containers on each NMs
    int[] maxRunningContainersOnNMs = mon.getMaxRunningContainersReport();
    // Check no container allocated on NM[0] (the unlabeled node)
    Assert.assertEquals(0, maxRunningContainersOnNMs[0]);
    // Check there're some containers allocated on NM[1] (the "x"-labeled node)
    Assert.assertTrue(maxRunningContainersOnNMs[1] > 0);
  }

  /**
   * Monitor containers running on NMs. Polls each NM's container count every
   * {@link #SAMPLING_INTERVAL_MS} ms and keeps the per-NM maximum observed.
   * Stopped by interrupting its thread.
   */
  class NMContainerMonitor implements Runnable {
    // The interval of milliseconds of sampling (500ms)
    final static int SAMPLING_INTERVAL_MS = 500;

    // The maximum number of containers running on each NMs
    int[] maxRunningContainersOnNMs = new int[NUM_NMS];

    @Override
    public void run() {
      while (true) {
        for (int i = 0; i < NUM_NMS; i++) {
          int nContainers =
              distShellTest.yarnCluster.getNodeManager(i).getNMContext()
                  .getContainers().size();
          if (nContainers > maxRunningContainersOnNMs[i]) {
            maxRunningContainersOnNMs[i] = nContainers;
          }
        }
        try {
          Thread.sleep(SAMPLING_INTERVAL_MS);
        } catch (InterruptedException e) {
          // Interrupt is the intended shutdown signal from the test thread.
          e.printStackTrace();
          break;
        }
      }
    }

    /**
     * @return the live internal array of per-NM maxima; read only after the
     *         monitor thread has been interrupted.
     */
    public int[] getMaxRunningContainersReport() {
      return maxRunningContainersOnNMs;
    }
  }
}
5,564
32.524096
95
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSSleepingAppMaster.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.applications.distributedshell; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; public class TestDSSleepingAppMaster extends ApplicationMaster{ private static final Log LOG = LogFactory.getLog(TestDSSleepingAppMaster.class); private static final long SLEEP_TIME = 5000; public static void main(String[] args) { boolean result = false; try { TestDSSleepingAppMaster appMaster = new TestDSSleepingAppMaster(); boolean doRun = appMaster.init(args); if (!doRun) { System.exit(0); } appMaster.run(); if (appMaster.appAttemptID.getAttemptId() <= 2) { try { // sleep some time Thread.sleep(SLEEP_TIME); } catch (InterruptedException e) {} // fail the first am. System.exit(100); } result = appMaster.finish(); } catch (Throwable t) { System.exit(1); } if (result) { LOG.info("Application Master completed successfully. exiting"); System.exit(0); } else { LOG.info("Application Master failed. exiting"); System.exit(2); } } }
1,971
32.423729
82
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/ContainerLaunchFailAppMaster.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.applications.distributedshell;

import java.nio.ByteBuffer;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ContainerId;

/**
 * A test ApplicationMaster whose NM callback handler converts every
 * successful container start into an injected launch error, so tests can
 * exercise the AM's container-launch-failure path deterministically.
 */
public class ContainerLaunchFailAppMaster extends ApplicationMaster {

  private static final Log LOG =
      LogFactory.getLog(ContainerLaunchFailAppMaster.class);

  public ContainerLaunchFailAppMaster() {
    super();
  }

  /** Swaps in the failure-injecting handler in place of the default one. */
  @Override
  NMCallbackHandler createNMCallbackHandler() {
    return new FailContainerLaunchNMCallbackHandler(this);
  }

  /**
   * NM callback handler that reroutes "container started" notifications into
   * the error callback with a synthetic RuntimeException.
   */
  class FailContainerLaunchNMCallbackHandler extends
      ApplicationMaster.NMCallbackHandler {

    public FailContainerLaunchNMCallbackHandler(
        ApplicationMaster applicationMaster) {
      super(applicationMaster);
    }

    @Override
    public void onContainerStarted(ContainerId containerId,
        Map<String, ByteBuffer> allServiceResponse) {
      // Pretend the launch blew up even though it actually succeeded.
      super.onStartContainerError(containerId,
          new RuntimeException("Inject Container Launch failure"));
    }
  }

  /**
   * Entry point. Exit codes: 0 when the AM succeeds (or init declines to
   * run), 1 on unexpected error, 2 when the AM reports failure.
   *
   * @param args standard distributed-shell AM arguments
   */
  public static void main(String[] args) {
    boolean succeeded = false;
    try {
      final ContainerLaunchFailAppMaster master =
          new ContainerLaunchFailAppMaster();
      LOG.info("Initializing ApplicationMaster");
      if (!master.init(args)) {
        System.exit(0);
      }
      master.run();
      succeeded = master.finish();
    } catch (Throwable t) {
      LOG.fatal("Error running ApplicationMaster", t);
      System.exit(1);
    }
    if (succeeded) {
      LOG.info("Application Master completed successfully. exiting");
      System.exit(0);
    } else {
      LOG.info("Application Master failed. exiting");
      System.exit(2);
    }
  }
}
2,619
29.823529
80
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.applications.distributedshell;

import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.net.InetAddress;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.JarFinder;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.timeline.TimelineDomain;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

/**
 * End-to-end tests for the distributed-shell application: runs real Client
 * and ApplicationMaster instances against a {@link MiniYARNCluster} with the
 * timeline service enabled, then inspects application reports, timeline
 * entities, and container stdout logs on local disk.
 */
public class TestDistributedShell {

  private static final Log LOG =
      LogFactory.getLog(TestDistributedShell.class);

  // Shared mini cluster / config; protected so label tests can reuse them.
  protected MiniYARNCluster yarnCluster = null;
  protected YarnConfiguration conf = null;
  private static final int NUM_NMS = 1;

  // Jar containing the AM classes, located on the test classpath.
  protected final static String APPMASTER_JAR =
      JarFinder.getJar(ApplicationMaster.class);

  /** Default setup: single-NM cluster. */
  @Before
  public void setup() throws Exception {
    setupInternal(NUM_NMS);
  }

  /**
   * Starts a MiniYARNCluster with the given NM count, publishes its config
   * into the classpath's dummy yarn-site.xml, and wipes any stale leveldb
   * timeline store.
   *
   * @param numNodeManager number of NodeManagers to start
   */
  protected void setupInternal(int numNodeManager) throws Exception {
    LOG.info("Starting up YARN cluster");
    conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 128);
    conf.set("yarn.log.dir", "target");
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
    conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getName());
    conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);

    if (yarnCluster == null) {
      yarnCluster =
          new MiniYARNCluster(TestDistributedShell.class.getSimpleName(), 1,
              numNodeManager, 1, 1);
      yarnCluster.init(conf);
      yarnCluster.start();

      waitForNMsToRegister();

      // The launched AMs read yarn-site.xml from the classpath; overwrite the
      // dummy file with the live cluster's configuration.
      URL url = Thread.currentThread().getContextClassLoader().getResource("yarn-site.xml");
      if (url == null) {
        throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");
      }
      Configuration yarnClusterConfig = yarnCluster.getConfig();
      yarnClusterConfig.set("yarn.application.classpath", new File(url.getPath()).getParent());
      //write the document to a buffer (not directly to the file, as that
      //can cause the file being written to get read -which will then fail.
      ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
      yarnClusterConfig.writeXml(bytesOut);
      bytesOut.close();
      //write the bytes to the file in the classpath
      OutputStream os = new FileOutputStream(new File(url.getPath()));
      os.write(bytesOut.toByteArray());
      os.close();
    }
    // Clear any leveldb timeline data left over from a previous run.
    FileContext fsContext = FileContext.getLocalFSFileContext();
    fsContext
        .delete(
            new Path(conf
                .get("yarn.timeline-service.leveldb-timeline-store.path")),
            true);
    try {
      // Give the cluster a moment to settle before tests submit apps.
      Thread.sleep(2000);
    } catch (InterruptedException e) {
      LOG.info("setup thread sleep interrupted. message=" + e.getMessage());
    }
  }

  /** Stops the cluster and removes the leveldb timeline store. */
  @After
  public void tearDown() throws IOException {
    if (yarnCluster != null) {
      try {
        yarnCluster.stop();
      } finally {
        yarnCluster = null;
      }
    }
    FileContext fsContext = FileContext.getLocalFSFileContext();
    fsContext
        .delete(
            new Path(conf
                .get("yarn.timeline-service.leveldb-timeline-store.path")),
            true);
  }

  @Test(timeout=90000)
  public void testDSShellWithDomain() throws Exception {
    testDSShell(true);
  }

  @Test(timeout=90000)
  public void testDSShellWithoutDomain() throws Exception {
    testDSShell(false);
  }

  /**
   * Core scenario: run a 2-container ls/dir job, verify the RM application
   * report (hostname matches, rpc port -1), then verify the timeline store
   * recorded one app-attempt entity with two events and two container
   * entities, each in the expected domain ("TEST_DOMAIN" or "DEFAULT").
   *
   * @param haveDomain whether to create and use a custom timeline domain
   */
  public void testDSShell(boolean haveDomain) throws Exception {
    String[] args = {
        "--jar",
        APPMASTER_JAR,
        "--num_containers",
        "2",
        "--shell_command",
        Shell.WINDOWS ? "dir" : "ls",
        "--master_memory",
        "512",
        "--master_vcores",
        "2",
        "--container_memory",
        "128",
        "--container_vcores",
        "1"
    };
    if (haveDomain) {
      String[] domainArgs = {
          "--domain",
          "TEST_DOMAIN",
          "--view_acls",
          "reader_user reader_group",
          "--modify_acls",
          "writer_user writer_group",
          "--create"
      };
      List<String> argsList = new ArrayList<String>(Arrays.asList(args));
      argsList.addAll(Arrays.asList(domainArgs));
      args = argsList.toArray(new String[argsList.size()]);
    }

    LOG.info("Initializing DS Client");
    final Client client = new Client(new Configuration(yarnCluster.getConfig()));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    // Client.run() blocks until completion, so run it in a separate thread
    // and inspect the application report from this thread in parallel.
    final AtomicBoolean result = new AtomicBoolean(false);
    Thread t = new Thread() {
      public void run() {
        try {
          result.set(client.run());
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      }
    };
    t.start();

    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(new Configuration(yarnCluster.getConfig()));
    yarnClient.start();
    String hostName = NetUtils.getHostname();

    // Poll until the app report exposes a real AM host, then verify it.
    boolean verified = false;
    String errorMessage = "";
    while (!verified) {
      List<ApplicationReport> apps = yarnClient.getApplications();
      if (apps.size() == 0) {
        Thread.sleep(10);
        continue;
      }
      ApplicationReport appReport = apps.get(0);
      if (appReport.getHost().equals("N/A")) {
        // AM not yet launched; keep polling.
        Thread.sleep(10);
        continue;
      }
      errorMessage =
          "Expected host name to start with '" + hostName + "', was '"
              + appReport.getHost() + "'. Expected rpc port to be '-1', was '"
              + appReport.getRpcPort() + "'.";
      if (checkHostname(appReport.getHost()) && appReport.getRpcPort() == -1) {
        verified = true;
      }
      if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED) {
        // App ended before verification succeeded; stop polling.
        break;
      }
    }
    Assert.assertTrue(errorMessage, verified);
    t.join();
    LOG.info("Client run completed. Result=" + result);
    Assert.assertTrue(result.get());

    TimelineDomain domain = null;
    if (haveDomain) {
      domain = yarnCluster.getApplicationHistoryServer()
          .getTimelineStore().getDomain("TEST_DOMAIN");
      Assert.assertNotNull(domain);
      Assert.assertEquals("reader_user reader_group", domain.getReaders());
      Assert.assertEquals("writer_user writer_group", domain.getWriters());
    }

    // One AM attempt entity with two events (start + finish per the AM's
    // published events — TODO confirm event names against ApplicationMaster).
    TimelineEntities entitiesAttempts = yarnCluster
        .getApplicationHistoryServer()
        .getTimelineStore()
        .getEntities(ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString(),
            null, null, null, null, null, null, null, null, null);
    Assert.assertNotNull(entitiesAttempts);
    Assert.assertEquals(1, entitiesAttempts.getEntities().size());
    Assert.assertEquals(2, entitiesAttempts.getEntities().get(0).getEvents()
        .size());
    Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType()
        .toString(), ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString());
    if (haveDomain) {
      Assert.assertEquals(domain.getId(),
          entitiesAttempts.getEntities().get(0).getDomainId());
    } else {
      Assert.assertEquals("DEFAULT",
          entitiesAttempts.getEntities().get(0).getDomainId());
    }

    // Two container entities — one per requested container.
    TimelineEntities entities = yarnCluster
        .getApplicationHistoryServer()
        .getTimelineStore()
        .getEntities(ApplicationMaster.DSEntity.DS_CONTAINER.toString(), null,
            null, null, null, null, null, null, null, null);
    Assert.assertNotNull(entities);
    Assert.assertEquals(2, entities.getEntities().size());
    Assert.assertEquals(entities.getEntities().get(0).getEntityType()
        .toString(), ApplicationMaster.DSEntity.DS_CONTAINER.toString());
    if (haveDomain) {
      Assert.assertEquals(domain.getId(),
          entities.getEntities().get(0).getDomainId());
    } else {
      Assert.assertEquals("DEFAULT",
          entities.getEntities().get(0).getDomainId());
    }
  }

  /*
   * NetUtils.getHostname() returns a string in the form "hostname/ip".
   * Sometimes the hostname we get is the FQDN and sometimes the short name. In
   * addition, on machines with multiple network interfaces, it runs any one of
   * the ips. The function below compares the returns values for
   * NetUtils.getHostname() accounting for the conditions mentioned.
   */
  private boolean checkHostname(String appHostname) throws Exception {

    String hostname = NetUtils.getHostname();
    if (hostname.equals(appHostname)) {
      return true;
    }

    Assert.assertTrue("Unknown format for hostname " + appHostname,
        appHostname.contains("/"));
    Assert.assertTrue("Unknown format for hostname " + hostname,
        hostname.contains("/"));

    String[] appHostnameParts = appHostname.split("/");
    String[] hostnameParts = hostname.split("/");

    return (compareFQDNs(appHostnameParts[0], hostnameParts[0]) && checkIPs(
        hostnameParts[0], hostnameParts[1], appHostnameParts[1]));
  }

  /**
   * Resolves both names to canonical FQDNs and compares them, so short name
   * vs. FQDN mismatches do not cause false failures.
   */
  private boolean compareFQDNs(String appHostname, String hostname)
      throws Exception {
    if (appHostname.equals(hostname)) {
      return true;
    }
    String appFQDN = InetAddress.getByName(appHostname).getCanonicalHostName();
    String localFQDN = InetAddress.getByName(hostname).getCanonicalHostName();
    return appFQDN.equals(localFQDN);
  }

  /**
   * Accepts the pair of IPs if they are equal, or if both belong to the same
   * host's interface list (multi-homed machines report any one of their IPs).
   */
  private boolean checkIPs(String hostname, String localIP, String appIP)
      throws Exception {

    if (localIP.equals(appIP)) {
      return true;
    }
    boolean appIPCheck = false;
    boolean localIPCheck = false;
    InetAddress[] addresses = InetAddress.getAllByName(hostname);
    for (InetAddress ia : addresses) {
      if (ia.getHostAddress().equals(appIP)) {
        appIPCheck = true;
        continue;
      }
      if (ia.getHostAddress().equals(localIP)) {
        localIPCheck = true;
      }
    }
    return (appIPCheck && localIPCheck);
  }

  /**
   * Uses TestDSFailedAppMaster (which fails its first attempt) plus
   * --keep_containers_across_application_attempts to verify the second
   * attempt inherits the running containers and the app still succeeds.
   */
  @Test(timeout=90000)
  public void testDSRestartWithPreviousRunningContainers() throws Exception {
    String[] args = {
        "--jar",
        APPMASTER_JAR,
        "--num_containers",
        "1",
        "--shell_command",
        "sleep 8",
        "--master_memory",
        "512",
        "--container_memory",
        "128",
        "--keep_containers_across_application_attempts"
    };

    LOG.info("Initializing DS Client");
    Client client = new Client(TestDSFailedAppMaster.class.getName(),
        new Configuration(yarnCluster.getConfig()));
    client.init(args);
    LOG.info("Running DS Client");
    boolean result = client.run();
    LOG.info("Client run completed. Result=" + result);
    // application should succeed
    Assert.assertTrue(result);
  }

  /*
   * The sleeping period in TestDSSleepingAppMaster is set as 5 seconds.
   * Set attempt_failures_validity_interval as 2.5 seconds. It will check
   * how many attempt failures for previous 2.5 seconds.
   * The application is expected to be successful.
   */
  @Test(timeout=90000)
  public void testDSAttemptFailuresValidityIntervalSucess() throws Exception {
    String[] args = {
        "--jar",
        APPMASTER_JAR,
        "--num_containers",
        "1",
        "--shell_command",
        "sleep 8",
        "--master_memory",
        "512",
        "--container_memory",
        "128",
        "--attempt_failures_validity_interval",
        "2500"
    };

    LOG.info("Initializing DS Client");
    // Local variable intentionally shadows the field: we mutate a copy of the
    // cluster config with a max-attempts override for this test only.
    Configuration conf = yarnCluster.getConfig();
    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
    Client client = new Client(TestDSSleepingAppMaster.class.getName(),
        new Configuration(conf));

    client.init(args);
    LOG.info("Running DS Client");
    boolean result = client.run();

    LOG.info("Client run completed. Result=" + result);
    // application should succeed
    Assert.assertTrue(result);
  }

  /*
   * The sleeping period in TestDSSleepingAppMaster is set as 5 seconds.
   * Set attempt_failures_validity_interval as 15 seconds. It will check
   * how many attempt failure for previous 15 seconds.
   * The application is expected to be fail.
   */
  @Test(timeout=90000)
  public void testDSAttemptFailuresValidityIntervalFailed() throws Exception {
    String[] args = {
        "--jar",
        APPMASTER_JAR,
        "--num_containers",
        "1",
        "--shell_command",
        "sleep 8",
        "--master_memory",
        "512",
        "--container_memory",
        "128",
        "--attempt_failures_validity_interval",
        "15000"
    };

    LOG.info("Initializing DS Client");
    // See note in the Sucess variant about the deliberate shadowing.
    Configuration conf = yarnCluster.getConfig();
    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
    Client client = new Client(TestDSSleepingAppMaster.class.getName(),
        new Configuration(conf));

    client.init(args);
    LOG.info("Running DS Client");
    boolean result = client.run();

    LOG.info("Client run completed. Result=" + result);
    // application should be failed
    Assert.assertFalse(result);
  }

  /**
   * Writes a custom log4j properties file that raises the root level to
   * DEBUG, passes it via --log_properties, and checks DEBUG lines appear in
   * container logs and that the client/AM loggers flipped to DEBUG.
   */
  @Test(timeout=90000)
  public void testDSShellWithCustomLogPropertyFile() throws Exception {
    final File basedir =
        new File("target", TestDistributedShell.class.getName());
    final File tmpDir = new File(basedir, "tmpDir");
    tmpDir.mkdirs();
    final File customLogProperty = new File(tmpDir, "custom_log4j.properties");
    if (customLogProperty.exists()) {
      customLogProperty.delete();
    }
    if(!customLogProperty.createNewFile()) {
      Assert.fail("Can not create custom log4j property file.");
    }
    PrintWriter fileWriter = new PrintWriter(customLogProperty);
    // set the output to DEBUG level
    fileWriter.write("log4j.rootLogger=debug,stdout");
    fileWriter.close();
    String[] args = {
        "--jar",
        APPMASTER_JAR,
        "--num_containers",
        "3",
        "--shell_command",
        "echo",
        "--shell_args",
        "HADOOP",
        "--log_properties",
        customLogProperty.getAbsolutePath(),
        "--master_memory",
        "512",
        "--master_vcores",
        "2",
        "--container_memory",
        "128",
        "--container_vcores",
        "1"
    };

    //Before running DS, the default log level is INFO
    final Log LOG_Client =
        LogFactory.getLog(Client.class);
    Assert.assertTrue(LOG_Client.isInfoEnabled());
    Assert.assertFalse(LOG_Client.isDebugEnabled());
    final Log LOG_AM = LogFactory.getLog(ApplicationMaster.class);
    Assert.assertTrue(LOG_AM.isInfoEnabled());
    Assert.assertFalse(LOG_AM.isDebugEnabled());

    LOG.info("Initializing DS Client");
    final Client client =
        new Client(new Configuration(yarnCluster.getConfig()));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    boolean result = client.run();
    LOG.info("Client run completed. Result=" + result);
    // Expect well over 10 DEBUG lines across the 3 container stdout files.
    Assert.assertTrue(verifyContainerLog(3, null, true, "DEBUG") > 10);
    //After DS has finished, the log level should be DEBUG
    Assert.assertTrue(LOG_Client.isInfoEnabled());
    Assert.assertTrue(LOG_Client.isDebugEnabled());
    Assert.assertTrue(LOG_AM.isInfoEnabled());
    Assert.assertTrue(LOG_AM.isDebugEnabled());
  }

  /**
   * Runs a quoted compound shell command and checks only the last command's
   * output reaches the expected stdout content.
   *
   * NOTE(review): this method has no @Test annotation, so JUnit never runs
   * it — confirm whether that is intentional or a lost annotation.
   */
  public void testDSShellWithCommands() throws Exception {

    String[] args = {
        "--jar",
        APPMASTER_JAR,
        "--num_containers",
        "2",
        "--shell_command",
        "\"echo output_ignored;echo output_expected\"",
        "--master_memory",
        "512",
        "--master_vcores",
        "2",
        "--container_memory",
        "128",
        "--container_vcores",
        "1"
    };

    LOG.info("Initializing DS Client");
    final Client client =
        new Client(new Configuration(yarnCluster.getConfig()));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    boolean result = client.run();
    LOG.info("Client run completed. Result=" + result);
    List<String> expectedContent = new ArrayList<String>();
    expectedContent.add("output_expected");
    verifyContainerLog(2, expectedContent, false, "");
  }

  /**
   * Verifies multi-word --shell_args survive argument passing end-to-end by
   * echoing them from 4 containers and checking each stdout.
   */
  @Test(timeout=90000)
  public void testDSShellWithMultipleArgs() throws Exception {
    String[] args = {
        "--jar",
        APPMASTER_JAR,
        "--num_containers",
        "4",
        "--shell_command",
        "echo",
        "--shell_args",
        "HADOOP YARN MAPREDUCE HDFS",
        "--master_memory",
        "512",
        "--master_vcores",
        "2",
        "--container_memory",
        "128",
        "--container_vcores",
        "1"
    };

    LOG.info("Initializing DS Client");
    final Client client =
        new Client(new Configuration(yarnCluster.getConfig()));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    boolean result = client.run();
    LOG.info("Client run completed. Result=" + result);
    List<String> expectedContent = new ArrayList<String>();
    expectedContent.add("HADOOP YARN MAPREDUCE HDFS");
    verifyContainerLog(4, expectedContent, false, "");
  }

  /**
   * Ships a generated shell script via --shell_script and checks its echoed
   * marker shows up in the single container's stdout.
   */
  @Test(timeout=90000)
  public void testDSShellWithShellScript() throws Exception {
    final File basedir =
        new File("target", TestDistributedShell.class.getName());
    final File tmpDir = new File(basedir, "tmpDir");
    tmpDir.mkdirs();
    final File customShellScript = new File(tmpDir, "custom_script.sh");
    if (customShellScript.exists()) {
      customShellScript.delete();
    }
    if (!customShellScript.createNewFile()) {
      Assert.fail("Can not create custom shell script file.");
    }
    PrintWriter fileWriter = new PrintWriter(customShellScript);
    // write a script that echoes a unique marker we can grep for
    fileWriter.write("echo testDSShellWithShellScript");
    fileWriter.close();
    System.out.println(customShellScript.getAbsolutePath());
    String[] args = {
        "--jar",
        APPMASTER_JAR,
        "--num_containers",
        "1",
        "--shell_script",
        customShellScript.getAbsolutePath(),
        "--master_memory",
        "512",
        "--master_vcores",
        "2",
        "--container_memory",
        "128",
        "--container_vcores",
        "1"
    };

    LOG.info("Initializing DS Client");
    final Client client =
        new Client(new Configuration(yarnCluster.getConfig()));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    boolean result = client.run();
    LOG.info("Client run completed. Result=" + result);
    List<String> expectedContent = new ArrayList<String>();
    expectedContent.add("testDSShellWithShellScript");
    verifyContainerLog(1, expectedContent, false, "");
  }

  /**
   * Exercises Client.init()'s argument validation: each malformed argument
   * set must raise IllegalArgumentException with the expected message.
   */
  @Test(timeout=90000)
  public void testDSShellWithInvalidArgs() throws Exception {
    Client client = new Client(new Configuration(yarnCluster.getConfig()));

    LOG.info("Initializing DS Client with no args");
    try {
      client.init(new String[]{});
      Assert.fail("Exception is expected");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue("The throw exception is not expected",
          e.getMessage().contains("No args"));
    }

    LOG.info("Initializing DS Client with no jar file");
    try {
      String[] args = {
          "--num_containers",
          "2",
          "--shell_command",
          Shell.WINDOWS ? "dir" : "ls",
          "--master_memory",
          "512",
          "--container_memory",
          "128"
      };
      client.init(args);
      Assert.fail("Exception is expected");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue("The throw exception is not expected",
          e.getMessage().contains("No jar"));
    }

    LOG.info("Initializing DS Client with no shell command");
    try {
      String[] args = {
          "--jar",
          APPMASTER_JAR,
          "--num_containers",
          "2",
          "--master_memory",
          "512",
          "--container_memory",
          "128"
      };
      client.init(args);
      Assert.fail("Exception is expected");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue("The throw exception is not expected",
          e.getMessage().contains("No shell command"));
    }

    LOG.info("Initializing DS Client with invalid no. of containers");
    try {
      String[] args = {
          "--jar",
          APPMASTER_JAR,
          "--num_containers",
          "-1",
          "--shell_command",
          Shell.WINDOWS ? "dir" : "ls",
          "--master_memory",
          "512",
          "--container_memory",
          "128"
      };
      client.init(args);
      Assert.fail("Exception is expected");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue("The throw exception is not expected",
          e.getMessage().contains("Invalid no. of containers"));
    }

    LOG.info("Initializing DS Client with invalid no. of vcores");
    try {
      String[] args = {
          "--jar",
          APPMASTER_JAR,
          "--num_containers",
          "2",
          "--shell_command",
          Shell.WINDOWS ? "dir" : "ls",
          "--master_memory",
          "512",
          "--master_vcores",
          "-2",
          "--container_memory",
          "128",
          "--container_vcores",
          "1"
      };
      client.init(args);
      Assert.fail("Exception is expected");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue("The throw exception is not expected",
          e.getMessage().contains("Invalid virtual cores specified"));
    }

    LOG.info("Initializing DS Client with --shell_command and --shell_script");
    try {
      String[] args = {
          "--jar",
          APPMASTER_JAR,
          "--num_containers",
          "2",
          "--shell_command",
          Shell.WINDOWS ? "dir" : "ls",
          "--master_memory",
          "512",
          "--master_vcores",
          "2",
          "--container_memory",
          "128",
          "--container_vcores",
          "1",
          "--shell_script",
          "test.sh"
      };
      client.init(args);
      Assert.fail("Exception is expected");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue("The throw exception is not expected",
          e.getMessage().contains("Can not specify shell_command option "
              + "and shell_script option at the same time"));
    }

    LOG.info("Initializing DS Client without --shell_command and --shell_script");
    try {
      String[] args = {
          "--jar",
          APPMASTER_JAR,
          "--num_containers",
          "2",
          "--master_memory",
          "512",
          "--master_vcores",
          "2",
          "--container_memory",
          "128",
          "--container_vcores",
          "1"
      };
      client.init(args);
      Assert.fail("Exception is expected");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue("The throw exception is not expected",
          e.getMessage().contains("No shell command or shell script specified "
              + "to be executed by application master"));
    }
  }

  /**
   * Polls the RM (1s interval, up to ~60s) until at least NUM_NMS
   * NodeManagers have registered. Times out silently — callers rely on later
   * failures if registration never happens.
   */
  protected void waitForNMsToRegister() throws Exception {
    int sec = 60;
    while (sec >= 0) {
      if (yarnCluster.getResourceManager().getRMContext().getRMNodes().size()
          >= NUM_NMS) {
        break;
      }
      Thread.sleep(1000);
      sec--;
    }
  }

  /**
   * Runs with ContainerLaunchFailAppMaster (which injects launch errors) and
   * expects the overall application to report failure.
   */
  @Test(timeout=90000)
  public void testContainerLaunchFailureHandling() throws Exception {
    String[] args = {
        "--jar",
        APPMASTER_JAR,
        "--num_containers",
        "2",
        "--shell_command",
        Shell.WINDOWS ? "dir" : "ls",
        "--master_memory",
        "512",
        "--container_memory",
        "128"
    };

    LOG.info("Initializing DS Client");
    Client client = new Client(ContainerLaunchFailAppMaster.class.getName(),
        new Configuration(yarnCluster.getConfig()));
    boolean initSuccess = client.init(args);
    Assert.assertTrue(initSuccess);
    LOG.info("Running DS Client");
    boolean result = client.run();

    LOG.info("Client run completed. Result=" + result);
    Assert.assertFalse(result);
  }

  /** Smoke-tests that the --debug flag is accepted and the job still runs. */
  @Test(timeout=90000)
  public void testDebugFlag() throws Exception {
    String[] args = {
        "--jar",
        APPMASTER_JAR,
        "--num_containers",
        "2",
        "--shell_command",
        Shell.WINDOWS ? "dir" : "ls",
        "--master_memory",
        "512",
        "--master_vcores",
        "2",
        "--container_memory",
        "128",
        "--container_vcores",
        "1",
        "--debug"
    };

    LOG.info("Initializing DS Client");
    Client client = new Client(new Configuration(yarnCluster.getConfig()));
    Assert.assertTrue(client.init(args));
    LOG.info("Running DS Client");
    Assert.assertTrue(client.run());
  }

  /**
   * Scans NM log dirs for the app directory holding containerNum + 1 entries
   * (AM + containers), then inspects each container's stdout file.
   *
   * @param containerNum    number of worker containers expected
   * @param expectedContent exact lines expected in stdout (count == false)
   * @param count           when true, count occurrences of expectedWord
   *                        instead of matching expectedContent
   * @param expectedWord    word to count when count == true
   * @return number of stdout lines containing expectedWord (0 in match mode)
   */
  private int verifyContainerLog(int containerNum,
      List<String> expectedContent, boolean count, String expectedWord) {
    File logFolder =
        new File(yarnCluster.getNodeManager(0).getConfig()
            .get(YarnConfiguration.NM_LOG_DIRS,
                YarnConfiguration.DEFAULT_NM_LOG_DIRS));

    File[] listOfFiles = logFolder.listFiles();
    int currentContainerLogFileIndex = -1;
    // Search newest-first; the current app's dir has containerNum + 1 entries
    // (one per container plus the AM's).
    for (int i = listOfFiles.length - 1; i >= 0; i--) {
      if (listOfFiles[i].listFiles().length == containerNum + 1) {
        currentContainerLogFileIndex = i;
        break;
      }
    }
    Assert.assertTrue(currentContainerLogFileIndex != -1);
    File[] containerFiles =
        listOfFiles[currentContainerLogFileIndex].listFiles();

    int numOfWords = 0;
    for (int i = 0; i < containerFiles.length; i++) {
      for (File output : containerFiles[i].listFiles()) {
        if (output.getName().trim().contains("stdout")) {
          BufferedReader br = null;
          List<String> stdOutContent = new ArrayList<String>();
          try {

            String sCurrentLine;

            br = new BufferedReader(new FileReader(output));
            int numOfline = 0;
            while ((sCurrentLine = br.readLine()) != null) {
              if (count) {
                if (sCurrentLine.contains(expectedWord)) {
                  numOfWords++;
                }
              } else if (output.getName().trim().equals("stdout")){
                if (! Shell.WINDOWS) {
                  Assert.assertEquals("The current is" + sCurrentLine,
                      expectedContent.get(numOfline), sCurrentLine.trim());
                  numOfline++;
                } else {
                  stdOutContent.add(sCurrentLine.trim());
                }
              }
            }
            /* By executing bat script using cmd /c,
             * it will output all contents from bat script first
             * It is hard for us to do check line by line
             * Simply check whether output from bat file contains
             * all the expected messages
             */
            if (Shell.WINDOWS && !count
                && output.getName().trim().equals("stdout")) {
              Assert.assertTrue(stdOutContent.containsAll(expectedContent));
            }
          } catch (IOException e) {
            // NOTE(review): read errors are only printed, not failed on —
            // a truncated log silently weakens the assertions here.
            e.printStackTrace();
          } finally {
            try {
              if (br != null)
                br.close();
            } catch (IOException ex) {
              ex.printStackTrace();
            }
          }
        }
      }
    }
    return numOfWords;
  }
}
29,337
31.346196
95
java
hadoop
hadoop-master/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSAppMaster.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.applications.distributedshell; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.client.api.AMRMClient; import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync; import org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.Assert; import org.junit.Test; import org.mockito.Matchers; import org.mockito.Mockito; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; /** * A bunch of tests to make sure that the 
container allocations * and releases occur correctly. */ public class TestDSAppMaster { static class TestAppMaster extends ApplicationMaster { private int threadsLaunched = 0; public List<String> yarnShellIds = new ArrayList<String>(); @Override protected Thread createLaunchContainerThread(Container allocatedContainer, String shellId) { threadsLaunched++; launchedContainers.add(allocatedContainer.getId()); yarnShellIds.add(shellId); return new Thread(); } void setNumTotalContainers(int numTotalContainers) { this.numTotalContainers = numTotalContainers; } int getAllocatedContainers() { return this.numAllocatedContainers.get(); } @Override void startTimelineClient(final Configuration conf) throws YarnException, IOException, InterruptedException { timelineClient = null; } } @SuppressWarnings("unchecked") @Test public void testDSAppMasterAllocateHandler() throws Exception { TestAppMaster master = new TestAppMaster(); int targetContainers = 2; AMRMClientAsync mockClient = Mockito.mock(AMRMClientAsync.class); master.setAmRMClient(mockClient); master.setNumTotalContainers(targetContainers); Mockito.doNothing().when(mockClient) .addContainerRequest(Matchers.any(AMRMClient.ContainerRequest.class)); ApplicationMaster.RMCallbackHandler handler = master.getRMCallbackHandler(); List<Container> containers = new ArrayList<>(1); ContainerId id1 = BuilderUtils.newContainerId(1, 1, 1, 1); containers.add(generateContainer(id1)); master.numRequestedContainers.set(targetContainers); // first allocate a single container, everything should be fine handler.onContainersAllocated(containers); Assert.assertEquals("Wrong container allocation count", 1, master.getAllocatedContainers()); Mockito.verifyZeroInteractions(mockClient); Assert.assertEquals("Incorrect number of threads launched", 1, master.threadsLaunched); Assert.assertEquals("Incorrect YARN Shell IDs", Arrays.asList("1"), master.yarnShellIds); // now send 3 extra containers containers.clear(); ContainerId id2 = 
BuilderUtils.newContainerId(1, 1, 1, 2); containers.add(generateContainer(id2)); ContainerId id3 = BuilderUtils.newContainerId(1, 1, 1, 3); containers.add(generateContainer(id3)); ContainerId id4 = BuilderUtils.newContainerId(1, 1, 1, 4); containers.add(generateContainer(id4)); handler.onContainersAllocated(containers); Assert.assertEquals("Wrong final container allocation count", 4, master.getAllocatedContainers()); Assert.assertEquals("Incorrect number of threads launched", 4, master.threadsLaunched); Assert.assertEquals("Incorrect YARN Shell IDs", Arrays.asList("1", "2", "3", "4"), master.yarnShellIds); // make sure we handle completion events correctly List<ContainerStatus> status = new ArrayList<>(); status.add(generateContainerStatus(id1, ContainerExitStatus.SUCCESS)); status.add(generateContainerStatus(id2, ContainerExitStatus.SUCCESS)); status.add(generateContainerStatus(id3, ContainerExitStatus.ABORTED)); status.add(generateContainerStatus(id4, ContainerExitStatus.ABORTED)); handler.onContainersCompleted(status); Assert.assertEquals("Unexpected number of completed containers", targetContainers, master.getNumCompletedContainers()); Assert.assertTrue("Master didn't finish containers as expected", master.getDone()); // test for events from containers we know nothing about // these events should be ignored status = new ArrayList<>(); ContainerId id5 = BuilderUtils.newContainerId(1, 1, 1, 5); status.add(generateContainerStatus(id5, ContainerExitStatus.ABORTED)); Assert.assertEquals("Unexpected number of completed containers", targetContainers, master.getNumCompletedContainers()); Assert.assertTrue("Master didn't finish containers as expected", master.getDone()); status.add(generateContainerStatus(id5, ContainerExitStatus.SUCCESS)); Assert.assertEquals("Unexpected number of completed containers", targetContainers, master.getNumCompletedContainers()); Assert.assertTrue("Master didn't finish containers as expected", master.getDone()); } private Container 
generateContainer(ContainerId cid) { return Container.newInstance(cid, NodeId.newInstance("host", 5000), "host:80", Resource.newInstance(1024, 1), Priority.newInstance(0), null); } private ContainerStatus generateContainerStatus(ContainerId id, int exitStatus) { return ContainerStatus.newInstance(id, ContainerState.COMPLETE, "", exitStatus); } @Test public void testTimelineClientInDSAppMaster() throws Exception { ApplicationMaster appMaster = new ApplicationMaster(); appMaster.appSubmitterUgi = UserGroupInformation.createUserForTesting("foo", new String[]{"bar"}); Configuration conf = new YarnConfiguration(); conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); appMaster.startTimelineClient(conf); Assert.assertEquals(appMaster.appSubmitterUgi, ((TimelineClientImpl)appMaster.timelineClient).getUgi()); } }
7,335
39.530387
80
java