Dataset columns: repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/NoCacheFilter.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
public class NoCacheFilter implements Filter {
@Override
public void init(FilterConfig filterConfig) throws ServletException {
}
@Override
public void doFilter(ServletRequest req, ServletResponse res,
FilterChain chain)
throws IOException, ServletException {
HttpServletResponse httpRes = (HttpServletResponse) res;
httpRes.setHeader("Cache-Control", "no-cache");
long now = System.currentTimeMillis();
httpRes.addDateHeader("Expires", now);
httpRes.addDateHeader("Date", now);
httpRes.addHeader("Pragma", "no-cache");
chain.doFilter(req, res);
}
@Override
public void destroy() {
}
}
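NoCacheFilter simply stamps no-cache headers on every response before delegating down the chain. A minimal sketch of exercising that behavior, assuming Mockito and the servlet API are on the classpath; the class name NoCacheFilterSketch is illustrative, not part of Hadoop:
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import javax.servlet.FilterChain;
import javax.servlet.ServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.http.NoCacheFilter;
public class NoCacheFilterSketch {
  public static void main(String[] args) throws Exception {
    ServletRequest req = mock(ServletRequest.class);
    HttpServletResponse res = mock(HttpServletResponse.class);
    FilterChain chain = mock(FilterChain.class);
    // Run one request through the filter and check the caching headers it sets.
    new NoCacheFilter().doFilter(req, res, chain);
    verify(res).setHeader("Cache-Control", "no-cache");
    verify(res).addHeader("Pragma", "no-cache");
    verify(chain).doFilter(req, res);
  }
}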
file_length: 1,790 | avg_line_length: 32.792453 | max_line_length: 75 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/package-info.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
@InterfaceStability.Unstable
package org.apache.hadoop.http;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
file_length: 1,052 | avg_line_length: 44.782609 | max_line_length: 75 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import org.apache.commons.io.Charsets;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
/**
* This class is responsible for quoting HTML characters.
*/
public class HtmlQuoting {
private static final byte[] ampBytes = "&amp;".getBytes(Charsets.UTF_8);
private static final byte[] aposBytes = "&apos;".getBytes(Charsets.UTF_8);
private static final byte[] gtBytes = "&gt;".getBytes(Charsets.UTF_8);
private static final byte[] ltBytes = "&lt;".getBytes(Charsets.UTF_8);
private static final byte[] quotBytes = "&quot;".getBytes(Charsets.UTF_8);
/**
* Does the given string need to be quoted?
* @param data the string to check
* @param off the starting position
* @param len the number of bytes to check
* @return does the string contain any of the active html characters?
*/
public static boolean needsQuoting(byte[] data, int off, int len) {
for(int i=off; i< off+len; ++i) {
switch(data[i]) {
case '&':
case '<':
case '>':
case '\'':
case '"':
return true;
default:
break;
}
}
return false;
}
/**
* Does the given string need to be quoted?
* @param str the string to check
* @return does the string contain any of the active html characters?
*/
public static boolean needsQuoting(String str) {
if (str == null) {
return false;
}
byte[] bytes = str.getBytes(Charsets.UTF_8);
return needsQuoting(bytes, 0 , bytes.length);
}
/**
* Quote all of the active HTML characters in the given string as they
* are added to the buffer.
* @param output the stream to write the output to
* @param buffer the byte array to take the characters from
* @param off the index of the first byte to quote
* @param len the number of bytes to quote
*/
public static void quoteHtmlChars(OutputStream output, byte[] buffer,
int off, int len) throws IOException {
for(int i=off; i < off+len; i++) {
switch (buffer[i]) {
case '&': output.write(ampBytes); break;
case '<': output.write(ltBytes); break;
case '>': output.write(gtBytes); break;
case '\'': output.write(aposBytes); break;
case '"': output.write(quotBytes); break;
default: output.write(buffer, i, 1);
}
}
}
/**
* Quote the given item to make it html-safe.
* @param item the string to quote
* @return the quoted string
*/
public static String quoteHtmlChars(String item) {
if (item == null) {
return null;
}
byte[] bytes = item.getBytes(Charsets.UTF_8);
if (needsQuoting(bytes, 0, bytes.length)) {
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
try {
quoteHtmlChars(buffer, bytes, 0, bytes.length);
return buffer.toString("UTF-8");
} catch (IOException ioe) {
// Won't happen, since it is a bytearrayoutputstream
return null;
}
} else {
return item;
}
}
/**
* Return an output stream that quotes all of the output.
* @param out the stream to write the quoted output to
* @return a new stream that the application should write to
* @throws IOException if the underlying output fails
*/
public static OutputStream quoteOutputStream(final OutputStream out
) throws IOException {
return new OutputStream() {
private byte[] data = new byte[1];
@Override
public void write(byte[] data, int off, int len) throws IOException {
quoteHtmlChars(out, data, off, len);
}
@Override
public void write(int b) throws IOException {
data[0] = (byte) b;
quoteHtmlChars(out, data, 0, 1);
}
@Override
public void flush() throws IOException {
out.flush();
}
@Override
public void close() throws IOException {
out.close();
}
};
}
/**
* Remove HTML quoting from a string.
* @param item the string to unquote
* @return the unquoted string
*/
public static String unquoteHtmlChars(String item) {
if (item == null) {
return null;
}
int next = item.indexOf('&');
// nothing was quoted
if (next == -1) {
return item;
}
int len = item.length();
int posn = 0;
StringBuilder buffer = new StringBuilder();
while (next != -1) {
buffer.append(item.substring(posn, next));
if (item.startsWith("&amp;", next)) {
buffer.append('&');
next += 5;
} else if (item.startsWith("&apos;", next)) {
buffer.append('\'');
next += 6;
} else if (item.startsWith("&gt;", next)) {
buffer.append('>');
next += 4;
} else if (item.startsWith("&lt;", next)) {
buffer.append('<');
next += 4;
} else if (item.startsWith("&quot;", next)) {
buffer.append('"');
next += 6;
} else {
int end = item.indexOf(';', next)+1;
if (end == 0) {
end = len;
}
throw new IllegalArgumentException("Bad HTML quoting for " +
item.substring(next,end));
}
posn = next;
next = item.indexOf('&', posn);
}
buffer.append(item.substring(posn, len));
return buffer.toString();
}
public static void main(String[] args) throws Exception {
for(String arg:args) {
System.out.println("Original: " + arg);
String quoted = quoteHtmlChars(arg);
System.out.println("Quoted: "+ quoted);
String unquoted = unquoteHtmlChars(quoted);
System.out.println("Unquoted: " + unquoted);
System.out.println();
}
}
}
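A small usage sketch of the quote/unquote round trip above; the class name HtmlQuotingExample is illustrative, and the expected output follows directly from the entity tables defined in HtmlQuoting:
import org.apache.hadoop.http.HtmlQuoting;
public class HtmlQuotingExample {
  public static void main(String[] args) {
    String raw = "<b>Tom & \"Jerry's\"</b>";
    String quoted = HtmlQuoting.quoteHtmlChars(raw);
    // Prints: &lt;b&gt;Tom &amp; &quot;Jerry&apos;s&quot;&lt;/b&gt;
    System.out.println(quoted);
    // unquoteHtmlChars reverses the transformation.
    System.out.println(raw.equals(HtmlQuoting.unquoteHtmlChars(quoted))); // true
  }
}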
file_length: 6,611 | avg_line_length: 30.336493 | max_line_length: 76 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.mortbay.jetty.servlet.DefaultServlet;
/**
* General servlet which is admin-authorized.
*
*/
public class AdminAuthorizedServlet extends DefaultServlet {
private static final long serialVersionUID = 1L;
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
// Do the authorization
if (HttpServer2.hasAdministratorAccess(getServletContext(), request,
response)) {
// Authorization is done. Just call super.
super.doGet(request, response);
}
}
}
file_length: 1,577 | avg_line_length: 32.574468 | max_line_length: 80 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.URL;
import java.security.GeneralSecurityException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.net.ssl.SSLServerSocketFactory;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.ConfServlet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.jmx.JMXJsonServlet;
import org.apache.hadoop.log.LogLevel;
import org.apache.hadoop.metrics.MetricsServlet;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Shell;
import org.mortbay.io.Buffer;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Handler;
import org.mortbay.jetty.MimeTypes;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.handler.ContextHandler;
import org.mortbay.jetty.handler.ContextHandlerCollection;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.security.SslSocketConnector;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.DefaultServlet;
import org.mortbay.jetty.servlet.FilterHolder;
import org.mortbay.jetty.servlet.FilterMapping;
import org.mortbay.jetty.servlet.ServletHandler;
import org.mortbay.jetty.servlet.ServletHolder;
import org.mortbay.jetty.webapp.WebAppContext;
import org.mortbay.thread.QueuedThreadPool;
import org.mortbay.util.MultiException;
import com.sun.jersey.spi.container.servlet.ServletContainer;
/**
* Create a Jetty embedded server to answer http requests. The primary goal
* is to serve up status information for the server.
* There are three contexts:
* "/logs/" -> points to the log directory
* "/static/" -> points to common static files (src/webapps/static)
* "/" -> the jsp server code from (src/webapps/<name>)
*
* This class comes from the HttpServer in branch-2.2. HDFS and YARN have
* moved to use HttpServer2. This class exists for compatibility reasons; it
* stays in hadoop-common so that HBase can work with both Hadoop 2.2
* and Hadoop 2.3. See HBASE-10336 for more details.
*/
@InterfaceAudience.LimitedPrivate({"HBase"})
@InterfaceStability.Evolving
@Deprecated
public class HttpServer implements FilterContainer {
public static final Log LOG = LogFactory.getLog(HttpServer.class);
static final String FILTER_INITIALIZER_PROPERTY
= "hadoop.http.filter.initializers";
static final String HTTP_MAX_THREADS = "hadoop.http.max.threads";
// The ServletContext attribute where the daemon Configuration
// gets stored.
public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf";
public static final String ADMINS_ACL = "admins.acl";
public static final String SPNEGO_FILTER = "SpnegoFilter";
public static final String NO_CACHE_FILTER = "NoCacheFilter";
public static final String BIND_ADDRESS = "bind.address";
private AccessControlList adminsAcl;
private SSLFactory sslFactory;
protected final Server webServer;
protected final Connector listener;
protected final WebAppContext webAppContext;
protected final boolean findPort;
protected final Map<Context, Boolean> defaultContexts =
new HashMap<Context, Boolean>();
protected final List<String> filterNames = new ArrayList<String>();
private static final int MAX_RETRIES = 10;
static final String STATE_DESCRIPTION_ALIVE = " - alive";
static final String STATE_DESCRIPTION_NOT_LIVE = " - not live";
private final boolean listenerStartedExternally;
/** Same as this(name, bindAddress, port, findPort, null); */
public HttpServer(String name, String bindAddress, int port, boolean findPort
) throws IOException {
this(name, bindAddress, port, findPort, new Configuration());
}
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, Connector connector) throws IOException {
this(name, bindAddress, port, findPort, conf, null, connector, null);
}
/**
* Create a status server on the given port. Allows you to specify the
* path specifications that this server will be serving so that they will be
* added to the filters properly.
*
* @param name The name of the server
* @param bindAddress The address for this server
* @param port The port to use on the server
* @param findPort whether the server should start at the given port and
* increment by 1 until it finds a free port.
* @param conf Configuration
* @param pathSpecs Path specifications that this httpserver will be serving.
* These will be added to any filters.
*/
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, String[] pathSpecs) throws IOException {
this(name, bindAddress, port, findPort, conf, null, null, pathSpecs);
}
/**
* Create a status server on the given port.
* The jsp scripts are taken from src/webapps/<name>.
* @param name The name of the server
* @param port The port to use on the server
* @param findPort whether the server should start at the given port and
* increment by 1 until it finds a free port.
* @param conf Configuration
*/
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf) throws IOException {
this(name, bindAddress, port, findPort, conf, null, null, null);
}
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl)
throws IOException {
this(name, bindAddress, port, findPort, conf, adminsAcl, null, null);
}
/**
* Create a status server on the given port.
* The jsp scripts are taken from src/webapps/<name>.
* @param name The name of the server
* @param bindAddress The address for this server
* @param port The port to use on the server
* @param findPort whether the server should start at the given port and
* increment by 1 until it finds a free port.
* @param conf Configuration
* @param adminsAcl {@link AccessControlList} of the admins
*/
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl,
Connector connector) throws IOException {
this(name, bindAddress, port, findPort, conf, adminsAcl, connector, null);
}
/**
* Create a status server on the given port.
* The jsp scripts are taken from src/webapps/<name>.
* @param name The name of the server
* @param bindAddress The address for this server
* @param port The port to use on the server
* @param findPort whether the server should start at the given port and
* increment by 1 until it finds a free port.
* @param conf Configuration
* @param adminsAcl {@link AccessControlList} of the admins
* @param connector A jetty connection listener
* @param pathSpecs Path specifications that this httpserver will be serving.
* These will be added to any filters.
*/
public HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl,
Connector connector, String[] pathSpecs) throws IOException {
webServer = new Server();
this.findPort = findPort;
this.adminsAcl = adminsAcl;
if(connector == null) {
listenerStartedExternally = false;
if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY,
CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT)) {
sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
try {
sslFactory.init();
} catch (GeneralSecurityException ex) {
throw new IOException(ex);
}
SslSocketConnector sslListener = new SslSocketConnector() {
@Override
protected SSLServerSocketFactory createFactory() throws Exception {
return sslFactory.createSSLServerSocketFactory();
}
};
listener = sslListener;
} else {
listener = createBaseListener(conf);
}
listener.setHost(bindAddress);
listener.setPort(port);
} else {
listenerStartedExternally = true;
listener = connector;
}
webServer.addConnector(listener);
int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1);
// If HTTP_MAX_THREADS is not configured, QueuedThreadPool() will use the
// default value (currently 250).
QueuedThreadPool threadPool = maxThreads == -1 ?
new QueuedThreadPool() : new QueuedThreadPool(maxThreads);
threadPool.setDaemon(true);
webServer.setThreadPool(threadPool);
final String appDir = getWebAppsPath(name);
ContextHandlerCollection contexts = new ContextHandlerCollection();
webServer.setHandler(contexts);
webAppContext = new WebAppContext();
webAppContext.setDisplayName(name);
webAppContext.setContextPath("/");
webAppContext.setWar(appDir + "/" + name);
webAppContext.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
webAppContext.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
addNoCacheFilter(webAppContext);
webServer.addHandler(webAppContext);
addDefaultApps(contexts, appDir, conf);
addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
final FilterInitializer[] initializers = getFilterInitializers(conf);
if (initializers != null) {
conf = new Configuration(conf);
conf.set(BIND_ADDRESS, bindAddress);
for(FilterInitializer c : initializers) {
c.initFilter(this, conf);
}
}
addDefaultServlets();
if (pathSpecs != null) {
for (String path : pathSpecs) {
LOG.info("adding path spec: " + path);
addFilterPathMapping(path, webAppContext);
}
}
}
@SuppressWarnings("unchecked")
private void addNoCacheFilter(WebAppContext ctxt) {
defineFilter(ctxt, NO_CACHE_FILTER,
NoCacheFilter.class.getName(), Collections.EMPTY_MAP, new String[] { "/*"});
}
/**
* Create a required listener for the Jetty instance listening on the port
* provided. This wrapper and all subclasses must create at least one
* listener.
*/
public Connector createBaseListener(Configuration conf) throws IOException {
return HttpServer.createDefaultChannelConnector();
}
private static class SelectChannelConnectorWithSafeStartup
extends SelectChannelConnector {
public SelectChannelConnectorWithSafeStartup() {
super();
}
/* Override the broken isRunning() method (JETTY-1316). This bug is present
* in 6.1.26. For the versions without this bug, it adds insignificant
* overhead.
*/
@Override
public boolean isRunning() {
if (super.isRunning()) {
return true;
}
// We might be hitting JETTY-1316. If the internal state changed from
// STARTING to STARTED in the middle of the check, the above call may
// return false. Check it one more time.
LOG.warn("HttpServer Acceptor: isRunning is false. Rechecking.");
try {
Thread.sleep(10);
} catch (InterruptedException ie) {
// Mark this thread as interrupted. Someone up in the call chain
// might care.
Thread.currentThread().interrupt();
}
boolean runState = super.isRunning();
LOG.warn("HttpServer Acceptor: isRunning is " + runState);
return runState;
}
}
@InterfaceAudience.Private
public static Connector createDefaultChannelConnector() {
SelectChannelConnector ret = new SelectChannelConnectorWithSafeStartup();
ret.setLowResourceMaxIdleTime(10000);
ret.setAcceptQueueSize(128);
ret.setResolveNames(false);
ret.setUseDirectBuffers(false);
if(Shell.WINDOWS) {
// result of setting the SO_REUSEADDR flag is different on Windows
// http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
// without this 2 NN's can start on the same machine and listen on
// the same port with indeterminate routing of incoming requests to them
ret.setReuseAddress(false);
}
ret.setHeaderBufferSize(1024*64);
return ret;
}
/** Get an array of FilterConfiguration specified in the conf */
private static FilterInitializer[] getFilterInitializers(Configuration conf) {
if (conf == null) {
return null;
}
Class<?>[] classes = conf.getClasses(FILTER_INITIALIZER_PROPERTY);
if (classes == null) {
return null;
}
FilterInitializer[] initializers = new FilterInitializer[classes.length];
for(int i = 0; i < classes.length; i++) {
initializers[i] = (FilterInitializer)ReflectionUtils.newInstance(
classes[i], conf);
}
return initializers;
}
/**
* Add default apps.
* @param appDir The application directory
* @throws IOException
*/
protected void addDefaultApps(ContextHandlerCollection parent,
final String appDir, Configuration conf) throws IOException {
// set up the context for "/logs/" if "hadoop.log.dir" property is defined.
String logDir = System.getProperty("hadoop.log.dir");
if (logDir != null) {
Context logContext = new Context(parent, "/logs");
logContext.setResourceBase(logDir);
logContext.addServlet(AdminAuthorizedServlet.class, "/*");
if (conf.getBoolean(
CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
logContext.getInitParams().put(
"org.mortbay.jetty.servlet.Default.aliases", "true");
}
logContext.setDisplayName("logs");
setContextAttributes(logContext, conf);
addNoCacheFilter(webAppContext);
defaultContexts.put(logContext, true);
}
// set up the context for "/static/*"
Context staticContext = new Context(parent, "/static");
staticContext.setResourceBase(appDir + "/static");
staticContext.addServlet(DefaultServlet.class, "/*");
staticContext.setDisplayName("static");
setContextAttributes(staticContext, conf);
defaultContexts.put(staticContext, true);
}
private void setContextAttributes(Context context, Configuration conf) {
context.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
context.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
}
/**
* Add default servlets.
*/
protected void addDefaultServlets() {
// set up default servlets
addServlet("stacks", "/stacks", StackServlet.class);
addServlet("logLevel", "/logLevel", LogLevel.Servlet.class);
addServlet("metrics", "/metrics", MetricsServlet.class);
addServlet("jmx", "/jmx", JMXJsonServlet.class);
addServlet("conf", "/conf", ConfServlet.class);
}
public void addContext(Context ctxt, boolean isFiltered)
throws IOException {
webServer.addHandler(ctxt);
addNoCacheFilter(webAppContext);
defaultContexts.put(ctxt, isFiltered);
}
/**
* Add a context
* @param pathSpec The path spec for the context
* @param dir The directory containing the context
* @param isFiltered if true, the servlet is added to the filter path mapping
* @throws IOException
*/
protected void addContext(String pathSpec, String dir, boolean isFiltered) throws IOException {
if (0 == webServer.getHandlers().length) {
throw new RuntimeException("Couldn't find handler");
}
WebAppContext webAppCtx = new WebAppContext();
webAppCtx.setContextPath(pathSpec);
webAppCtx.setWar(dir);
addContext(webAppCtx, true);
}
/**
* Set a value in the webapp context. These values are available to the jsp
* pages as "application.getAttribute(name)".
* @param name The name of the attribute
* @param value The value of the attribute
*/
public void setAttribute(String name, Object value) {
webAppContext.setAttribute(name, value);
}
/**
* Add a Jersey resource package.
* @param packageName The Java package name containing the Jersey resource.
* @param pathSpec The path spec for the servlet
*/
public void addJerseyResourcePackage(final String packageName,
final String pathSpec) {
LOG.info("addJerseyResourcePackage: packageName=" + packageName
+ ", pathSpec=" + pathSpec);
final ServletHolder sh = new ServletHolder(ServletContainer.class);
sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
"com.sun.jersey.api.core.PackagesResourceConfig");
sh.setInitParameter("com.sun.jersey.config.property.packages", packageName);
webAppContext.addServlet(sh, pathSpec);
}
/**
* Add a servlet in the server.
* @param name The name of the servlet (can be passed as null)
* @param pathSpec The path spec for the servlet
* @param clazz The servlet class
*/
public void addServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz) {
addInternalServlet(name, pathSpec, clazz, false);
addFilterPathMapping(pathSpec, webAppContext);
}
/**
* Add an internal servlet in the server.
* Note: This method is to be used for adding servlets that facilitate
* internal communication and not for user facing functionality. For
* servlets added using this method, filters are not enabled.
*
* @param name The name of the servlet (can be passed as null)
* @param pathSpec The path spec for the servlet
* @param clazz The servlet class
*/
public void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz) {
addInternalServlet(name, pathSpec, clazz, false);
}
/**
* Add an internal servlet in the server, specifying whether or not to
* protect with Kerberos authentication.
* Note: This method is to be used for adding servlets that facilitate
* internal communication and not for user facing functionality. For
* servlets added using this method, filters (except internal Kerberos
* filters) are not enabled.
*
* @param name The name of the servlet (can be passed as null)
* @param pathSpec The path spec for the servlet
* @param clazz The servlet class
* @param requireAuth Require Kerberos authenticate to access servlet
*/
public void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz, boolean requireAuth) {
ServletHolder holder = new ServletHolder(clazz);
if (name != null) {
holder.setName(name);
}
webAppContext.addServlet(holder, pathSpec);
if(requireAuth && UserGroupInformation.isSecurityEnabled()) {
LOG.info("Adding Kerberos (SPNEGO) filter to " + name);
ServletHandler handler = webAppContext.getServletHandler();
FilterMapping fmap = new FilterMapping();
fmap.setPathSpec(pathSpec);
fmap.setFilterName(SPNEGO_FILTER);
fmap.setDispatches(Handler.ALL);
handler.addFilterMapping(fmap);
}
}
@Override
public void addFilter(String name, String classname,
Map<String, String> parameters) {
final String[] USER_FACING_URLS = { "*.html", "*.jsp" };
defineFilter(webAppContext, name, classname, parameters, USER_FACING_URLS);
LOG.info("Added filter " + name + " (class=" + classname
+ ") to context " + webAppContext.getDisplayName());
final String[] ALL_URLS = { "/*" };
for (Map.Entry<Context, Boolean> e : defaultContexts.entrySet()) {
if (e.getValue()) {
Context ctx = e.getKey();
defineFilter(ctx, name, classname, parameters, ALL_URLS);
LOG.info("Added filter " + name + " (class=" + classname
+ ") to context " + ctx.getDisplayName());
}
}
filterNames.add(name);
}
@Override
public void addGlobalFilter(String name, String classname,
Map<String, String> parameters) {
final String[] ALL_URLS = { "/*" };
defineFilter(webAppContext, name, classname, parameters, ALL_URLS);
for (Context ctx : defaultContexts.keySet()) {
defineFilter(ctx, name, classname, parameters, ALL_URLS);
}
LOG.info("Added global filter '" + name + "' (class=" + classname + ")");
}
/**
* Define a filter for a context and set up default url mappings.
*/
public void defineFilter(Context ctx, String name,
String classname, Map<String,String> parameters, String[] urls) {
FilterHolder holder = new FilterHolder();
holder.setName(name);
holder.setClassName(classname);
holder.setInitParameters(parameters);
FilterMapping fmap = new FilterMapping();
fmap.setPathSpecs(urls);
fmap.setDispatches(Handler.ALL);
fmap.setFilterName(name);
ServletHandler handler = ctx.getServletHandler();
handler.addFilter(holder, fmap);
}
/**
* Add the path spec to the filter path mapping.
* @param pathSpec The path spec
* @param webAppCtx The WebApplicationContext to add to
*/
protected void addFilterPathMapping(String pathSpec,
Context webAppCtx) {
ServletHandler handler = webAppCtx.getServletHandler();
for(String name : filterNames) {
FilterMapping fmap = new FilterMapping();
fmap.setPathSpec(pathSpec);
fmap.setFilterName(name);
fmap.setDispatches(Handler.ALL);
handler.addFilterMapping(fmap);
}
}
/**
* Get the value in the webapp context.
* @param name The name of the attribute
* @return The value of the attribute
*/
public Object getAttribute(String name) {
return webAppContext.getAttribute(name);
}
public WebAppContext getWebAppContext(){
return this.webAppContext;
}
/**
* Get the pathname to the webapps files.
* @param appName eg "secondary" or "datanode"
* @return the pathname as a URL
* @throws FileNotFoundException if 'webapps' directory cannot be found on CLASSPATH.
*/
protected String getWebAppsPath(String appName) throws FileNotFoundException {
URL url = getClass().getClassLoader().getResource("webapps/" + appName);
if (url == null)
throw new FileNotFoundException("webapps/" + appName
+ " not found in CLASSPATH");
String urlString = url.toString();
return urlString.substring(0, urlString.lastIndexOf('/'));
}
/**
* Get the port that the server is on
* @return the port
*/
public int getPort() {
return webServer.getConnectors()[0].getLocalPort();
}
/**
* Set the min, max number of worker threads (simultaneous connections).
*/
public void setThreads(int min, int max) {
QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool() ;
pool.setMinThreads(min);
pool.setMaxThreads(max);
}
/**
* Configure an ssl listener on the server.
* @param addr address to listen on
* @param keystore location of the keystore
* @param storPass password for the keystore
* @param keyPass password for the key
* @deprecated Use {@link #addSslListener(InetSocketAddress, Configuration, boolean)}
*/
@Deprecated
public void addSslListener(InetSocketAddress addr, String keystore,
String storPass, String keyPass) throws IOException {
if (webServer.isStarted()) {
throw new IOException("Failed to add ssl listener");
}
SslSocketConnector sslListener = new SslSocketConnector();
sslListener.setHost(addr.getHostName());
sslListener.setPort(addr.getPort());
sslListener.setKeystore(keystore);
sslListener.setPassword(storPass);
sslListener.setKeyPassword(keyPass);
webServer.addConnector(sslListener);
}
/**
* Configure an ssl listener on the server.
* @param addr address to listen on
* @param sslConf conf to retrieve ssl options
* @param needCertsAuth whether x509 certificate authentication is required
*/
public void addSslListener(InetSocketAddress addr, Configuration sslConf,
boolean needCertsAuth) throws IOException {
if (webServer.isStarted()) {
throw new IOException("Failed to add ssl listener");
}
if (needCertsAuth) {
// setting up SSL truststore for authenticating clients
System.setProperty("javax.net.ssl.trustStore", sslConf.get(
"ssl.server.truststore.location", ""));
System.setProperty("javax.net.ssl.trustStorePassword", sslConf.get(
"ssl.server.truststore.password", ""));
System.setProperty("javax.net.ssl.trustStoreType", sslConf.get(
"ssl.server.truststore.type", "jks"));
}
SslSocketConnector sslListener = new SslSocketConnector();
sslListener.setHost(addr.getHostName());
sslListener.setPort(addr.getPort());
sslListener.setKeystore(sslConf.get("ssl.server.keystore.location"));
sslListener.setPassword(sslConf.get("ssl.server.keystore.password", ""));
sslListener.setKeyPassword(sslConf.get("ssl.server.keystore.keypassword", ""));
sslListener.setKeystoreType(sslConf.get("ssl.server.keystore.type", "jks"));
sslListener.setNeedClientAuth(needCertsAuth);
webServer.addConnector(sslListener);
}
protected void initSpnego(Configuration conf,
String usernameConfKey, String keytabConfKey) throws IOException {
Map<String, String> params = new HashMap<String, String>();
String principalInConf = conf.get(usernameConfKey);
if (principalInConf != null && !principalInConf.isEmpty()) {
params.put("kerberos.principal",
SecurityUtil.getServerPrincipal(principalInConf, listener.getHost()));
}
String httpKeytab = conf.get(keytabConfKey);
if (httpKeytab != null && !httpKeytab.isEmpty()) {
params.put("kerberos.keytab", httpKeytab);
}
params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
defineFilter(webAppContext, SPNEGO_FILTER,
AuthenticationFilter.class.getName(), params, null);
}
/**
* Start the server. Does not wait for the server to start.
*/
public void start() throws IOException {
try {
try {
openListener();
LOG.info("Jetty bound to port " + listener.getLocalPort());
webServer.start();
} catch (IOException ex) {
LOG.info("HttpServer.start() threw a non Bind IOException", ex);
throw ex;
} catch (MultiException ex) {
LOG.info("HttpServer.start() threw a MultiException", ex);
throw ex;
}
// Make sure there are no handler failures.
Handler[] handlers = webServer.getHandlers();
for (int i = 0; i < handlers.length; i++) {
if (handlers[i].isFailed()) {
throw new IOException(
"Problem in starting http server. Server handlers failed");
}
}
// Make sure there are no errors initializing the context.
Throwable unavailableException = webAppContext.getUnavailableException();
if (unavailableException != null) {
// Have to stop the webserver, or else its non-daemon threads
// will hang forever.
webServer.stop();
throw new IOException("Unable to initialize WebAppContext",
unavailableException);
}
} catch (IOException e) {
throw e;
} catch (Exception e) {
throw new IOException("Problem starting http server", e);
}
}
/**
* Open the main listener for the server
* @throws Exception
*/
void openListener() throws Exception {
if (listener.getLocalPort() != -1) { // it's already bound
return;
}
if (listenerStartedExternally) { // Expect that listener was started securely
throw new Exception("Expected webserver's listener to be started " +
"previously but wasn't");
}
int port = listener.getPort();
while (true) {
// jetty has a bug where you can't reopen a listener that previously
// failed to open w/o issuing a close first, even if the port is changed
try {
listener.close();
listener.open();
break;
} catch (BindException ex) {
if (port == 0 || !findPort) {
BindException be = new BindException(
"Port in use: " + listener.getHost() + ":" + listener.getPort());
be.initCause(ex);
throw be;
}
}
// try the next port number
listener.setPort(++port);
Thread.sleep(100);
}
}
/**
* Return the bind address of the listener.
* @return InetSocketAddress of the listener
*/
public InetSocketAddress getListenerAddress() {
int port = listener.getLocalPort();
if (port == -1) { // not bound, return requested port
port = listener.getPort();
}
return new InetSocketAddress(listener.getHost(), port);
}
/**
* stop the server
*/
public void stop() throws Exception {
MultiException exception = null;
try {
listener.close();
} catch (Exception e) {
LOG.error("Error while stopping listener for webapp"
+ webAppContext.getDisplayName(), e);
exception = addMultiException(exception, e);
}
try {
if (sslFactory != null) {
sslFactory.destroy();
}
} catch (Exception e) {
LOG.error("Error while destroying the SSLFactory"
+ webAppContext.getDisplayName(), e);
exception = addMultiException(exception, e);
}
try {
// clear & stop webAppContext attributes to avoid memory leaks.
webAppContext.clearAttributes();
webAppContext.stop();
} catch (Exception e) {
LOG.error("Error while stopping web app context for webapp "
+ webAppContext.getDisplayName(), e);
exception = addMultiException(exception, e);
}
try {
webServer.stop();
} catch (Exception e) {
LOG.error("Error while stopping web server for webapp "
+ webAppContext.getDisplayName(), e);
exception = addMultiException(exception, e);
}
if (exception != null) {
exception.ifExceptionThrow();
}
}
private MultiException addMultiException(MultiException exception, Exception e) {
if(exception == null){
exception = new MultiException();
}
exception.add(e);
return exception;
}
public void join() throws InterruptedException {
webServer.join();
}
/**
* Test for the availability of the web server
* @return true if the web server is started, false otherwise
*/
public boolean isAlive() {
return webServer != null && webServer.isStarted();
}
/**
* Return the host and port of the HttpServer, if live
* @return the classname and any HTTP URL
*/
@Override
public String toString() {
return listener != null ?
("HttpServer at http://" + listener.getHost() + ":" + listener.getLocalPort() + "/"
+ (isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE))
: "Inactive HttpServer";
}
/**
* Checks the user has privileges to access to instrumentation servlets.
* <p/>
* If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE
* (default value) it always returns TRUE.
* <p/>
* If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE
* it will check whether the current user is in the admin ACLs. If the user is
* in the admin ACLs it returns TRUE, otherwise it returns FALSE.
*
* @param servletContext the servlet context.
* @param request the servlet request.
* @param response the servlet response.
* @return TRUE/FALSE based on the logic described above.
*/
public static boolean isInstrumentationAccessAllowed(
ServletContext servletContext, HttpServletRequest request,
HttpServletResponse response) throws IOException {
Configuration conf =
(Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
boolean access = true;
boolean adminAccess = conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
false);
if (adminAccess) {
access = hasAdministratorAccess(servletContext, request, response);
}
return access;
}
/**
* Does the user sending the HttpServletRequest have the administrator ACLs? If
* not, the response will be modified to send an error to the user.
*
* @param servletContext
* @param request
* @param response used to send the error response if user does not have admin access.
* @return true if admin-authorized, false otherwise
* @throws IOException
*/
public static boolean hasAdministratorAccess(
ServletContext servletContext, HttpServletRequest request,
HttpServletResponse response) throws IOException {
Configuration conf =
(Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
// If there is no authorization, anybody has administrator access.
if (!conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
return true;
}
String remoteUser = request.getRemoteUser();
if (remoteUser == null) {
response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
"Unauthenticated users are not " +
"authorized to access this page.");
return false;
}
if (servletContext.getAttribute(ADMINS_ACL) != null &&
!userHasAdministratorAccess(servletContext, remoteUser)) {
response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
+ remoteUser + " is unauthorized to access this page.");
return false;
}
return true;
}
/**
* Get the admin ACLs from the given ServletContext and check if the given
* user is in the ACL.
*
* @param servletContext the context containing the admin ACL.
* @param remoteUser the remote user to check for.
* @return true if the user is present in the ACL, false if no ACL is set or
* the user is not present
*/
public static boolean userHasAdministratorAccess(ServletContext servletContext,
String remoteUser) {
AccessControlList adminsAcl = (AccessControlList) servletContext
.getAttribute(ADMINS_ACL);
UserGroupInformation remoteUserUGI =
UserGroupInformation.createRemoteUser(remoteUser);
return adminsAcl != null && adminsAcl.isUserAllowed(remoteUserUGI);
}
/**
* A very simple servlet to serve up a text representation of the current
* stack traces. It both returns the stacks to the caller and logs them.
* Currently the stacks are captured sequentially for the response and the log,
* so the two may not contain exactly the same data.
*/
public static class StackServlet extends HttpServlet {
private static final long serialVersionUID = -6284183679759467039L;
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
request, response)) {
return;
}
response.setContentType("text/plain; charset=UTF-8");
try (PrintStream out = new PrintStream(
response.getOutputStream(), false, "UTF-8")) {
ReflectionUtils.printThreadInfo(out, "");
}
ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1);
}
}
/**
* A Servlet input filter that quotes all HTML active characters in the
* parameter names and values. The goal is to quote the characters to make
* all of the servlets resistant to cross-site scripting attacks.
*/
public static class QuotingInputFilter implements Filter {
private FilterConfig config;
public static class RequestQuoter extends HttpServletRequestWrapper {
private final HttpServletRequest rawRequest;
public RequestQuoter(HttpServletRequest rawRequest) {
super(rawRequest);
this.rawRequest = rawRequest;
}
/**
* Return the set of parameter names, quoting each name.
*/
@SuppressWarnings("unchecked")
@Override
public Enumeration<String> getParameterNames() {
return new Enumeration<String>() {
private Enumeration<String> rawIterator =
rawRequest.getParameterNames();
@Override
public boolean hasMoreElements() {
return rawIterator.hasMoreElements();
}
@Override
public String nextElement() {
return HtmlQuoting.quoteHtmlChars(rawIterator.nextElement());
}
};
}
/**
* Unquote the name and quote the value.
*/
@Override
public String getParameter(String name) {
return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter
(HtmlQuoting.unquoteHtmlChars(name)));
}
@Override
public String[] getParameterValues(String name) {
String unquoteName = HtmlQuoting.unquoteHtmlChars(name);
String[] unquoteValue = rawRequest.getParameterValues(unquoteName);
if (unquoteValue == null) {
return null;
}
String[] result = new String[unquoteValue.length];
for(int i=0; i < result.length; ++i) {
result[i] = HtmlQuoting.quoteHtmlChars(unquoteValue[i]);
}
return result;
}
@SuppressWarnings("unchecked")
@Override
public Map<String, String[]> getParameterMap() {
Map<String, String[]> result = new HashMap<String,String[]>();
Map<String, String[]> raw = rawRequest.getParameterMap();
for (Map.Entry<String,String[]> item: raw.entrySet()) {
String[] rawValue = item.getValue();
String[] cookedValue = new String[rawValue.length];
for(int i=0; i< rawValue.length; ++i) {
cookedValue[i] = HtmlQuoting.quoteHtmlChars(rawValue[i]);
}
result.put(HtmlQuoting.quoteHtmlChars(item.getKey()), cookedValue);
}
return result;
}
/**
* Quote the url so that users specifying the HOST HTTP header
* can't inject attacks.
*/
@Override
public StringBuffer getRequestURL(){
String url = rawRequest.getRequestURL().toString();
return new StringBuffer(HtmlQuoting.quoteHtmlChars(url));
}
/**
* Quote the server name so that users specifying the HOST HTTP header
* can't inject attacks.
*/
@Override
public String getServerName() {
return HtmlQuoting.quoteHtmlChars(rawRequest.getServerName());
}
}
@Override
public void init(FilterConfig config) throws ServletException {
this.config = config;
}
@Override
public void destroy() {
}
@Override
public void doFilter(ServletRequest request,
ServletResponse response,
FilterChain chain
) throws IOException, ServletException {
HttpServletRequestWrapper quoted =
new RequestQuoter((HttpServletRequest) request);
HttpServletResponse httpResponse = (HttpServletResponse) response;
String mime = inferMimeType(request);
if (mime == null) {
httpResponse.setContentType("text/plain; charset=utf-8");
} else if (mime.startsWith("text/html")) {
// HTML with unspecified encoding, we want to
// force HTML with utf-8 encoding
// This is to avoid the following security issue:
// http://openmya.hacker.jp/hasegawa/security/utf7cs.html
httpResponse.setContentType("text/html; charset=utf-8");
} else if (mime.startsWith("application/xml")) {
httpResponse.setContentType("text/xml; charset=utf-8");
}
chain.doFilter(quoted, httpResponse);
}
/**
* Infer the mime type for the response based on the extension of the request
* URI. Returns null if unknown.
*/
private String inferMimeType(ServletRequest request) {
String path = ((HttpServletRequest)request).getRequestURI();
ContextHandler.SContext sContext = (ContextHandler.SContext)config.getServletContext();
MimeTypes mimes = sContext.getContextHandler().getMimeTypes();
Buffer mimeBuffer = mimes.getMimeByExtension(path);
return (mimeBuffer == null) ? null : mimeBuffer.toString();
}
}
}
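A minimal sketch of standing up this (deprecated) server, assuming a webapps/test resource directory is available on the classpath, since getWebAppsPath throws FileNotFoundException otherwise; HttpServerUsageSketch and EchoServlet are illustrative names, not part of Hadoop:
import java.io.IOException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;
public class HttpServerUsageSketch {
  // Trivial servlet used only for this illustration.
  public static class EchoServlet extends HttpServlet {
    @Override
    protected void doGet(HttpServletRequest req, HttpServletResponse resp)
        throws IOException {
      resp.setContentType("text/plain; charset=UTF-8");
      resp.getWriter().println("echo: " + req.getParameter("msg"));
    }
  }
  public static void main(String[] args) throws Exception {
    // Port 0 asks the OS for a free ephemeral port; findPort=true retries upward
    // on BindException when a fixed port is busy.
    HttpServer server = new HttpServer("test", "0.0.0.0", 0, true, new Configuration());
    server.addServlet("echo", "/echo", EchoServlet.class);
    server.start();
    System.out.println("Listening at " + server.getListenerAddress());
    server.stop();
  }
}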
file_length: 42,273 | avg_line_length: 36.082456 | max_line_length: 97 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Singleton to get access to Http related configuration.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class HttpConfig {
public enum Policy {
HTTP_ONLY,
HTTPS_ONLY,
HTTP_AND_HTTPS;
private static final Policy[] VALUES = values();
public static Policy fromString(String value) {
for (Policy p : VALUES) {
if (p.name().equalsIgnoreCase(value)) {
return p;
}
}
return null;
}
public boolean isHttpEnabled() {
return this == HTTP_ONLY || this == HTTP_AND_HTTPS;
}
public boolean isHttpsEnabled() {
return this == HTTPS_ONLY || this == HTTP_AND_HTTPS;
}
}
}
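A short illustration of the Policy helper; HttpPolicyExample is an illustrative class name:
import org.apache.hadoop.http.HttpConfig;
public class HttpPolicyExample {
  public static void main(String[] args) {
    // fromString matches case-insensitively and returns null for unknown values.
    HttpConfig.Policy policy = HttpConfig.Policy.fromString("https_only");
    System.out.println(policy);                  // HTTPS_ONLY
    System.out.println(policy.isHttpEnabled());  // false
    System.out.println(policy.isHttpsEnabled()); // true
  }
}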
file_length: 1,643 | avg_line_length: 30.018868 | max_line_length: 75 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/FilterContainer.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import java.util.Map;
/**
* A container class for javax.servlet.Filter.
*/
public interface FilterContainer {
/**
* Add a filter to the container.
* @param name Filter name
* @param classname Filter class name
* @param parameters a map from parameter names to initial values
*/
void addFilter(String name, String classname, Map<String, String> parameters);
/**
* Add a global filter to the container.
* @param name filter name
* @param classname filter class name
* @param parameters a map from parameter names to initial values
*/
void addGlobalFilter(String name, String classname, Map<String, String> parameters);
}
file_length: 1,501 | avg_line_length: 35.634146 | max_line_length: 86 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import java.util.HashMap;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogConfigurationException;
import org.apache.commons.logging.LogFactory;
import org.apache.log4j.Appender;
import org.apache.log4j.Logger;
import org.mortbay.jetty.NCSARequestLog;
import org.mortbay.jetty.RequestLog;
/**
* RequestLog object for use with Http
*/
public class HttpRequestLog {
public static final Log LOG = LogFactory.getLog(HttpRequestLog.class);
private static final HashMap<String, String> serverToComponent;
static {
serverToComponent = new HashMap<String, String>();
serverToComponent.put("cluster", "resourcemanager");
serverToComponent.put("hdfs", "namenode");
serverToComponent.put("node", "nodemanager");
}
public static RequestLog getRequestLog(String name) {
String lookup = serverToComponent.get(name);
if (lookup != null) {
name = lookup;
}
String loggerName = "http.requests." + name;
String appenderName = name + "requestlog";
Log logger = LogFactory.getLog(loggerName);
boolean isLog4JLogger;
try {
isLog4JLogger = logger instanceof Log4JLogger;
} catch (NoClassDefFoundError err) {
// In some dependent projects, log4j may not even be on the classpath at
// runtime, in which case the above instanceof check will throw
// NoClassDefFoundError.
LOG.debug("Could not load Log4JLogger class", err);
isLog4JLogger = false;
}
if (isLog4JLogger) {
Log4JLogger httpLog4JLog = (Log4JLogger)logger;
Logger httpLogger = httpLog4JLog.getLogger();
Appender appender = null;
try {
appender = httpLogger.getAppender(appenderName);
} catch (LogConfigurationException e) {
LOG.warn("Http request log for " + loggerName
+ " could not be created");
throw e;
}
if (appender == null) {
LOG.info("Http request log for " + loggerName
+ " is not defined");
return null;
}
if (appender instanceof HttpRequestLogAppender) {
HttpRequestLogAppender requestLogAppender
= (HttpRequestLogAppender)appender;
NCSARequestLog requestLog = new NCSARequestLog();
requestLog.setFilename(requestLogAppender.getFilename());
requestLog.setRetainDays(requestLogAppender.getRetainDays());
return requestLog;
}
else {
LOG.warn("Jetty request log for " + loggerName
+ " was of the wrong class");
return null;
}
}
else {
LOG.warn("Jetty request log can only be enabled using Log4j");
return null;
}
}
}
file_length: 3,546 | avg_line_length: 32.780952 | max_line_length: 78 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/FilterInitializer.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import org.apache.hadoop.conf.Configuration;
/**
* Initialize a javax.servlet.Filter.
*/
public abstract class FilterInitializer {
/**
* Initialize a Filter to a FilterContainer.
* @param container The filter container
* @param conf Configuration for run-time parameters
*/
public abstract void initFilter(FilterContainer container, Configuration conf);
}
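As a sketch of how this hook is used: a subclass registers its filter on the container it is handed, and HttpServer instantiates any classes listed under hadoop.http.filter.initializers. ExampleFilterInitializer, com.example.ExampleFilter, and example.filter.param below are hypothetical placeholders:
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
public class ExampleFilterInitializer extends FilterInitializer {
  @Override
  public void initFilter(FilterContainer container, Configuration conf) {
    Map<String, String> params = new HashMap<String, String>();
    // Pass a run-time parameter from the daemon configuration to the filter.
    params.put("exampleParam", conf.get("example.filter.param", "default"));
    container.addFilter("exampleFilter", "com.example.ExampleFilter", params);
  }
}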
file_length: 1,215 | avg_line_length: 37 | max_line_length: 81 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLogAppender.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import org.apache.log4j.spi.LoggingEvent;
import org.apache.log4j.AppenderSkeleton;
/**
* Log4j Appender adapter for HttpRequestLog
*/
public class HttpRequestLogAppender extends AppenderSkeleton {
private String filename;
private int retainDays;
public HttpRequestLogAppender() {
}
public void setRetainDays(int retainDays) {
this.retainDays = retainDays;
}
public int getRetainDays() {
return retainDays;
}
public void setFilename(String filename) {
this.filename = filename;
}
public String getFilename() {
return filename;
}
@Override
  public void append(LoggingEvent event) {
    // Intentionally a no-op: this appender only carries request-log
    // configuration (Filename, RetainDays) for the HTTP request log to read back.
  }
@Override
public void close() {
}
@Override
public boolean requiresLayout() {
return false;
}
}
| 1,592 | 24.285714 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.PrintStream;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import com.google.common.collect.ImmutableMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.ConfServlet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.security.authentication.util.FileSignerSecretProvider;
import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;
import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
import org.apache.hadoop.security.authentication.util.ZKSignerSecretProvider;
import org.apache.hadoop.security.ssl.SslSocketConnectorSecure;
import org.apache.hadoop.jmx.JMXJsonServlet;
import org.apache.hadoop.log.LogLevel;
import org.apache.hadoop.metrics.MetricsServlet;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Shell;
import org.mortbay.io.Buffer;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Handler;
import org.mortbay.jetty.MimeTypes;
import org.mortbay.jetty.RequestLog;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.SessionManager;
import org.mortbay.jetty.handler.ContextHandler;
import org.mortbay.jetty.handler.ContextHandlerCollection;
import org.mortbay.jetty.handler.HandlerCollection;
import org.mortbay.jetty.handler.RequestLogHandler;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.security.SslSocketConnector;
import org.mortbay.jetty.servlet.AbstractSessionManager;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.DefaultServlet;
import org.mortbay.jetty.servlet.FilterHolder;
import org.mortbay.jetty.servlet.FilterMapping;
import org.mortbay.jetty.servlet.ServletHandler;
import org.mortbay.jetty.servlet.ServletHolder;
import org.mortbay.jetty.webapp.WebAppContext;
import org.mortbay.thread.QueuedThreadPool;
import org.mortbay.util.MultiException;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.sun.jersey.spi.container.servlet.ServletContainer;
import static org.apache.hadoop.security.authentication.server
.AuthenticationFilter.*;
/**
* Create a Jetty embedded server to answer http requests. The primary goal is
* to serve up status information for the server. There are three contexts:
* "/logs/" -> points to the log directory "/static/" -> points to common static
* files (src/webapps/static) "/" -> the jsp server code from
* (src/webapps/<name>)
*
* This class is a fork of the old HttpServer. HttpServer exists for
* compatibility reasons. See HBASE-10336 for more details.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public final class HttpServer2 implements FilterContainer {
public static final Log LOG = LogFactory.getLog(HttpServer2.class);
static final String FILTER_INITIALIZER_PROPERTY
= "hadoop.http.filter.initializers";
public static final String HTTP_MAX_THREADS = "hadoop.http.max.threads";
// The ServletContext attribute where the daemon Configuration
// gets stored.
public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf";
public static final String ADMINS_ACL = "admins.acl";
public static final String SPNEGO_FILTER = "SpnegoFilter";
public static final String NO_CACHE_FILTER = "NoCacheFilter";
public static final String BIND_ADDRESS = "bind.address";
private final AccessControlList adminsAcl;
protected final Server webServer;
private final List<Connector> listeners = Lists.newArrayList();
protected final WebAppContext webAppContext;
protected final boolean findPort;
protected final Map<Context, Boolean> defaultContexts =
new HashMap<>();
protected final List<String> filterNames = new ArrayList<>();
static final String STATE_DESCRIPTION_ALIVE = " - alive";
static final String STATE_DESCRIPTION_NOT_LIVE = " - not live";
private final SignerSecretProvider secretProvider;
/**
* Class to construct instances of HTTP server with specific options.
*/
public static class Builder {
private ArrayList<URI> endpoints = Lists.newArrayList();
private String name;
private Configuration conf;
private String[] pathSpecs;
private AccessControlList adminsAcl;
private boolean securityEnabled = false;
private String usernameConfKey;
private String keytabConfKey;
private boolean needsClientAuth;
private String trustStore;
private String trustStorePassword;
private String trustStoreType;
private String keyStore;
private String keyStorePassword;
private String keyStoreType;
// The -keypass option in keytool
private String keyPassword;
private boolean findPort;
private String hostName;
private boolean disallowFallbackToRandomSignerSecretProvider;
private String authFilterConfigurationPrefix = "hadoop.http.authentication.";
public Builder setName(String name){
this.name = name;
return this;
}
/**
* Add an endpoint that the HTTP server should listen to.
*
* @param endpoint
     *            the endpoint that the HTTP server should listen to. The
* scheme specifies the protocol (i.e. HTTP / HTTPS), the host
* specifies the binding address, and the port specifies the
* listening port. Unspecified or zero port means that the server
* can listen to any port.
*/
public Builder addEndpoint(URI endpoint) {
endpoints.add(endpoint);
return this;
}
/**
* Set the hostname of the http server. The host name is used to resolve the
* _HOST field in Kerberos principals. The hostname of the first listener
* will be used if the name is unspecified.
*/
public Builder hostName(String hostName) {
this.hostName = hostName;
return this;
}
public Builder trustStore(String location, String password, String type) {
this.trustStore = location;
this.trustStorePassword = password;
this.trustStoreType = type;
return this;
}
public Builder keyStore(String location, String password, String type) {
this.keyStore = location;
this.keyStorePassword = password;
this.keyStoreType = type;
return this;
}
public Builder keyPassword(String password) {
this.keyPassword = password;
return this;
}
/**
* Specify whether the server should authorize the client in SSL
* connections.
*/
public Builder needsClientAuth(boolean value) {
this.needsClientAuth = value;
return this;
}
public Builder setFindPort(boolean findPort) {
this.findPort = findPort;
return this;
}
public Builder setConf(Configuration conf) {
this.conf = conf;
return this;
}
public Builder setPathSpec(String[] pathSpec) {
this.pathSpecs = pathSpec;
return this;
}
public Builder setACL(AccessControlList acl) {
this.adminsAcl = acl;
return this;
}
public Builder setSecurityEnabled(boolean securityEnabled) {
this.securityEnabled = securityEnabled;
return this;
}
public Builder setUsernameConfKey(String usernameConfKey) {
this.usernameConfKey = usernameConfKey;
return this;
}
public Builder setKeytabConfKey(String keytabConfKey) {
this.keytabConfKey = keytabConfKey;
return this;
}
public Builder disallowFallbackToRandomSingerSecretProvider(boolean value) {
this.disallowFallbackToRandomSignerSecretProvider = value;
return this;
}
public Builder authFilterConfigurationPrefix(String value) {
this.authFilterConfigurationPrefix = value;
return this;
}
public HttpServer2 build() throws IOException {
Preconditions.checkNotNull(name, "name is not set");
Preconditions.checkState(!endpoints.isEmpty(), "No endpoints specified");
if (hostName == null) {
hostName = endpoints.get(0).getHost();
}
if (this.conf == null) {
conf = new Configuration();
}
HttpServer2 server = new HttpServer2(this);
if (this.securityEnabled) {
server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
}
for (URI ep : endpoints) {
final Connector listener;
String scheme = ep.getScheme();
if ("http".equals(scheme)) {
listener = HttpServer2.createDefaultChannelConnector();
} else if ("https".equals(scheme)) {
SslSocketConnector c = new SslSocketConnectorSecure();
c.setHeaderBufferSize(1024*64);
c.setNeedClientAuth(needsClientAuth);
c.setKeyPassword(keyPassword);
if (keyStore != null) {
c.setKeystore(keyStore);
c.setKeystoreType(keyStoreType);
c.setPassword(keyStorePassword);
}
if (trustStore != null) {
c.setTruststore(trustStore);
c.setTruststoreType(trustStoreType);
c.setTrustPassword(trustStorePassword);
}
listener = c;
} else {
throw new HadoopIllegalArgumentException(
"unknown scheme for endpoint:" + ep);
}
listener.setHost(ep.getHost());
listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
server.addListener(listener);
}
server.loadListeners();
return server;
}
}
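  /*
   * A minimal usage sketch of the builder above (the name and endpoint are
   * illustrative):
   *
   *   HttpServer2 server = new HttpServer2.Builder()
   *       .setName("datanode")
   *       .addEndpoint(URI.create("http://0.0.0.0:0"))  // port 0: pick any free port
   *       .setFindPort(true)
   *       .setConf(conf)
   *       .build();
   *   server.start();
   *   InetSocketAddress addr = server.getConnectorAddress(0);
   */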
private HttpServer2(final Builder b) throws IOException {
final String appDir = getWebAppsPath(b.name);
this.webServer = new Server();
this.adminsAcl = b.adminsAcl;
this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir);
try {
this.secretProvider =
constructSecretProvider(b, webAppContext.getServletContext());
this.webAppContext.getServletContext().setAttribute
(AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE,
secretProvider);
} catch(IOException e) {
throw e;
} catch (Exception e) {
throw new IOException(e);
}
this.findPort = b.findPort;
initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs);
}
private void initializeWebServer(String name, String hostName,
Configuration conf, String[] pathSpecs)
throws IOException {
Preconditions.checkNotNull(webAppContext);
int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1);
    // If HTTP_MAX_THREADS is not configured, QueuedThreadPool() will use the
// default value (currently 250).
QueuedThreadPool threadPool = maxThreads == -1 ? new QueuedThreadPool()
: new QueuedThreadPool(maxThreads);
threadPool.setDaemon(true);
webServer.setThreadPool(threadPool);
SessionManager sm = webAppContext.getSessionHandler().getSessionManager();
if (sm instanceof AbstractSessionManager) {
AbstractSessionManager asm = (AbstractSessionManager)sm;
asm.setHttpOnly(true);
asm.setSecureCookies(true);
}
ContextHandlerCollection contexts = new ContextHandlerCollection();
RequestLog requestLog = HttpRequestLog.getRequestLog(name);
if (requestLog != null) {
RequestLogHandler requestLogHandler = new RequestLogHandler();
requestLogHandler.setRequestLog(requestLog);
HandlerCollection handlers = new HandlerCollection();
handlers.setHandlers(new Handler[] {contexts, requestLogHandler});
webServer.setHandler(handlers);
} else {
webServer.setHandler(contexts);
}
final String appDir = getWebAppsPath(name);
webServer.addHandler(webAppContext);
addDefaultApps(contexts, appDir, conf);
addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
final FilterInitializer[] initializers = getFilterInitializers(conf);
if (initializers != null) {
conf = new Configuration(conf);
conf.set(BIND_ADDRESS, hostName);
for (FilterInitializer c : initializers) {
c.initFilter(this, conf);
}
}
addDefaultServlets();
if (pathSpecs != null) {
for (String path : pathSpecs) {
LOG.info("adding path spec: " + path);
addFilterPathMapping(path, webAppContext);
}
}
}
private void addListener(Connector connector) {
listeners.add(connector);
}
private static WebAppContext createWebAppContext(String name,
Configuration conf, AccessControlList adminsAcl, final String appDir) {
WebAppContext ctx = new WebAppContext();
ctx.setDefaultsDescriptor(null);
ServletHolder holder = new ServletHolder(new DefaultServlet());
Map<String, String> params = ImmutableMap. <String, String> builder()
.put("acceptRanges", "true")
.put("dirAllowed", "false")
.put("gzip", "true")
.put("useFileMappedBuffer", "true")
.build();
holder.setInitParameters(params);
ctx.setWelcomeFiles(new String[] {"index.html"});
ctx.addServlet(holder, "/");
ctx.setDisplayName(name);
ctx.setContextPath("/");
ctx.setWar(appDir + "/" + name);
ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
addNoCacheFilter(ctx);
return ctx;
}
private static SignerSecretProvider constructSecretProvider(final Builder b,
ServletContext ctx)
throws Exception {
final Configuration conf = b.conf;
Properties config = getFilterProperties(conf,
b.authFilterConfigurationPrefix);
return AuthenticationFilter.constructSecretProvider(
ctx, config, b.disallowFallbackToRandomSignerSecretProvider);
}
private static Properties getFilterProperties(Configuration conf, String
prefix) {
Properties prop = new Properties();
Map<String, String> filterConfig = AuthenticationFilterInitializer
.getFilterConfigMap(conf, prefix);
prop.putAll(filterConfig);
return prop;
}
private static void addNoCacheFilter(WebAppContext ctxt) {
defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(),
Collections.<String, String> emptyMap(), new String[] { "/*" });
}
private static class SelectChannelConnectorWithSafeStartup
extends SelectChannelConnector {
public SelectChannelConnectorWithSafeStartup() {
super();
}
/* Override the broken isRunning() method (JETTY-1316). This bug is present
     * in 6.1.26. For the versions without this bug, it adds insignificant
* overhead.
*/
@Override
public boolean isRunning() {
if (super.isRunning()) {
return true;
}
// We might be hitting JETTY-1316. If the internal state changed from
// STARTING to STARTED in the middle of the check, the above call may
// return false. Check it one more time.
LOG.warn("HttpServer Acceptor: isRunning is false. Rechecking.");
try {
Thread.sleep(10);
} catch (InterruptedException ie) {
// Mark this thread as interrupted. Someone up in the call chain
// might care.
Thread.currentThread().interrupt();
}
boolean runState = super.isRunning();
LOG.warn("HttpServer Acceptor: isRunning is " + runState);
return runState;
}
}
@InterfaceAudience.Private
public static Connector createDefaultChannelConnector() {
SelectChannelConnector ret = new SelectChannelConnectorWithSafeStartup();
ret.setLowResourceMaxIdleTime(10000);
ret.setAcceptQueueSize(128);
ret.setResolveNames(false);
ret.setUseDirectBuffers(false);
if(Shell.WINDOWS) {
// result of setting the SO_REUSEADDR flag is different on Windows
// http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
      // without this, two NameNodes can start on the same machine and listen
      // on the same port, with indeterminate routing of incoming requests to them
ret.setReuseAddress(false);
}
ret.setHeaderBufferSize(1024*64);
return ret;
}
  /** Get an array of FilterInitializers specified in the conf. */
private static FilterInitializer[] getFilterInitializers(Configuration conf) {
if (conf == null) {
return null;
}
Class<?>[] classes = conf.getClasses(FILTER_INITIALIZER_PROPERTY);
if (classes == null) {
return null;
}
FilterInitializer[] initializers = new FilterInitializer[classes.length];
for(int i = 0; i < classes.length; i++) {
initializers[i] = (FilterInitializer)ReflectionUtils.newInstance(
classes[i], conf);
}
return initializers;
}
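  /*
   * Illustrative configuration consumed by getFilterInitializers(): a
   * comma-separated list of FilterInitializer class names, for example in
   * core-site.xml (the initializer shown is just one possible choice):
   *
   *   <property>
   *     <name>hadoop.http.filter.initializers</name>
   *     <value>org.apache.hadoop.security.AuthenticationFilterInitializer</value>
   *   </property>
   */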
/**
* Add default apps.
* @param appDir The application directory
* @throws IOException
*/
protected void addDefaultApps(ContextHandlerCollection parent,
final String appDir, Configuration conf) throws IOException {
// set up the context for "/logs/" if "hadoop.log.dir" property is defined.
String logDir = System.getProperty("hadoop.log.dir");
if (logDir != null) {
Context logContext = new Context(parent, "/logs");
logContext.setResourceBase(logDir);
logContext.addServlet(AdminAuthorizedServlet.class, "/*");
if (conf.getBoolean(
CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
@SuppressWarnings("unchecked")
Map<String, String> params = logContext.getInitParams();
params.put(
"org.mortbay.jetty.servlet.Default.aliases", "true");
}
logContext.setDisplayName("logs");
setContextAttributes(logContext, conf);
addNoCacheFilter(webAppContext);
defaultContexts.put(logContext, true);
}
// set up the context for "/static/*"
Context staticContext = new Context(parent, "/static");
staticContext.setResourceBase(appDir + "/static");
staticContext.addServlet(DefaultServlet.class, "/*");
staticContext.setDisplayName("static");
setContextAttributes(staticContext, conf);
defaultContexts.put(staticContext, true);
}
private void setContextAttributes(Context context, Configuration conf) {
context.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
context.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
}
/**
* Add default servlets.
*/
protected void addDefaultServlets() {
// set up default servlets
addServlet("stacks", "/stacks", StackServlet.class);
addServlet("logLevel", "/logLevel", LogLevel.Servlet.class);
addServlet("metrics", "/metrics", MetricsServlet.class);
addServlet("jmx", "/jmx", JMXJsonServlet.class);
addServlet("conf", "/conf", ConfServlet.class);
}
public void addContext(Context ctxt, boolean isFiltered) {
webServer.addHandler(ctxt);
addNoCacheFilter(webAppContext);
defaultContexts.put(ctxt, isFiltered);
}
/**
* Set a value in the webapp context. These values are available to the jsp
* pages as "application.getAttribute(name)".
* @param name The name of the attribute
* @param value The value of the attribute
*/
public void setAttribute(String name, Object value) {
webAppContext.setAttribute(name, value);
}
/**
* Add a Jersey resource package.
* @param packageName The Java package name containing the Jersey resource.
* @param pathSpec The path spec for the servlet
*/
public void addJerseyResourcePackage(final String packageName,
final String pathSpec) {
LOG.info("addJerseyResourcePackage: packageName=" + packageName
+ ", pathSpec=" + pathSpec);
final ServletHolder sh = new ServletHolder(ServletContainer.class);
sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass",
"com.sun.jersey.api.core.PackagesResourceConfig");
sh.setInitParameter("com.sun.jersey.config.property.packages", packageName);
webAppContext.addServlet(sh, pathSpec);
}
/**
* Add a servlet in the server.
* @param name The name of the servlet (can be passed as null)
* @param pathSpec The path spec for the servlet
* @param clazz The servlet class
*/
public void addServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz) {
addInternalServlet(name, pathSpec, clazz, false);
addFilterPathMapping(pathSpec, webAppContext);
}
/**
* Add an internal servlet in the server.
* Note: This method is to be used for adding servlets that facilitate
* internal communication and not for user facing functionality. For
* servlets added using this method, filters are not enabled.
*
* @param name The name of the servlet (can be passed as null)
* @param pathSpec The path spec for the servlet
* @param clazz The servlet class
*/
public void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz) {
addInternalServlet(name, pathSpec, clazz, false);
}
/**
* Add an internal servlet in the server, specifying whether or not to
* protect with Kerberos authentication.
* Note: This method is to be used for adding servlets that facilitate
* internal communication and not for user facing functionality. For
   * servlets added using this method, filters (except internal Kerberos
* filters) are not enabled.
*
* @param name The name of the servlet (can be passed as null)
* @param pathSpec The path spec for the servlet
* @param clazz The servlet class
   * @param requireAuth Require Kerberos authentication to access the servlet
*/
public void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz, boolean requireAuth) {
ServletHolder holder = new ServletHolder(clazz);
if (name != null) {
holder.setName(name);
}
webAppContext.addServlet(holder, pathSpec);
if(requireAuth && UserGroupInformation.isSecurityEnabled()) {
LOG.info("Adding Kerberos (SPNEGO) filter to " + name);
ServletHandler handler = webAppContext.getServletHandler();
FilterMapping fmap = new FilterMapping();
fmap.setPathSpec(pathSpec);
fmap.setFilterName(SPNEGO_FILTER);
fmap.setDispatches(Handler.ALL);
handler.addFilterMapping(fmap);
}
}
@Override
public void addFilter(String name, String classname,
Map<String, String> parameters) {
FilterHolder filterHolder = getFilterHolder(name, classname, parameters);
final String[] USER_FACING_URLS = { "*.html", "*.jsp" };
FilterMapping fmap = getFilterMapping(name, USER_FACING_URLS);
defineFilter(webAppContext, filterHolder, fmap);
LOG.info(
"Added filter " + name + " (class=" + classname + ") to context " + webAppContext.getDisplayName());
final String[] ALL_URLS = { "/*" };
fmap = getFilterMapping(name, ALL_URLS);
for (Map.Entry<Context, Boolean> e : defaultContexts.entrySet()) {
if (e.getValue()) {
Context ctx = e.getKey();
defineFilter(ctx, filterHolder, fmap);
LOG.info("Added filter " + name + " (class=" + classname
+ ") to context " + ctx.getDisplayName());
}
}
filterNames.add(name);
}
@Override
public void addGlobalFilter(String name, String classname,
Map<String, String> parameters) {
final String[] ALL_URLS = { "/*" };
FilterHolder filterHolder = getFilterHolder(name, classname, parameters);
FilterMapping fmap = getFilterMapping(name, ALL_URLS);
defineFilter(webAppContext, filterHolder, fmap);
for (Context ctx : defaultContexts.keySet()) {
defineFilter(ctx, filterHolder, fmap);
}
LOG.info("Added global filter '" + name + "' (class=" + classname + ")");
}
/**
* Define a filter for a context and set up default url mappings.
*/
public static void defineFilter(Context ctx, String name,
String classname, Map<String,String> parameters, String[] urls) {
FilterHolder filterHolder = getFilterHolder(name, classname, parameters);
FilterMapping fmap = getFilterMapping(name, urls);
defineFilter(ctx, filterHolder, fmap);
}
/**
* Define a filter for a context and set up default url mappings.
*/
private static void defineFilter(Context ctx, FilterHolder holder,
FilterMapping fmap) {
ServletHandler handler = ctx.getServletHandler();
handler.addFilter(holder, fmap);
}
private static FilterMapping getFilterMapping(String name, String[] urls) {
FilterMapping fmap = new FilterMapping();
fmap.setPathSpecs(urls);
fmap.setDispatches(Handler.ALL);
fmap.setFilterName(name);
return fmap;
}
private static FilterHolder getFilterHolder(String name, String classname,
Map<String, String> parameters) {
FilterHolder holder = new FilterHolder();
holder.setName(name);
holder.setClassName(classname);
holder.setInitParameters(parameters);
return holder;
}
/**
* Add the path spec to the filter path mapping.
* @param pathSpec The path spec
* @param webAppCtx The WebApplicationContext to add to
*/
protected void addFilterPathMapping(String pathSpec,
Context webAppCtx) {
ServletHandler handler = webAppCtx.getServletHandler();
for(String name : filterNames) {
FilterMapping fmap = new FilterMapping();
fmap.setPathSpec(pathSpec);
fmap.setFilterName(name);
fmap.setDispatches(Handler.ALL);
handler.addFilterMapping(fmap);
}
}
/**
* Get the value in the webapp context.
* @param name The name of the attribute
* @return The value of the attribute
*/
public Object getAttribute(String name) {
return webAppContext.getAttribute(name);
}
public WebAppContext getWebAppContext(){
return this.webAppContext;
}
/**
* Get the pathname to the webapps files.
* @param appName eg "secondary" or "datanode"
* @return the pathname as a URL
* @throws FileNotFoundException if 'webapps' directory cannot be found on CLASSPATH.
*/
protected String getWebAppsPath(String appName) throws FileNotFoundException {
URL url = getClass().getClassLoader().getResource("webapps/" + appName);
if (url == null)
throw new FileNotFoundException("webapps/" + appName
+ " not found in CLASSPATH");
String urlString = url.toString();
return urlString.substring(0, urlString.lastIndexOf('/'));
}
/**
* Get the port that the server is on
* @return the port
*/
@Deprecated
public int getPort() {
return webServer.getConnectors()[0].getLocalPort();
}
/**
* Get the address that corresponds to a particular connector.
*
   * @return the corresponding address for the connector, or null if there is no
   *         such connector or the connector is not bound.
*/
public InetSocketAddress getConnectorAddress(int index) {
Preconditions.checkArgument(index >= 0);
    if (index >= webServer.getConnectors().length)
return null;
Connector c = webServer.getConnectors()[index];
if (c.getLocalPort() == -1) {
      // The connector is not bound
return null;
}
return new InetSocketAddress(c.getHost(), c.getLocalPort());
}
/**
* Set the min, max number of worker threads (simultaneous connections).
*/
public void setThreads(int min, int max) {
QueuedThreadPool pool = (QueuedThreadPool) webServer.getThreadPool();
pool.setMinThreads(min);
pool.setMaxThreads(max);
}
private void initSpnego(Configuration conf, String hostName,
String usernameConfKey, String keytabConfKey) throws IOException {
Map<String, String> params = new HashMap<>();
String principalInConf = conf.get(usernameConfKey);
if (principalInConf != null && !principalInConf.isEmpty()) {
params.put("kerberos.principal", SecurityUtil.getServerPrincipal(
principalInConf, hostName));
}
String httpKeytab = conf.get(keytabConfKey);
if (httpKeytab != null && !httpKeytab.isEmpty()) {
params.put("kerberos.keytab", httpKeytab);
}
params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
defineFilter(webAppContext, SPNEGO_FILTER,
AuthenticationFilter.class.getName(), params, null);
}
/**
* Start the server. Does not wait for the server to start.
*/
public void start() throws IOException {
try {
try {
openListeners();
webServer.start();
} catch (IOException ex) {
LOG.info("HttpServer.start() threw a non Bind IOException", ex);
throw ex;
} catch (MultiException ex) {
LOG.info("HttpServer.start() threw a MultiException", ex);
throw ex;
}
      // Make sure there are no handler failures.
Handler[] handlers = webServer.getHandlers();
for (Handler handler : handlers) {
if (handler.isFailed()) {
throw new IOException(
"Problem in starting http server. Server handlers failed");
}
}
// Make sure there are no errors initializing the context.
Throwable unavailableException = webAppContext.getUnavailableException();
if (unavailableException != null) {
// Have to stop the webserver, or else its non-daemon threads
// will hang forever.
webServer.stop();
throw new IOException("Unable to initialize WebAppContext",
unavailableException);
}
} catch (IOException e) {
throw e;
} catch (InterruptedException e) {
throw (IOException) new InterruptedIOException(
"Interrupted while starting HTTP server").initCause(e);
} catch (Exception e) {
throw new IOException("Problem starting http server", e);
}
}
private void loadListeners() {
for (Connector c : listeners) {
webServer.addConnector(c);
}
}
/**
* Open the main listener for the server
* @throws Exception
*/
void openListeners() throws Exception {
for (Connector listener : listeners) {
if (listener.getLocalPort() != -1) {
// This listener is either started externally or has been bound
continue;
}
int port = listener.getPort();
while (true) {
// jetty has a bug where you can't reopen a listener that previously
// failed to open w/o issuing a close first, even if the port is changed
try {
listener.close();
listener.open();
LOG.info("Jetty bound to port " + listener.getLocalPort());
break;
} catch (BindException ex) {
if (port == 0 || !findPort) {
BindException be = new BindException("Port in use: "
+ listener.getHost() + ":" + listener.getPort());
be.initCause(ex);
throw be;
}
}
// try the next port number
listener.setPort(++port);
Thread.sleep(100);
}
}
}
/**
* stop the server
*/
public void stop() throws Exception {
MultiException exception = null;
for (Connector c : listeners) {
try {
c.close();
} catch (Exception e) {
LOG.error(
"Error while stopping listener for webapp"
+ webAppContext.getDisplayName(), e);
exception = addMultiException(exception, e);
}
}
try {
      // explicitly destroy the secret provider
secretProvider.destroy();
// clear & stop webAppContext attributes to avoid memory leaks.
webAppContext.clearAttributes();
webAppContext.stop();
} catch (Exception e) {
LOG.error("Error while stopping web app context for webapp "
+ webAppContext.getDisplayName(), e);
exception = addMultiException(exception, e);
}
try {
webServer.stop();
} catch (Exception e) {
LOG.error("Error while stopping web server for webapp "
+ webAppContext.getDisplayName(), e);
exception = addMultiException(exception, e);
}
if (exception != null) {
exception.ifExceptionThrow();
}
}
private MultiException addMultiException(MultiException exception, Exception e) {
if(exception == null){
exception = new MultiException();
}
exception.add(e);
return exception;
}
public void join() throws InterruptedException {
webServer.join();
}
/**
* Test for the availability of the web server
* @return true if the web server is started, false otherwise
*/
public boolean isAlive() {
return webServer != null && webServer.isStarted();
}
@Override
public String toString() {
Preconditions.checkState(!listeners.isEmpty());
StringBuilder sb = new StringBuilder("HttpServer (")
.append(isAlive() ? STATE_DESCRIPTION_ALIVE
: STATE_DESCRIPTION_NOT_LIVE)
.append("), listening at:");
for (Connector l : listeners) {
sb.append(l.getHost()).append(":").append(l.getPort()).append("/,");
}
return sb.toString();
}
/**
* Checks the user has privileges to access to instrumentation servlets.
* <p/>
* If <code>hadoop.security.instrumentation.requires.admin</code> is set to FALSE
* (default value) it always returns TRUE.
* <p/>
* If <code>hadoop.security.instrumentation.requires.admin</code> is set to TRUE
* it will check that if the current user is in the admin ACLS. If the user is
* in the admin ACLs it returns TRUE, otherwise it returns FALSE.
*
* @param servletContext the servlet context.
* @param request the servlet request.
* @param response the servlet response.
   * @return TRUE/FALSE based on the logic described above.
*/
public static boolean isInstrumentationAccessAllowed(
ServletContext servletContext, HttpServletRequest request,
HttpServletResponse response) throws IOException {
Configuration conf =
(Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
boolean access = true;
boolean adminAccess = conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
false);
if (adminAccess) {
access = hasAdministratorAccess(servletContext, request, response);
}
return access;
}
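  /*
   * Illustrative: the admin-only check above is switched on with the
   * following setting (plus hadoop.security.authorization and an admin ACL);
   * servlets such as /stacks call this method before answering.
   *
   *   <property>
   *     <name>hadoop.security.instrumentation.requires.admin</name>
   *     <value>true</value>
   *   </property>
   */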
/**
   * Checks whether the user sending the HttpServletRequest has administrator
   * access. If not, the response will be modified to send an error to the user.
*
* @param response used to send the error response if user does not have admin access.
* @return true if admin-authorized, false otherwise
* @throws IOException
*/
public static boolean hasAdministratorAccess(
ServletContext servletContext, HttpServletRequest request,
HttpServletResponse response) throws IOException {
Configuration conf =
(Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
// If there is no authorization, anybody has administrator access.
if (!conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
return true;
}
String remoteUser = request.getRemoteUser();
if (remoteUser == null) {
response.sendError(HttpServletResponse.SC_FORBIDDEN,
"Unauthenticated users are not " +
"authorized to access this page.");
return false;
}
if (servletContext.getAttribute(ADMINS_ACL) != null &&
!userHasAdministratorAccess(servletContext, remoteUser)) {
response.sendError(HttpServletResponse.SC_FORBIDDEN, "User "
+ remoteUser + " is unauthorized to access this page.");
return false;
}
return true;
}
/**
* Get the admin ACLs from the given ServletContext and check if the given
* user is in the ACL.
*
* @param servletContext the context containing the admin ACL.
* @param remoteUser the remote user to check for.
* @return true if the user is present in the ACL, false if no ACL is set or
* the user is not present
*/
public static boolean userHasAdministratorAccess(ServletContext servletContext,
String remoteUser) {
AccessControlList adminsAcl = (AccessControlList) servletContext
.getAttribute(ADMINS_ACL);
UserGroupInformation remoteUserUGI =
UserGroupInformation.createRemoteUser(remoteUser);
return adminsAcl != null && adminsAcl.isUserAllowed(remoteUserUGI);
}
/**
* A very simple servlet to serve up a text representation of the current
* stack traces. It both returns the stacks to the caller and logs them.
   * The dump to the caller and the dump to the log are taken sequentially, so
   * they may not capture exactly the same instant.
*/
public static class StackServlet extends HttpServlet {
private static final long serialVersionUID = -6284183679759467039L;
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
request, response)) {
return;
}
response.setContentType("text/plain; charset=UTF-8");
try (PrintStream out = new PrintStream(
response.getOutputStream(), false, "UTF-8")) {
ReflectionUtils.printThreadInfo(out, "");
}
ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1);
}
}
/**
* A Servlet input filter that quotes all HTML active characters in the
* parameter names and values. The goal is to quote the characters to make
* all of the servlets resistant to cross-site scripting attacks.
*/
public static class QuotingInputFilter implements Filter {
private FilterConfig config;
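    // For example (illustrative), a parameter submitted as
    //   <script>alert(0)</script>
    // comes back from getParameter()/getParameterValues() below as
    //   &lt;script&gt;alert(0)&lt;/script&gt;
    // so it can be echoed into an HTML page without becoming active markup.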
public static class RequestQuoter extends HttpServletRequestWrapper {
private final HttpServletRequest rawRequest;
public RequestQuoter(HttpServletRequest rawRequest) {
super(rawRequest);
this.rawRequest = rawRequest;
}
/**
* Return the set of parameter names, quoting each name.
*/
@SuppressWarnings("unchecked")
@Override
public Enumeration<String> getParameterNames() {
return new Enumeration<String>() {
private Enumeration<String> rawIterator =
rawRequest.getParameterNames();
@Override
public boolean hasMoreElements() {
return rawIterator.hasMoreElements();
}
@Override
public String nextElement() {
return HtmlQuoting.quoteHtmlChars(rawIterator.nextElement());
}
};
}
/**
* Unquote the name and quote the value.
*/
@Override
public String getParameter(String name) {
return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter
(HtmlQuoting.unquoteHtmlChars(name)));
}
@Override
public String[] getParameterValues(String name) {
String unquoteName = HtmlQuoting.unquoteHtmlChars(name);
String[] unquoteValue = rawRequest.getParameterValues(unquoteName);
if (unquoteValue == null) {
return null;
}
String[] result = new String[unquoteValue.length];
for(int i=0; i < result.length; ++i) {
result[i] = HtmlQuoting.quoteHtmlChars(unquoteValue[i]);
}
return result;
}
@SuppressWarnings("unchecked")
@Override
public Map<String, String[]> getParameterMap() {
Map<String, String[]> result = new HashMap<>();
Map<String, String[]> raw = rawRequest.getParameterMap();
for (Map.Entry<String,String[]> item: raw.entrySet()) {
String[] rawValue = item.getValue();
String[] cookedValue = new String[rawValue.length];
for(int i=0; i< rawValue.length; ++i) {
cookedValue[i] = HtmlQuoting.quoteHtmlChars(rawValue[i]);
}
result.put(HtmlQuoting.quoteHtmlChars(item.getKey()), cookedValue);
}
return result;
}
/**
* Quote the url so that users specifying the HOST HTTP header
* can't inject attacks.
*/
@Override
public StringBuffer getRequestURL(){
String url = rawRequest.getRequestURL().toString();
return new StringBuffer(HtmlQuoting.quoteHtmlChars(url));
}
/**
* Quote the server name so that users specifying the HOST HTTP header
* can't inject attacks.
*/
@Override
public String getServerName() {
return HtmlQuoting.quoteHtmlChars(rawRequest.getServerName());
}
}
@Override
public void init(FilterConfig config) throws ServletException {
this.config = config;
}
@Override
public void destroy() {
}
@Override
public void doFilter(ServletRequest request,
ServletResponse response,
FilterChain chain
) throws IOException, ServletException {
HttpServletRequestWrapper quoted =
new RequestQuoter((HttpServletRequest) request);
HttpServletResponse httpResponse = (HttpServletResponse) response;
String mime = inferMimeType(request);
if (mime == null) {
httpResponse.setContentType("text/plain; charset=utf-8");
} else if (mime.startsWith("text/html")) {
// HTML with unspecified encoding, we want to
// force HTML with utf-8 encoding
// This is to avoid the following security issue:
// http://openmya.hacker.jp/hasegawa/security/utf7cs.html
httpResponse.setContentType("text/html; charset=utf-8");
} else if (mime.startsWith("application/xml")) {
httpResponse.setContentType("text/xml; charset=utf-8");
}
chain.doFilter(quoted, httpResponse);
}
/**
* Infer the mime type for the response based on the extension of the request
* URI. Returns null if unknown.
*/
private String inferMimeType(ServletRequest request) {
String path = ((HttpServletRequest)request).getRequestURI();
ContextHandler.SContext sContext = (ContextHandler.SContext)config.getServletContext();
MimeTypes mimes = sContext.getContextHandler().getMimeTypes();
Buffer mimeBuffer = mimes.getMimeByExtension(path);
return (mimeBuffer == null) ? null : mimeBuffer.toString();
}
}
}
| 44,470 | 34.294444 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/lib/StaticUserWebFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.http.lib;
import java.io.IOException;
import java.security.Principal;
import java.util.HashMap;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
import javax.servlet.Filter;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
/**
* Provides a servlet filter that pretends to authenticate a fake user (Dr.Who)
* so that the web UI is usable for a secure cluster without authentication.
*/
public class StaticUserWebFilter extends FilterInitializer {
static final String DEPRECATED_UGI_KEY = "dfs.web.ugi";
private static final Log LOG = LogFactory.getLog(StaticUserWebFilter.class);
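  /*
   * Illustrative: the identity handed to the web UI comes from
   * hadoop.http.staticuser.user (HADOOP_HTTP_STATIC_USER), falling back to the
   * built-in default when unset. The value below is just an example:
   *
   *   <property>
   *     <name>hadoop.http.staticuser.user</name>
   *     <value>webui</value>
   *   </property>
   */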
static class User implements Principal {
private final String name;
public User(String name) {
this.name = name;
}
@Override
public String getName() {
return name;
}
@Override
public int hashCode() {
return name.hashCode();
}
@Override
public boolean equals(Object other) {
if (other == this) {
return true;
} else if (other == null || other.getClass() != getClass()) {
return false;
}
return ((User) other).name.equals(name);
}
@Override
public String toString() {
return name;
}
}
public static class StaticUserFilter implements Filter {
private User user;
private String username;
@Override
public void destroy() {
// NOTHING
}
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain
) throws IOException, ServletException {
HttpServletRequest httpRequest = (HttpServletRequest) request;
// if the user is already authenticated, don't override it
if (httpRequest.getRemoteUser() != null) {
chain.doFilter(request, response);
} else {
HttpServletRequestWrapper wrapper =
new HttpServletRequestWrapper(httpRequest) {
@Override
public Principal getUserPrincipal() {
return user;
}
@Override
public String getRemoteUser() {
return username;
}
};
chain.doFilter(wrapper, response);
}
}
@Override
public void init(FilterConfig conf) throws ServletException {
this.username = conf.getInitParameter(HADOOP_HTTP_STATIC_USER);
this.user = new User(username);
}
}
@Override
public void initFilter(FilterContainer container, Configuration conf) {
HashMap<String, String> options = new HashMap<String, String>();
String username = getUsernameFromConf(conf);
options.put(HADOOP_HTTP_STATIC_USER, username);
container.addFilter("static_user_filter",
StaticUserFilter.class.getName(),
options);
}
/**
* Retrieve the static username from the configuration.
*/
static String getUsernameFromConf(Configuration conf) {
String oldStyleUgi = conf.get(DEPRECATED_UGI_KEY);
if (oldStyleUgi != null) {
// We can't use the normal configuration deprecation mechanism here
// since we need to split out the username from the configured UGI.
LOG.warn(DEPRECATED_UGI_KEY + " should not be used. Instead, use " +
HADOOP_HTTP_STATIC_USER + ".");
String[] parts = oldStyleUgi.split(",");
return parts[0];
} else {
return conf.get(HADOOP_HTTP_STATIC_USER,
DEFAULT_HADOOP_HTTP_STATIC_USER);
}
}
}
| 4,901 | 31.25 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-common-project/hadoop-common/dev-support/jdiff/Null.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
public class Null {
public Null() { }
}
| 849 | 39.47619 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.maven.plugin.util;
import org.apache.maven.model.FileSet;
import org.codehaus.plexus.util.FileUtils;
import java.io.File;
import java.io.IOException;
import java.util.List;
/**
* FileSetUtils contains helper methods for mojo implementations that need to
* work with a Maven FileSet.
*/
public class FileSetUtils {
/**
* Returns a string containing every element of the given list, with each
* element separated by a comma.
*
* @param list List of all elements
* @return String containing every element, comma-separated
*/
private static String getCommaSeparatedList(List<String> list) {
StringBuilder buffer = new StringBuilder();
String separator = "";
for (Object e : list) {
buffer.append(separator).append(e);
separator = ",";
}
return buffer.toString();
}
/**
* Converts a Maven FileSet to a list of File objects.
*
* @param source FileSet to convert
* @return List containing every element of the FileSet as a File
* @throws IOException if an I/O error occurs while trying to find the files
*/
@SuppressWarnings("unchecked")
public static List<File> convertFileSetToFiles(FileSet source) throws IOException {
String includes = getCommaSeparatedList(source.getIncludes());
String excludes = getCommaSeparatedList(source.getExcludes());
return FileUtils.getFiles(new File(source.getDirectory()), includes, excludes);
}
}
| 2,018 | 32.65 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.maven.plugin.util;
import org.apache.maven.plugin.Mojo;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.List;
/**
* Exec is a helper class for executing an external process from a mojo.
*/
public class Exec {
private Mojo mojo;
/**
* Creates a new Exec instance for executing an external process from the given
* mojo.
*
* @param mojo Mojo executing external process
*/
public Exec(Mojo mojo) {
this.mojo = mojo;
}
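  // A minimal usage sketch (this mirrors how ProtocMojo drives the class):
  //
  //   List<String> out = new ArrayList<String>();
  //   int rc = new Exec(this).run(Arrays.asList("protoc", "--version"), out);
  //   if (rc != 0) { /* inspect 'out' and fail the mojo */ }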
/**
* Runs the specified command and saves each line of the command's output to
* the given list.
*
* @param command List containing command and all arguments
* @param output List in/out parameter to receive command output
* @return int exit code of command
*/
public int run(List<String> command, List<String> output) {
int retCode = 1;
ProcessBuilder pb = new ProcessBuilder(command);
try {
Process p = pb.start();
OutputBufferThread stdOut = new OutputBufferThread(p.getInputStream());
OutputBufferThread stdErr = new OutputBufferThread(p.getErrorStream());
stdOut.start();
stdErr.start();
retCode = p.waitFor();
if (retCode != 0) {
mojo.getLog().warn(command + " failed with error code " + retCode);
for (String s : stdErr.getOutput()) {
mojo.getLog().debug(s);
}
}
stdOut.join();
stdErr.join();
output.addAll(stdOut.getOutput());
} catch (Exception ex) {
mojo.getLog().warn(command + " failed: " + ex.toString());
}
return retCode;
}
/**
* OutputBufferThread is a background thread for consuming and storing output
* of the external process.
*/
private static class OutputBufferThread extends Thread {
private List<String> output;
private BufferedReader reader;
/**
* Creates a new OutputBufferThread to consume the given InputStream.
*
* @param is InputStream to consume
*/
public OutputBufferThread(InputStream is) {
this.setDaemon(true);
output = new ArrayList<String>();
try {
reader = new BufferedReader(new InputStreamReader(is, "UTF-8"));
} catch (UnsupportedEncodingException e) {
throw new RuntimeException("Unsupported encoding " + e.toString());
}
}
@Override
public void run() {
try {
String line = reader.readLine();
while (line != null) {
output.add(line);
line = reader.readLine();
}
} catch (IOException ex) {
throw new RuntimeException("make failed with error code " + ex.toString());
}
}
/**
* Returns every line consumed from the input.
*
* @return List<String> every line consumed from the input
*/
public List<String> getOutput() {
return output;
}
}
}
| 3,566 | 28.479339 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.maven.plugin.protoc;
import org.apache.hadoop.maven.plugin.util.Exec;
import org.apache.hadoop.maven.plugin.util.FileSetUtils;
import org.apache.maven.model.FileSet;
import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.LifecyclePhase;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.apache.maven.project.MavenProject;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.type.TypeReference;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.zip.CRC32;
@Mojo(name="protoc", defaultPhase = LifecyclePhase.GENERATE_SOURCES)
public class ProtocMojo extends AbstractMojo {
@Parameter(defaultValue="${project}", readonly=true)
private MavenProject project;
@Parameter
private File[] imports;
@Parameter(defaultValue="${project.build.directory}/generated-sources/java")
private File output;
@Parameter(required=true)
private FileSet source;
@Parameter(defaultValue="protoc")
private String protocCommand;
@Parameter(required=true)
private String protocVersion;
@Parameter(defaultValue =
"${project.build.directory}/hadoop-maven-plugins-protoc-checksums.json")
private String checksumPath;
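  /*
   * Illustrative plugin configuration exercising the parameters above; the
   * execution id, paths and property names are examples, not taken from a
   * real pom:
   *
   *   <plugin>
   *     <groupId>org.apache.hadoop</groupId>
   *     <artifactId>hadoop-maven-plugins</artifactId>
   *     <executions>
   *       <execution>
   *         <id>compile-protoc</id>
   *         <goals><goal>protoc</goal></goals>
   *         <configuration>
   *           <protocVersion>${protobuf.version}</protocVersion>
   *           <protocCommand>${protoc.path}</protocCommand>
   *           <imports>
   *             <param>${basedir}/src/main/proto</param>
   *           </imports>
   *           <source>
   *             <directory>${basedir}/src/main/proto</directory>
   *             <includes>
   *               <include>*.proto</include>
   *             </includes>
   *           </source>
   *         </configuration>
   *       </execution>
   *     </executions>
   *   </plugin>
   */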
/**
* Compares include and source file checksums against previously computed
* checksums stored in a json file in the build directory.
*/
public class ChecksumComparator {
private final Map<String, Long> storedChecksums;
private final Map<String, Long> computedChecksums;
private final File checksumFile;
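    // The checksum file is a JSON map from canonical file path to CRC32
    // value, e.g. (illustrative): {"/src/main/proto/foo.proto": 1234567890}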
ChecksumComparator(String checksumPath) throws IOException {
checksumFile = new File(checksumPath);
// Read in the checksums
if (checksumFile.exists()) {
ObjectMapper mapper = new ObjectMapper();
storedChecksums = mapper
.readValue(checksumFile, new TypeReference<Map<String, Long>>() {
});
} else {
storedChecksums = new HashMap<>(0);
}
computedChecksums = new HashMap<>();
}
public boolean hasChanged(File file) throws IOException {
if (!file.exists()) {
throw new FileNotFoundException(
"Specified protoc include or source does not exist: " + file);
}
if (file.isDirectory()) {
return hasDirectoryChanged(file);
} else if (file.isFile()) {
return hasFileChanged(file);
} else {
throw new IOException("Not a file or directory: " + file);
}
}
private boolean hasDirectoryChanged(File directory) throws IOException {
File[] listing = directory.listFiles();
boolean changed = false;
// Do not exit early, since we need to compute and save checksums
// for each file within the directory.
for (File f : listing) {
if (f.isDirectory()) {
if (hasDirectoryChanged(f)) {
changed = true;
}
} else if (f.isFile()) {
if (hasFileChanged(f)) {
changed = true;
}
} else {
getLog().debug("Skipping entry that is not a file or directory: "
+ f);
}
}
return changed;
}
private boolean hasFileChanged(File file) throws IOException {
long computedCsum = computeChecksum(file);
// Return if the generated csum matches the stored csum
Long storedCsum = storedChecksums.get(file.getCanonicalPath());
if (storedCsum == null || storedCsum.longValue() != computedCsum) {
// It has changed.
return true;
}
return false;
}
private long computeChecksum(File file) throws IOException {
// If we've already computed the csum, reuse the computed value
final String canonicalPath = file.getCanonicalPath();
if (computedChecksums.containsKey(canonicalPath)) {
return computedChecksums.get(canonicalPath);
}
// Compute the csum for the file
CRC32 crc = new CRC32();
byte[] buffer = new byte[1024*64];
try (BufferedInputStream in =
new BufferedInputStream(new FileInputStream(file))) {
while (true) {
int read = in.read(buffer);
if (read <= 0) {
break;
}
crc.update(buffer, 0, read);
}
}
// Save it in the generated map and return
final long computedCsum = crc.getValue();
computedChecksums.put(canonicalPath, computedCsum);
return crc.getValue();
}
public void writeChecksums() throws IOException {
ObjectMapper mapper = new ObjectMapper();
try (BufferedOutputStream out = new BufferedOutputStream(
new FileOutputStream(checksumFile))) {
mapper.writeValue(out, computedChecksums);
getLog().info("Wrote protoc checksums to file " + checksumFile);
}
}
}
public void execute() throws MojoExecutionException {
try {
List<String> command = new ArrayList<String>();
command.add(protocCommand);
command.add("--version");
Exec exec = new Exec(this);
List<String> out = new ArrayList<String>();
if (exec.run(command, out) == 127) {
getLog().error("protoc, not found at: " + protocCommand);
throw new MojoExecutionException("protoc failure");
} else {
if (out.isEmpty()) {
getLog().error("stdout: " + out);
throw new MojoExecutionException(
"'protoc --version' did not return a version");
} else {
if (!out.get(0).endsWith(protocVersion)) {
throw new MojoExecutionException(
"protoc version is '" + out.get(0) + "', expected version is '"
+ protocVersion + "'");
}
}
}
if (!output.mkdirs()) {
if (!output.exists()) {
throw new MojoExecutionException(
"Could not create directory: " + output);
}
}
// Whether the import or source protoc files have changed.
ChecksumComparator comparator = new ChecksumComparator(checksumPath);
boolean importsChanged = false;
command = new ArrayList<String>();
command.add(protocCommand);
command.add("--java_out=" + output.getCanonicalPath());
if (imports != null) {
for (File i : imports) {
if (comparator.hasChanged(i)) {
importsChanged = true;
}
command.add("-I" + i.getCanonicalPath());
}
}
// Filter to generate classes for just the changed source files.
List<File> changedSources = new ArrayList<>();
boolean sourcesChanged = false;
for (File f : FileSetUtils.convertFileSetToFiles(source)) {
// Need to recompile if the source has changed, or if any import has
// changed.
if (comparator.hasChanged(f) || importsChanged) {
sourcesChanged = true;
changedSources.add(f);
command.add(f.getCanonicalPath());
}
}
if (!sourcesChanged && !importsChanged) {
getLog().info("No changes detected in protoc files, skipping "
+ "generation.");
} else {
if (getLog().isDebugEnabled()) {
StringBuilder b = new StringBuilder();
b.append("Generating classes for the following protoc files: [");
String prefix = "";
for (File f : changedSources) {
b.append(prefix);
b.append(f.toString());
prefix = ", ";
}
b.append("]");
getLog().debug(b.toString());
}
exec = new Exec(this);
out = new ArrayList<String>();
if (exec.run(command, out) != 0) {
getLog().error("protoc compiler error");
for (String s : out) {
getLog().error(s);
}
throw new MojoExecutionException("protoc failure");
}
// Write the new checksum file on success.
comparator.writeChecksums();
}
} catch (Throwable ex) {
throw new MojoExecutionException(ex.toString(), ex);
}
project.addCompileSourceRoot(output.getAbsolutePath());
}
}
| 9,070 | 32.847015 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.maven.plugin.versioninfo;
import java.util.Locale;
import org.apache.hadoop.maven.plugin.util.Exec;
import org.apache.hadoop.maven.plugin.util.FileSetUtils;
import org.apache.maven.model.FileSet;
import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;
import org.apache.maven.project.MavenProject;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.TimeZone;
/**
* VersionInfoMojo calculates information about the current version of the
* codebase and exports the information as properties for further use in a Maven
* build. The version information includes build time, SCM URI, SCM branch, SCM
* commit, and an MD5 checksum of the contents of the files in the codebase.
*/
@Mojo(name="version-info")
public class VersionInfoMojo extends AbstractMojo {
@Parameter(defaultValue="${project}", readonly=true)
private MavenProject project;
@Parameter(required=true)
private FileSet source;
@Parameter(defaultValue="version-info.build.time")
private String buildTimeProperty;
@Parameter(defaultValue="version-info.source.md5")
private String md5Property;
@Parameter(defaultValue="version-info.scm.uri")
private String scmUriProperty;
@Parameter(defaultValue="version-info.scm.branch")
private String scmBranchProperty;
@Parameter(defaultValue="version-info.scm.commit")
private String scmCommitProperty;
@Parameter(defaultValue="git")
private String gitCommand;
@Parameter(defaultValue="svn")
private String svnCommand;
private enum SCM {NONE, SVN, GIT}
@Override
public void execute() throws MojoExecutionException {
try {
SCM scm = determineSCM();
project.getProperties().setProperty(buildTimeProperty, getBuildTime());
project.getProperties().setProperty(scmUriProperty, getSCMUri(scm));
project.getProperties().setProperty(scmBranchProperty, getSCMBranch(scm));
project.getProperties().setProperty(scmCommitProperty, getSCMCommit(scm));
project.getProperties().setProperty(md5Property, computeMD5());
} catch (Throwable ex) {
throw new MojoExecutionException(ex.toString(), ex);
}
}
/**
* Returns a string representing current build time.
*
* @return String representing current build time
*/
private String getBuildTime() {
DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm'Z'");
dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
return dateFormat.format(new Date());
}
private List<String> scmOut;
/**
* Determines which SCM is in use (Subversion, git, or none) and captures
* output of the SCM command for later parsing.
*
* @return SCM in use for this build
* @throws Exception if any error occurs attempting to determine SCM
*/
private SCM determineSCM() throws Exception {
Exec exec = new Exec(this);
SCM scm = SCM.NONE;
scmOut = new ArrayList<String>();
int ret = exec.run(Arrays.asList(svnCommand, "info"), scmOut);
if (ret == 0) {
scm = SCM.SVN;
} else {
ret = exec.run(Arrays.asList(gitCommand, "branch"), scmOut);
if (ret == 0) {
ret = exec.run(Arrays.asList(gitCommand, "remote", "-v"), scmOut);
if (ret != 0) {
scm = SCM.NONE;
scmOut = null;
} else {
ret = exec.run(Arrays.asList(gitCommand, "log", "-n", "1"), scmOut);
if (ret != 0) {
scm = SCM.NONE;
scmOut = null;
} else {
scm = SCM.GIT;
}
}
}
}
if (scmOut != null) {
getLog().debug(scmOut.toString());
}
getLog().info("SCM: " + scm);
return scm;
}
/**
* Return URI and branch of Subversion repository.
*
* @param str String Subversion info output containing URI and branch
* @return String[] containing URI and branch
*/
private String[] getSvnUriInfo(String str) {
String[] res = new String[]{"Unknown", "Unknown"};
try {
String path = str;
int index = path.indexOf("trunk");
if (index > -1) {
res[0] = path.substring(0, index - 1);
res[1] = "trunk";
} else {
index = path.indexOf("branches");
if (index > -1) {
res[0] = path.substring(0, index - 1);
int branchIndex = index + "branches".length() + 1;
index = path.indexOf("/", branchIndex);
if (index > -1) {
res[1] = path.substring(branchIndex, index);
} else {
res[1] = path.substring(branchIndex);
}
}
}
} catch (Exception ex) {
getLog().warn("Could not determine URI & branch from SVN URI: " + str);
}
return res;
}
/**
* Parses SCM output and returns URI of SCM.
*
* @param scm SCM in use for this build
* @return String URI of SCM
*/
private String getSCMUri(SCM scm) {
String uri = "Unknown";
switch (scm) {
case SVN:
for (String s : scmOut) {
if (s.startsWith("URL:")) {
uri = s.substring(4).trim();
uri = getSvnUriInfo(uri)[0];
break;
}
}
break;
case GIT:
for (String s : scmOut) {
if (s.startsWith("origin") && s.endsWith("(fetch)")) {
uri = s.substring("origin".length());
uri = uri.substring(0, uri.length() - "(fetch)".length());
break;
}
}
break;
}
return uri.trim();
}
/**
* Parses SCM output and returns commit of SCM.
*
* @param scm SCM in use for this build
* @return String commit of SCM
*/
private String getSCMCommit(SCM scm) {
String commit = "Unknown";
switch (scm) {
case SVN:
for (String s : scmOut) {
if (s.startsWith("Revision:")) {
commit = s.substring("Revision:".length());
break;
}
}
break;
case GIT:
for (String s : scmOut) {
if (s.startsWith("commit")) {
commit = s.substring("commit".length());
break;
}
}
break;
}
return commit.trim();
}
/**
* Parses SCM output and returns branch of SCM.
*
* @param scm SCM in use for this build
* @return String branch of SCM
*/
private String getSCMBranch(SCM scm) {
String branch = "Unknown";
switch (scm) {
case SVN:
for (String s : scmOut) {
if (s.startsWith("URL:")) {
branch = s.substring(4).trim();
branch = getSvnUriInfo(branch)[1];
break;
}
}
break;
case GIT:
for (String s : scmOut) {
if (s.startsWith("*")) {
branch = s.substring("*".length());
break;
}
}
break;
}
return branch.trim();
}
/**
* Reads and returns the full contents of the specified file.
*
* @param file File to read
* @return byte[] containing full contents of file
* @throws IOException if there is an I/O error while reading the file
*/
private byte[] readFile(File file) throws IOException {
RandomAccessFile raf = new RandomAccessFile(file, "r");
byte[] buffer = new byte[(int) raf.length()];
raf.readFully(buffer);
raf.close();
return buffer;
}
/**
* Given a list of files, computes and returns an MD5 checksum of the full
* contents of all files.
*
* @param files List<File> containing every file to input into the MD5 checksum
* @return byte[] calculated MD5 checksum
* @throws IOException if there is an I/O error while reading a file
* @throws NoSuchAlgorithmException if the MD5 algorithm is not supported
*/
private byte[] computeMD5(List<File> files) throws IOException, NoSuchAlgorithmException {
MessageDigest md5 = MessageDigest.getInstance("MD5");
for (File file : files) {
getLog().debug("Computing MD5 for: " + file);
md5.update(readFile(file));
}
return md5.digest();
}
/**
* Converts bytes to a hexadecimal string representation and returns it.
*
* @param array byte[] to convert
* @return String containing hexadecimal representation of bytes
*/
private String byteArrayToString(byte[] array) {
StringBuilder sb = new StringBuilder();
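    // Note: Integer.toHexString drops leading zeros, so bytes below 0x10
    // contribute a single hex digit and the digest string is not zero-padded.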
for (byte b : array) {
sb.append(Integer.toHexString(0xff & b));
}
return sb.toString();
}
/**
* Computes and returns an MD5 checksum of the contents of all files in the
* input Maven FileSet.
*
* @return String containing hexadecimal representation of MD5 checksum
* @throws Exception if there is any error while computing the MD5 checksum
*/
private String computeMD5() throws Exception {
List<File> files = FileSetUtils.convertFileSetToFiles(source);
// File order of MD5 calculation is significant. Sorting is done on
// unix-format names, case-folded, in order to get a platform-independent
// sort and calculate the same MD5 on all platforms.
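    // For example, "src\Foo.java" on Windows and "src/foo.java" on Unix both
    // normalize to "SRC/FOO.JAVA", so the file ordering (and hence the MD5)
    // is identical across platforms.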
Collections.sort(files, new Comparator<File>() {
@Override
public int compare(File lhs, File rhs) {
return normalizePath(lhs).compareTo(normalizePath(rhs));
}
private String normalizePath(File file) {
return file.getPath().toUpperCase(Locale.ENGLISH)
.replaceAll("\\\\", "/");
}
});
byte[] md5 = computeMD5(files);
String md5str = byteArrayToString(md5);
getLog().info("Computed MD5: " + md5str);
return md5str;
}
}
| 10,554 | 29.772595 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.apache.hadoop.test.MockitoMaker.make;
import static org.apache.hadoop.test.MockitoMaker.stub;
import static org.jboss.netty.buffer.ChannelBuffers.wrappedBuffer;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import static org.junit.Assert.assertEquals;
import static org.junit.Assume.assumeTrue;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.SocketException;
import java.net.URL;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.zip.CheckedOutputStream;
import java.util.zip.Checksum;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.task.reduce.ShuffleHeader;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.service.ServiceStateException;
import org.apache.hadoop.util.PureJavaCrc32;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
import org.apache.hadoop.yarn.server.records.Version;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpRequest;
import org.jboss.netty.handler.codec.http.HttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.junit.Assert;
import org.junit.Test;
import org.mortbay.jetty.HttpHeaders;
public class TestShuffleHandler {
static final long MiB = 1024 * 1024;
private static final Log LOG = LogFactory.getLog(TestShuffleHandler.class);
/**
* Test the validation of ShuffleHandler's meta-data's serialization and
* de-serialization.
*
* @throws Exception exception
*/
@Test (timeout = 10000)
public void testSerializeMeta() throws Exception {
assertEquals(1, ShuffleHandler.deserializeMetaData(
ShuffleHandler.serializeMetaData(1)));
assertEquals(-1, ShuffleHandler.deserializeMetaData(
ShuffleHandler.serializeMetaData(-1)));
assertEquals(8080, ShuffleHandler.deserializeMetaData(
ShuffleHandler.serializeMetaData(8080)));
}
/**
* Validate shuffle connection and input/output metrics.
*
* @throws Exception exception
*/
@Test (timeout = 10000)
public void testShuffleMetrics() throws Exception {
MetricsSystem ms = new MetricsSystemImpl();
ShuffleHandler sh = new ShuffleHandler(ms);
ChannelFuture cf = make(stub(ChannelFuture.class).
returning(true, false).from.isSuccess());
sh.metrics.shuffleConnections.incr();
sh.metrics.shuffleOutputBytes.incr(1*MiB);
sh.metrics.shuffleConnections.incr();
sh.metrics.shuffleOutputBytes.incr(2*MiB);
checkShuffleMetrics(ms, 3*MiB, 0 , 0, 2);
sh.metrics.operationComplete(cf);
sh.metrics.operationComplete(cf);
checkShuffleMetrics(ms, 3*MiB, 1, 1, 0);
}
static void checkShuffleMetrics(MetricsSystem ms, long bytes, int failed,
int succeeded, int connections) {
MetricsSource source = ms.getSource("ShuffleMetrics");
MetricsRecordBuilder rb = getMetrics(source);
assertCounter("ShuffleOutputBytes", bytes, rb);
assertCounter("ShuffleOutputsFailed", failed, rb);
assertCounter("ShuffleOutputsOK", succeeded, rb);
assertGauge("ShuffleConnections", connections, rb);
}
/**
* Verify client prematurely closing a connection.
*
* @throws Exception exception.
*/
@Test (timeout = 10000)
public void testClientClosesConnection() throws Exception {
final ArrayList<Throwable> failures = new ArrayList<Throwable>(1);
Configuration conf = new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
ShuffleHandler shuffleHandler = new ShuffleHandler() {
@Override
protected Shuffle getShuffle(Configuration conf) {
// replace the shuffle handler with one stubbed for testing
return new Shuffle(conf) {
@Override
protected MapOutputInfo getMapOutputInfo(String base, String mapId,
int reduce, String user) throws IOException {
return null;
}
@Override
protected void populateHeaders(List<String> mapIds, String jobId,
String user, int reduce, HttpRequest request,
HttpResponse response, boolean keepAliveParam,
Map<String, MapOutputInfo> infoMap) throws IOException {
// Only set response headers and skip everything else
// send some dummy value for content-length
super.setResponseHeaders(response, keepAliveParam, 100);
}
@Override
protected void verifyRequest(String appid, ChannelHandlerContext ctx,
HttpRequest request, HttpResponse response, URL requestUri)
throws IOException {
}
@Override
protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx,
Channel ch, String user, String mapId, int reduce,
MapOutputInfo info)
throws IOException {
// send a shuffle header and a lot of data down the channel
// to trigger a broken pipe
ShuffleHeader header =
new ShuffleHeader("attempt_12345_1_m_1_0", 5678, 5678, 1);
DataOutputBuffer dob = new DataOutputBuffer();
header.write(dob);
ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
dob = new DataOutputBuffer();
for (int i = 0; i < 100000; ++i) {
header.write(dob);
}
return ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
}
@Override
protected void sendError(ChannelHandlerContext ctx,
HttpResponseStatus status) {
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
@Override
protected void sendError(ChannelHandlerContext ctx, String message,
HttpResponseStatus status) {
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
};
}
};
shuffleHandler.init(conf);
shuffleHandler.start();
// simulate a reducer that closes early by reading a single shuffle header
// then closing the connection
URL url = new URL("http://127.0.0.1:"
+ shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY)
+ "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_1_0");
HttpURLConnection conn = (HttpURLConnection)url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
DataInputStream input = new DataInputStream(conn.getInputStream());
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
Assert.assertEquals("close", conn.getHeaderField(HttpHeaders.CONNECTION));
ShuffleHeader header = new ShuffleHeader();
header.readFields(input);
input.close();
shuffleHandler.stop();
Assert.assertTrue("sendError called when client closed connection",
failures.size() == 0);
}
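  /**
   * Verify shuffle keep-alive: with keep-alive enabled and a negative timeout
   * configured, the handler is expected to advertise "Connection: keep-alive"
   * with "Keep-Alive: timeout=1", both for a plain request and for a request
   * carrying the keepAlive=true URL parameter.
   *
   * @throws Exception exception
   */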
@Test(timeout = 10000)
public void testKeepAlive() throws Exception {
final ArrayList<Throwable> failures = new ArrayList<Throwable>(1);
Configuration conf = new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
conf.setBoolean(ShuffleHandler.SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED, true);
    // try setting a negative keep-alive timeout.
conf.setInt(ShuffleHandler.SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT, -100);
ShuffleHandler shuffleHandler = new ShuffleHandler() {
@Override
protected Shuffle getShuffle(final Configuration conf) {
// replace the shuffle handler with one stubbed for testing
return new Shuffle(conf) {
@Override
protected MapOutputInfo getMapOutputInfo(String base, String mapId,
int reduce, String user) throws IOException {
return null;
}
@Override
protected void verifyRequest(String appid, ChannelHandlerContext ctx,
HttpRequest request, HttpResponse response, URL requestUri)
throws IOException {
}
@Override
protected void populateHeaders(List<String> mapIds, String jobId,
String user, int reduce, HttpRequest request,
HttpResponse response, boolean keepAliveParam,
Map<String, MapOutputInfo> infoMap) throws IOException {
// Send some dummy data (populate content length details)
ShuffleHeader header =
new ShuffleHeader("attempt_12345_1_m_1_0", 5678, 5678, 1);
DataOutputBuffer dob = new DataOutputBuffer();
header.write(dob);
dob = new DataOutputBuffer();
for (int i = 0; i < 100000; ++i) {
header.write(dob);
}
long contentLength = dob.getLength();
            // For testing purposes, disable connectionKeepAliveEnabled if the
            // keepAlive URL parameter is present.
if (keepAliveParam) {
connectionKeepAliveEnabled = false;
}
super.setResponseHeaders(response, keepAliveParam, contentLength);
}
@Override
protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx,
Channel ch, String user, String mapId, int reduce,
MapOutputInfo info) throws IOException {
HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
// send a shuffle header and a lot of data down the channel
// to trigger a broken pipe
ShuffleHeader header =
new ShuffleHeader("attempt_12345_1_m_1_0", 5678, 5678, 1);
DataOutputBuffer dob = new DataOutputBuffer();
header.write(dob);
ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
dob = new DataOutputBuffer();
for (int i = 0; i < 100000; ++i) {
header.write(dob);
}
return ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
}
@Override
protected void sendError(ChannelHandlerContext ctx,
HttpResponseStatus status) {
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
@Override
protected void sendError(ChannelHandlerContext ctx, String message,
HttpResponseStatus status) {
if (failures.size() == 0) {
failures.add(new Error());
ctx.getChannel().close();
}
}
};
}
};
shuffleHandler.init(conf);
shuffleHandler.start();
String shuffleBaseURL = "http://127.0.0.1:"
+ shuffleHandler.getConfig().get(
ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY);
URL url =
new URL(shuffleBaseURL + "/mapOutput?job=job_12345_1&reduce=1&"
+ "map=attempt_12345_1_m_1_0");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
DataInputStream input = new DataInputStream(conn.getInputStream());
Assert.assertEquals(HttpHeaders.KEEP_ALIVE,
conn.getHeaderField(HttpHeaders.CONNECTION));
Assert.assertEquals("timeout=1",
conn.getHeaderField(HttpHeaders.KEEP_ALIVE));
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
ShuffleHeader header = new ShuffleHeader();
header.readFields(input);
input.close();
// For keepAlive via URL
url =
new URL(shuffleBaseURL + "/mapOutput?job=job_12345_1&reduce=1&"
+ "map=attempt_12345_1_m_1_0&keepAlive=true");
conn = (HttpURLConnection) url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
input = new DataInputStream(conn.getInputStream());
Assert.assertEquals(HttpHeaders.KEEP_ALIVE,
conn.getHeaderField(HttpHeaders.CONNECTION));
Assert.assertEquals("timeout=1",
conn.getHeaderField(HttpHeaders.KEEP_ALIVE));
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
header = new ShuffleHeader();
header.readFields(input);
input.close();
}
/**
   * Simulate a reducer that sends an invalid shuffle header: sometimes a wrong
   * header name and sometimes a wrong version.
*
* @throws Exception exception
*/
@Test (timeout = 10000)
public void testIncompatibleShuffleVersion() throws Exception {
final int failureNum = 3;
Configuration conf = new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
ShuffleHandler shuffleHandler = new ShuffleHandler();
shuffleHandler.init(conf);
shuffleHandler.start();
    // simulate reducers sending incompatible shuffle headers and verify the
    // handler rejects each request with HTTP 400 (Bad Request)
URL url = new URL("http://127.0.0.1:"
+ shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY)
+ "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_1_0");
for (int i = 0; i < failureNum; ++i) {
HttpURLConnection conn = (HttpURLConnection)url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
i == 0 ? "mapreduce" : "other");
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
i == 1 ? "1.0.0" : "1.0.1");
conn.connect();
Assert.assertEquals(
HttpURLConnection.HTTP_BAD_REQUEST, conn.getResponseCode());
}
shuffleHandler.stop();
shuffleHandler.close();
}
/**
* Validate the limit on number of shuffle connections.
*
* @throws Exception exception
*/
@Test (timeout = 10000)
public void testMaxConnections() throws Exception {
Configuration conf = new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
ShuffleHandler shuffleHandler = new ShuffleHandler() {
@Override
protected Shuffle getShuffle(Configuration conf) {
// replace the shuffle handler with one stubbed for testing
return new Shuffle(conf) {
@Override
protected MapOutputInfo getMapOutputInfo(String base, String mapId,
int reduce, String user) throws IOException {
// Do nothing.
return null;
}
@Override
protected void populateHeaders(List<String> mapIds, String jobId,
String user, int reduce, HttpRequest request,
HttpResponse response, boolean keepAliveParam,
Map<String, MapOutputInfo> infoMap) throws IOException {
// Do nothing.
}
@Override
protected void verifyRequest(String appid, ChannelHandlerContext ctx,
HttpRequest request, HttpResponse response, URL requestUri)
throws IOException {
// Do nothing.
}
@Override
protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx,
Channel ch, String user, String mapId, int reduce,
MapOutputInfo info)
throws IOException {
// send a shuffle header and a lot of data down the channel
// to trigger a broken pipe
ShuffleHeader header =
new ShuffleHeader("dummy_header", 5678, 5678, 1);
DataOutputBuffer dob = new DataOutputBuffer();
header.write(dob);
ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
dob = new DataOutputBuffer();
for (int i=0; i<100000; ++i) {
header.write(dob);
}
return ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
}
};
}
};
shuffleHandler.init(conf);
shuffleHandler.start();
// setup connections
int connAttempts = 3;
HttpURLConnection conns[] = new HttpURLConnection[connAttempts];
for (int i = 0; i < connAttempts; i++) {
String URLstring = "http://127.0.0.1:"
+ shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY)
+ "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_"
+ i + "_0";
URL url = new URL(URLstring);
conns[i] = (HttpURLConnection)url.openConnection();
conns[i].setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conns[i].setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
}
// Try to open numerous connections
for (int i = 0; i < connAttempts; i++) {
conns[i].connect();
}
//Ensure first connections are okay
conns[0].getInputStream();
int rc = conns[0].getResponseCode();
Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
conns[1].getInputStream();
rc = conns[1].getResponseCode();
Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
    // This connection should be closed because it is above the connection limit
try {
conns[2].getInputStream();
rc = conns[2].getResponseCode();
Assert.fail("Expected a SocketException");
} catch (SocketException se) {
LOG.info("Expected - connection should not be open");
} catch (Exception e) {
Assert.fail("Expected a SocketException");
}
shuffleHandler.stop();
}
/**
* Validate the ownership of the map-output files being pulled in. The
   * local-file-system owner of the file should match the user component in the
   * shuffle request.
*
* @throws Exception exception
*/
@Test(timeout = 100000)
public void testMapFileAccess() throws IOException {
    // This will run only if NativeIO is enabled, as SecureIOUtils needs it
assumeTrue(NativeIO.isAvailable());
Configuration conf = new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
File absLogDir = new File("target",
TestShuffleHandler.class.getSimpleName() + "LocDir").getAbsoluteFile();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, absLogDir.getAbsolutePath());
ApplicationId appId = ApplicationId.newInstance(12345, 1);
LOG.info(appId.toString());
String appAttemptId = "attempt_12345_1_m_1_0";
String user = "randomUser";
String reducerId = "0";
List<File> fileMap = new ArrayList<File>();
createShuffleHandlerFiles(absLogDir, user, appId.toString(), appAttemptId,
conf, fileMap);
ShuffleHandler shuffleHandler = new ShuffleHandler() {
@Override
protected Shuffle getShuffle(Configuration conf) {
// replace the shuffle handler with one stubbed for testing
return new Shuffle(conf) {
@Override
protected void verifyRequest(String appid, ChannelHandlerContext ctx,
HttpRequest request, HttpResponse response, URL requestUri)
throws IOException {
// Do nothing.
}
};
}
};
shuffleHandler.init(conf);
try {
shuffleHandler.start();
DataOutputBuffer outputBuffer = new DataOutputBuffer();
outputBuffer.reset();
Token<JobTokenIdentifier> jt =
new Token<JobTokenIdentifier>("identifier".getBytes(),
"password".getBytes(), new Text(user), new Text("shuffleService"));
jt.write(outputBuffer);
shuffleHandler
.initializeApplication(new ApplicationInitializationContext(user,
appId, ByteBuffer.wrap(outputBuffer.getData(), 0,
outputBuffer.getLength())));
URL url =
new URL(
"http://127.0.0.1:"
+ shuffleHandler.getConfig().get(
ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY)
+ "/mapOutput?job=job_12345_0001&reduce=" + reducerId
+ "&map=attempt_12345_1_m_1_0");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
byte[] byteArr = new byte[10000];
try {
DataInputStream is = new DataInputStream(conn.getInputStream());
is.readFully(byteArr);
} catch (EOFException e) {
// ignore
}
// Retrieve file owner name
FileInputStream is = new FileInputStream(fileMap.get(0));
String owner = NativeIO.POSIX.getFstat(is.getFD()).getOwner();
is.close();
String message =
"Owner '" + owner + "' for path " + fileMap.get(0).getAbsolutePath()
+ " did not match expected owner '" + user + "'";
Assert.assertTrue((new String(byteArr)).contains(message));
} finally {
shuffleHandler.stop();
FileUtil.fullyDelete(absLogDir);
}
}
private static void createShuffleHandlerFiles(File logDir, String user,
String appId, String appAttemptId, Configuration conf,
List<File> fileMap) throws IOException {
String attemptDir =
StringUtils.join(Path.SEPARATOR,
Arrays.asList(new String[] { logDir.getAbsolutePath(),
ContainerLocalizer.USERCACHE, user,
ContainerLocalizer.APPCACHE, appId, "output", appAttemptId }));
File appAttemptDir = new File(attemptDir);
appAttemptDir.mkdirs();
System.out.println(appAttemptDir.getAbsolutePath());
File indexFile = new File(appAttemptDir, "file.out.index");
fileMap.add(indexFile);
createIndexFile(indexFile, conf);
File mapOutputFile = new File(appAttemptDir, "file.out");
fileMap.add(mapOutputFile);
createMapOutputFile(mapOutputFile, conf);
}
private static void
createMapOutputFile(File mapOutputFile, Configuration conf)
throws IOException {
FileOutputStream out = new FileOutputStream(mapOutputFile);
out.write("Creating new dummy map output file. Used only for testing"
.getBytes());
out.flush();
out.close();
}
private static void createIndexFile(File indexFile, Configuration conf)
throws IOException {
if (indexFile.exists()) {
System.out.println("Deleting existing file");
indexFile.delete();
}
indexFile.createNewFile();
FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
new Path(indexFile.getAbsolutePath()));
Checksum crc = new PureJavaCrc32();
crc.reset();
CheckedOutputStream chk = new CheckedOutputStream(output, crc);
String msg = "Writing new index file. This file will be used only " +
"for the testing.";
chk.write(Arrays.copyOf(msg.getBytes(),
MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
output.writeLong(chk.getChecksum().getValue());
output.close();
}
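  /**
   * Verify that shuffle job tokens are persisted to the recovery path and
   * survive a ShuffleHandler restart, and that access is revoked once the
   * application is stopped (even across a further restart).
   *
   * @throws IOException exception
   */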
@Test
public void testRecovery() throws IOException {
final String user = "someuser";
final ApplicationId appId = ApplicationId.newInstance(12345, 1);
final JobID jobId = JobID.downgrade(TypeConverter.fromYarn(appId));
final File tmpDir = new File(System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")),
TestShuffleHandler.class.getName());
Configuration conf = new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
ShuffleHandler shuffle = new ShuffleHandler();
// emulate aux services startup with recovery enabled
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
tmpDir.mkdirs();
try {
shuffle.init(conf);
shuffle.start();
// setup a shuffle token for an application
DataOutputBuffer outputBuffer = new DataOutputBuffer();
outputBuffer.reset();
Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>(
"identifier".getBytes(), "password".getBytes(), new Text(user),
new Text("shuffleService"));
jt.write(outputBuffer);
shuffle.initializeApplication(new ApplicationInitializationContext(user,
appId, ByteBuffer.wrap(outputBuffer.getData(), 0,
outputBuffer.getLength())));
// verify we are authorized to shuffle
int rc = getShuffleResponseCode(shuffle, jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
// emulate shuffle handler restart
shuffle.close();
shuffle = new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
// verify we are still authorized to shuffle to the old application
rc = getShuffleResponseCode(shuffle, jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
// shutdown app and verify access is lost
shuffle.stopApplication(new ApplicationTerminationContext(appId));
rc = getShuffleResponseCode(shuffle, jt);
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, rc);
// emulate shuffle handler restart
shuffle.close();
shuffle = new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
// verify we still don't have access
rc = getShuffleResponseCode(shuffle, jt);
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, rc);
} finally {
if (shuffle != null) {
shuffle.close();
}
FileUtil.fullyDelete(tmpDir);
}
}
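  /**
   * Verify state-store version handling on recovery: a compatible (same major)
   * stored version is replaced by CURRENT_VERSION_INFO on a successful restart,
   * while an incompatible major version causes startup to fail with a
   * ServiceStateException.
   *
   * @throws IOException exception
   */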
@Test
public void testRecoveryFromOtherVersions() throws IOException {
final String user = "someuser";
final ApplicationId appId = ApplicationId.newInstance(12345, 1);
final File tmpDir = new File(System.getProperty("test.build.data",
System.getProperty("java.io.tmpdir")),
TestShuffleHandler.class.getName());
Configuration conf = new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
ShuffleHandler shuffle = new ShuffleHandler();
// emulate aux services startup with recovery enabled
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
tmpDir.mkdirs();
try {
shuffle.init(conf);
shuffle.start();
// setup a shuffle token for an application
DataOutputBuffer outputBuffer = new DataOutputBuffer();
outputBuffer.reset();
Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>(
"identifier".getBytes(), "password".getBytes(), new Text(user),
new Text("shuffleService"));
jt.write(outputBuffer);
shuffle.initializeApplication(new ApplicationInitializationContext(user,
appId, ByteBuffer.wrap(outputBuffer.getData(), 0,
outputBuffer.getLength())));
// verify we are authorized to shuffle
int rc = getShuffleResponseCode(shuffle, jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
// emulate shuffle handler restart
shuffle.close();
shuffle = new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
// verify we are still authorized to shuffle to the old application
rc = getShuffleResponseCode(shuffle, jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
Version version = Version.newInstance(1, 0);
Assert.assertEquals(version, shuffle.getCurrentVersion());
// emulate shuffle handler restart with compatible version
Version version11 = Version.newInstance(1, 1);
// update version info before close shuffle
shuffle.storeVersion(version11);
Assert.assertEquals(version11, shuffle.loadVersion());
shuffle.close();
shuffle = new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
shuffle.start();
      // the stored shuffle version is overwritten by CURRENT_VERSION_INFO
      // after a successful restart.
Assert.assertEquals(version, shuffle.loadVersion());
// verify we are still authorized to shuffle to the old application
rc = getShuffleResponseCode(shuffle, jt);
Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
// emulate shuffle handler restart with incompatible version
Version version21 = Version.newInstance(2, 1);
shuffle.storeVersion(version21);
Assert.assertEquals(version21, shuffle.loadVersion());
shuffle.close();
shuffle = new ShuffleHandler();
shuffle.setRecoveryPath(new Path(tmpDir.toString()));
shuffle.init(conf);
try {
shuffle.start();
Assert.fail("Incompatible version, should expect fail here.");
} catch (ServiceStateException e) {
Assert.assertTrue("Exception message mismatch",
e.getMessage().contains("Incompatible version for state DB schema:"));
}
} finally {
if (shuffle != null) {
shuffle.close();
}
FileUtil.fullyDelete(tmpDir);
}
}
private static int getShuffleResponseCode(ShuffleHandler shuffle,
Token<JobTokenIdentifier> jt) throws IOException {
URL url = new URL("http://127.0.0.1:"
+ shuffle.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY)
+ "/mapOutput?job=job_12345_0001&reduce=0&map=attempt_12345_1_m_1_0");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
String encHash = SecureShuffleUtils.hashFromString(
SecureShuffleUtils.buildMsgFrom(url),
JobTokenSecretManager.createSecretKey(jt.getPassword()));
conn.addRequestProperty(
SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
int rc = conn.getResponseCode();
conn.disconnect();
return rc;
}
@Test(timeout = 100000)
public void testGetMapOutputInfo() throws Exception {
final ArrayList<Throwable> failures = new ArrayList<Throwable>(1);
Configuration conf = new Configuration();
conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"simple");
UserGroupInformation.setConfiguration(conf);
File absLogDir = new File("target", TestShuffleHandler.class.
getSimpleName() + "LocDir").getAbsoluteFile();
conf.set(YarnConfiguration.NM_LOCAL_DIRS, absLogDir.getAbsolutePath());
ApplicationId appId = ApplicationId.newInstance(12345, 1);
String appAttemptId = "attempt_12345_1_m_1_0";
String user = "randomUser";
String reducerId = "0";
List<File> fileMap = new ArrayList<File>();
createShuffleHandlerFiles(absLogDir, user, appId.toString(), appAttemptId,
conf, fileMap);
ShuffleHandler shuffleHandler = new ShuffleHandler() {
@Override
protected Shuffle getShuffle(Configuration conf) {
// replace the shuffle handler with one stubbed for testing
return new Shuffle(conf) {
@Override
protected void populateHeaders(List<String> mapIds,
String outputBaseStr, String user, int reduce,
HttpRequest request, HttpResponse response,
boolean keepAliveParam, Map<String, MapOutputInfo> infoMap)
throws IOException {
// Only set response headers and skip everything else
// send some dummy value for content-length
super.setResponseHeaders(response, keepAliveParam, 100);
}
@Override
protected void verifyRequest(String appid,
ChannelHandlerContext ctx, HttpRequest request,
HttpResponse response, URL requestUri) throws IOException {
// Do nothing.
}
@Override
protected void sendError(ChannelHandlerContext ctx, String message,
HttpResponseStatus status) {
if (failures.size() == 0) {
failures.add(new Error(message));
ctx.getChannel().close();
}
}
@Override
protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx,
Channel ch, String user, String mapId, int reduce,
MapOutputInfo info) throws IOException {
// send a shuffle header
ShuffleHeader header =
new ShuffleHeader("attempt_12345_1_m_1_0", 5678, 5678, 1);
DataOutputBuffer dob = new DataOutputBuffer();
header.write(dob);
return ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
}
};
}
};
shuffleHandler.init(conf);
try {
shuffleHandler.start();
DataOutputBuffer outputBuffer = new DataOutputBuffer();
outputBuffer.reset();
Token<JobTokenIdentifier> jt =
new Token<JobTokenIdentifier>("identifier".getBytes(),
"password".getBytes(), new Text(user), new Text("shuffleService"));
jt.write(outputBuffer);
shuffleHandler
.initializeApplication(new ApplicationInitializationContext(user,
appId, ByteBuffer.wrap(outputBuffer.getData(), 0,
outputBuffer.getLength())));
URL url =
new URL(
"http://127.0.0.1:"
+ shuffleHandler.getConfig().get(
ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY)
+ "/mapOutput?job=job_12345_0001&reduce=" + reducerId
+ "&map=attempt_12345_1_m_1_0");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME,
ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION,
ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
conn.connect();
try {
DataInputStream is = new DataInputStream(conn.getInputStream());
ShuffleHeader header = new ShuffleHeader();
header.readFields(is);
is.close();
} catch (EOFException e) {
// ignore
}
Assert.assertEquals("sendError called due to shuffle error",
0, failures.size());
} finally {
shuffleHandler.stop();
FileUtil.fullyDelete(absLogDir);
}
}
}
| 38,154 | 39.676972 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestFadvisedFileRegion.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.WritableByteChannel;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Test;
public class TestFadvisedFileRegion {
private final int FILE_SIZE = 16*1024*1024;
private static final Log LOG =
LogFactory.getLog(TestFadvisedFileRegion.class);
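  /**
   * Write 16 MB of random data, transfer a (4 MB - 1) byte region starting at a
   * 2 MB offset through FadvisedFileRegion.customShuffleTransfer, and verify the
   * copied bytes match the source byte-for-byte.
   *
   * @throws IOException exception
   */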
@Test(timeout = 100000)
public void testCustomShuffleTransfer() throws IOException {
File absLogDir = new File("target",
TestFadvisedFileRegion.class.getSimpleName() +
"LocDir").getAbsoluteFile();
String testDirPath =
StringUtils.join(Path.SEPARATOR,
new String[] { absLogDir.getAbsolutePath(),
"testCustomShuffleTransfer"});
File testDir = new File(testDirPath);
testDir.mkdirs();
System.out.println(testDir.getAbsolutePath());
File inFile = new File(testDir, "fileIn.out");
File outFile = new File(testDir, "fileOut.out");
//Initialize input file
byte [] initBuff = new byte[FILE_SIZE];
Random rand = new Random();
rand.nextBytes(initBuff);
FileOutputStream out = new FileOutputStream(inFile);
try{
out.write(initBuff);
} finally {
IOUtils.cleanup(LOG, out);
}
//define position and count to read from a file region.
int position = 2*1024*1024;
int count = 4*1024*1024 - 1;
RandomAccessFile inputFile = null;
RandomAccessFile targetFile = null;
WritableByteChannel target = null;
FadvisedFileRegion fileRegion = null;
try {
inputFile = new RandomAccessFile(inFile.getAbsolutePath(), "r");
targetFile = new RandomAccessFile(outFile.getAbsolutePath(), "rw");
target = targetFile.getChannel();
Assert.assertEquals(FILE_SIZE, inputFile.length());
//create FadvisedFileRegion
fileRegion = new FadvisedFileRegion(
inputFile, position, count, false, 0, null, null, 1024, false);
//test corner cases
customShuffleTransferCornerCases(fileRegion, target, count);
long pos = 0;
long size;
while((size = fileRegion.customShuffleTransfer(target, pos)) > 0) {
pos += size;
}
//assert size
Assert.assertEquals(count, (int)pos);
Assert.assertEquals(count, targetFile.length());
} finally {
if (fileRegion != null) {
fileRegion.releaseExternalResources();
}
IOUtils.cleanup(LOG, target);
IOUtils.cleanup(LOG, targetFile);
IOUtils.cleanup(LOG, inputFile);
}
//Read the target file and verify that copy is done correctly
byte [] buff = new byte[FILE_SIZE];
FileInputStream in = new FileInputStream(outFile);
try {
int total = in.read(buff, 0, count);
Assert.assertEquals(count, total);
for(int i = 0; i < count; i++) {
Assert.assertEquals(initBuff[position+i], buff[i]);
}
} finally {
IOUtils.cleanup(LOG, in);
}
//delete files and folders
inFile.delete();
outFile.delete();
testDir.delete();
absLogDir.delete();
}
private static void customShuffleTransferCornerCases(
FadvisedFileRegion fileRegion, WritableByteChannel target, int count) {
try {
fileRegion.customShuffleTransfer(target, -1);
Assert.fail("Expected a IllegalArgumentException");
} catch (IllegalArgumentException ie) {
LOG.info("Expected - illegal argument is passed.");
} catch (Exception e) {
Assert.fail("Expected a IllegalArgumentException");
}
//test corner cases
try {
fileRegion.customShuffleTransfer(target, count + 1);
Assert.fail("Expected a IllegalArgumentException");
} catch (IllegalArgumentException ie) {
LOG.info("Expected - illegal argument is passed.");
} catch (Exception e) {
Assert.fail("Expected a IllegalArgumentException");
}
}
}
| 5,097 | 31.265823 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.fusesource.leveldbjni.JniDBFactory.asString;
import static org.fusesource.leveldbjni.JniDBFactory.bytes;
import static org.jboss.netty.buffer.ChannelBuffers.wrappedBuffer;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.CONTENT_TYPE;
import static org.jboss.netty.handler.codec.http.HttpMethod.GET;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.BAD_REQUEST;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.METHOD_NOT_ALLOWED;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.NOT_FOUND;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.UNAUTHORIZED;
import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.net.InetSocketAddress;
import java.net.URL;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import javax.crypto.SecretKey;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.ReadaheadPool;
import org.apache.hadoop.io.SecureIOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.proto.ShuffleHandlerRecoveryProtos.JobShuffleInfoProto;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.task.reduce.ShuffleHeader;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto;
import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
import org.apache.hadoop.yarn.server.api.AuxiliaryService;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
import org.apache.hadoop.yarn.server.records.Version;
import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl;
import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.fusesource.leveldbjni.JniDBFactory;
import org.fusesource.leveldbjni.internal.NativeDB;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.Logger;
import org.iq80.leveldb.Options;
import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelFutureListener;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.channel.group.ChannelGroup;
import org.jboss.netty.channel.group.DefaultChannelGroup;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.jboss.netty.handler.codec.frame.TooLongFrameException;
import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
import org.jboss.netty.handler.codec.http.HttpRequest;
import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
import org.jboss.netty.handler.codec.http.HttpResponse;
import org.jboss.netty.handler.codec.http.HttpResponseEncoder;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.jboss.netty.handler.codec.http.QueryStringDecoder;
import org.jboss.netty.handler.ssl.SslHandler;
import org.jboss.netty.handler.stream.ChunkedWriteHandler;
import org.jboss.netty.util.CharsetUtil;
import org.mortbay.jetty.HttpHeaders;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.protobuf.ByteString;
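/**
 * ShuffleHandler is the NodeManager auxiliary service ("mapreduce_shuffle")
 * that serves map outputs to reducers over HTTP using a Netty pipeline. It
 * authenticates requests with the job token, optionally keeps connections
 * alive, and can persist job token state in a LevelDB store so shuffles
 * survive a NodeManager restart.
 */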
public class ShuffleHandler extends AuxiliaryService {
private static final Log LOG = LogFactory.getLog(ShuffleHandler.class);
private static final Log AUDITLOG =
LogFactory.getLog(ShuffleHandler.class.getName()+".audit");
public static final String SHUFFLE_MANAGE_OS_CACHE = "mapreduce.shuffle.manage.os.cache";
public static final boolean DEFAULT_SHUFFLE_MANAGE_OS_CACHE = true;
public static final String SHUFFLE_READAHEAD_BYTES = "mapreduce.shuffle.readahead.bytes";
public static final int DEFAULT_SHUFFLE_READAHEAD_BYTES = 4 * 1024 * 1024;
// pattern to identify errors related to the client closing the socket early
// idea borrowed from Netty SslHandler
private static final Pattern IGNORABLE_ERROR_MESSAGE = Pattern.compile(
"^.*(?:connection.*reset|connection.*closed|broken.*pipe).*$",
Pattern.CASE_INSENSITIVE);
private static final String STATE_DB_NAME = "mapreduce_shuffle_state";
private static final String STATE_DB_SCHEMA_VERSION_KEY = "shuffle-schema-version";
protected static final Version CURRENT_VERSION_INFO =
Version.newInstance(1, 0);
private int port;
private ChannelFactory selector;
private final ChannelGroup accepted = new DefaultChannelGroup();
protected HttpPipelineFactory pipelineFact;
private int sslFileBufferSize;
/**
* Should the shuffle use posix_fadvise calls to manage the OS cache during
   * sendfile.
*/
private boolean manageOsCache;
private int readaheadLength;
private int maxShuffleConnections;
private int shuffleBufferSize;
private boolean shuffleTransferToAllowed;
private ReadaheadPool readaheadPool = ReadaheadPool.getInstance();
private Map<String,String> userRsrc;
private JobTokenSecretManager secretManager;
private DB stateDb = null;
public static final String MAPREDUCE_SHUFFLE_SERVICEID =
"mapreduce_shuffle";
public static final String SHUFFLE_PORT_CONFIG_KEY = "mapreduce.shuffle.port";
public static final int DEFAULT_SHUFFLE_PORT = 13562;
public static final String SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED =
"mapreduce.shuffle.connection-keep-alive.enable";
public static final boolean DEFAULT_SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED = false;
public static final String SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT =
"mapreduce.shuffle.connection-keep-alive.timeout";
public static final int DEFAULT_SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT = 5; //seconds
public static final String SHUFFLE_MAPOUTPUT_META_INFO_CACHE_SIZE =
"mapreduce.shuffle.mapoutput-info.meta.cache.size";
public static final int DEFAULT_SHUFFLE_MAPOUTPUT_META_INFO_CACHE_SIZE =
1000;
public static final String CONNECTION_CLOSE = "close";
public static final String SUFFLE_SSL_FILE_BUFFER_SIZE_KEY =
"mapreduce.shuffle.ssl.file.buffer.size";
public static final int DEFAULT_SUFFLE_SSL_FILE_BUFFER_SIZE = 60 * 1024;
public static final String MAX_SHUFFLE_CONNECTIONS = "mapreduce.shuffle.max.connections";
public static final int DEFAULT_MAX_SHUFFLE_CONNECTIONS = 0; // 0 implies no limit
public static final String MAX_SHUFFLE_THREADS = "mapreduce.shuffle.max.threads";
// 0 implies Netty default of 2 * number of available processors
public static final int DEFAULT_MAX_SHUFFLE_THREADS = 0;
public static final String SHUFFLE_BUFFER_SIZE =
"mapreduce.shuffle.transfer.buffer.size";
public static final int DEFAULT_SHUFFLE_BUFFER_SIZE = 128 * 1024;
public static final String SHUFFLE_TRANSFERTO_ALLOWED =
"mapreduce.shuffle.transferTo.allowed";
public static final boolean DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED = true;
public static final boolean WINDOWS_DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED =
false;
boolean connectionKeepAliveEnabled = false;
int connectionKeepAliveTimeOut;
int mapOutputMetaInfoCacheSize;
@Metrics(about="Shuffle output metrics", context="mapred")
static class ShuffleMetrics implements ChannelFutureListener {
@Metric("Shuffle output in bytes")
MutableCounterLong shuffleOutputBytes;
@Metric("# of failed shuffle outputs")
MutableCounterInt shuffleOutputsFailed;
@Metric("# of succeeeded shuffle outputs")
MutableCounterInt shuffleOutputsOK;
@Metric("# of current shuffle connections")
MutableGaugeInt shuffleConnections;
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if (future.isSuccess()) {
shuffleOutputsOK.incr();
} else {
shuffleOutputsFailed.incr();
}
shuffleConnections.decr();
}
}
final ShuffleMetrics metrics;
ShuffleHandler(MetricsSystem ms) {
super("httpshuffle");
metrics = ms.register(new ShuffleMetrics());
}
public ShuffleHandler() {
this(DefaultMetricsSystem.instance());
}
/**
* Serialize the shuffle port into a ByteBuffer for use later on.
   * @param port the port to be sent to the ApplicationMaster
* @return the serialized form of the port.
*/
public static ByteBuffer serializeMetaData(int port) throws IOException {
//TODO these bytes should be versioned
DataOutputBuffer port_dob = new DataOutputBuffer();
port_dob.writeInt(port);
return ByteBuffer.wrap(port_dob.getData(), 0, port_dob.getLength());
}
/**
* A helper function to deserialize the metadata returned by ShuffleHandler.
* @param meta the metadata returned by the ShuffleHandler
* @return the port the Shuffle Handler is listening on to serve shuffle data.
*/
public static int deserializeMetaData(ByteBuffer meta) throws IOException {
//TODO this should be returning a class not just an int
DataInputByteBuffer in = new DataInputByteBuffer();
in.reset(meta);
int port = in.readInt();
return port;
}
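  /*
   * Illustrative round-trip sketch (not part of the original source): the
   * NodeManager advertises the bound port through serializeMetaData() and an
   * ApplicationMaster recovers it through deserializeMetaData(). Assuming the
   * two static helpers above:
   *
   *   ByteBuffer meta = ShuffleHandler.serializeMetaData(13562);
   *   int port = ShuffleHandler.deserializeMetaData(meta); // port == 13562
   */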
/**
* A helper function to serialize the JobTokenIdentifier to be sent to the
* ShuffleHandler as ServiceData.
* @param jobToken the job token to be used for authentication of
* shuffle data requests.
* @return the serialized version of the jobToken.
*/
public static ByteBuffer serializeServiceData(Token<JobTokenIdentifier> jobToken) throws IOException {
//TODO these bytes should be versioned
DataOutputBuffer jobToken_dob = new DataOutputBuffer();
jobToken.write(jobToken_dob);
return ByteBuffer.wrap(jobToken_dob.getData(), 0, jobToken_dob.getLength());
}
static Token<JobTokenIdentifier> deserializeServiceData(ByteBuffer secret) throws IOException {
DataInputByteBuffer in = new DataInputByteBuffer();
in.reset(secret);
Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>();
jt.readFields(in);
return jt;
}
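  /*
   * Illustrative sketch (not part of the original source): the client side
   * serializes the per-job token as service data when the application is
   * submitted, and initializeApplication() below deserializes it again.
   * Assuming a populated Token<JobTokenIdentifier> named jobToken:
   *
   *   ByteBuffer serviceData = ShuffleHandler.serializeServiceData(jobToken);
   *   Token<JobTokenIdentifier> recovered =
   *       ShuffleHandler.deserializeServiceData(serviceData);
   */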
@Override
public void initializeApplication(ApplicationInitializationContext context) {
String user = context.getUser();
ApplicationId appId = context.getApplicationId();
ByteBuffer secret = context.getApplicationDataForService();
// TODO these bytes should be versioned
try {
Token<JobTokenIdentifier> jt = deserializeServiceData(secret);
      // TODO: Once Shuffle is out of NM, this can use MR APIs
JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
recordJobShuffleInfo(jobId, user, jt);
} catch (IOException e) {
LOG.error("Error during initApp", e);
// TODO add API to AuxiliaryServices to report failures
}
}
@Override
public void stopApplication(ApplicationTerminationContext context) {
ApplicationId appId = context.getApplicationId();
JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
try {
removeJobShuffleInfo(jobId);
} catch (IOException e) {
LOG.error("Error during stopApp", e);
// TODO add API to AuxiliaryServices to report failures
}
}
@Override
protected void serviceInit(Configuration conf) throws Exception {
manageOsCache = conf.getBoolean(SHUFFLE_MANAGE_OS_CACHE,
DEFAULT_SHUFFLE_MANAGE_OS_CACHE);
readaheadLength = conf.getInt(SHUFFLE_READAHEAD_BYTES,
DEFAULT_SHUFFLE_READAHEAD_BYTES);
maxShuffleConnections = conf.getInt(MAX_SHUFFLE_CONNECTIONS,
DEFAULT_MAX_SHUFFLE_CONNECTIONS);
int maxShuffleThreads = conf.getInt(MAX_SHUFFLE_THREADS,
DEFAULT_MAX_SHUFFLE_THREADS);
if (maxShuffleThreads == 0) {
maxShuffleThreads = 2 * Runtime.getRuntime().availableProcessors();
}
shuffleBufferSize = conf.getInt(SHUFFLE_BUFFER_SIZE,
DEFAULT_SHUFFLE_BUFFER_SIZE);
shuffleTransferToAllowed = conf.getBoolean(SHUFFLE_TRANSFERTO_ALLOWED,
(Shell.WINDOWS)?WINDOWS_DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED:
DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED);
ThreadFactory bossFactory = new ThreadFactoryBuilder()
.setNameFormat("ShuffleHandler Netty Boss #%d")
.build();
ThreadFactory workerFactory = new ThreadFactoryBuilder()
.setNameFormat("ShuffleHandler Netty Worker #%d")
.build();
selector = new NioServerSocketChannelFactory(
Executors.newCachedThreadPool(bossFactory),
Executors.newCachedThreadPool(workerFactory),
maxShuffleThreads);
super.serviceInit(new Configuration(conf));
}
// TODO change AbstractService to throw InterruptedException
@Override
protected void serviceStart() throws Exception {
Configuration conf = getConfig();
userRsrc = new ConcurrentHashMap<String,String>();
secretManager = new JobTokenSecretManager();
recoverState(conf);
ServerBootstrap bootstrap = new ServerBootstrap(selector);
try {
pipelineFact = new HttpPipelineFactory(conf);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
bootstrap.setPipelineFactory(pipelineFact);
port = conf.getInt(SHUFFLE_PORT_CONFIG_KEY, DEFAULT_SHUFFLE_PORT);
Channel ch = bootstrap.bind(new InetSocketAddress(port));
accepted.add(ch);
port = ((InetSocketAddress)ch.getLocalAddress()).getPort();
conf.set(SHUFFLE_PORT_CONFIG_KEY, Integer.toString(port));
pipelineFact.SHUFFLE.setPort(port);
LOG.info(getName() + " listening on port " + port);
super.serviceStart();
sslFileBufferSize = conf.getInt(SUFFLE_SSL_FILE_BUFFER_SIZE_KEY,
DEFAULT_SUFFLE_SSL_FILE_BUFFER_SIZE);
connectionKeepAliveEnabled =
conf.getBoolean(SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED,
DEFAULT_SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED);
connectionKeepAliveTimeOut =
Math.max(1, conf.getInt(SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT,
DEFAULT_SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT));
mapOutputMetaInfoCacheSize =
Math.max(1, conf.getInt(SHUFFLE_MAPOUTPUT_META_INFO_CACHE_SIZE,
DEFAULT_SHUFFLE_MAPOUTPUT_META_INFO_CACHE_SIZE));
}
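  /*
   * Illustrative startup sketch (not part of the original source): setting
   * the shuffle port to 0 asks for an ephemeral port; serviceStart() above
   * records the actual bound port, which callers can then read back from the
   * serialized metadata. The variable names are hypothetical.
   *
   *   Configuration conf = new Configuration();
   *   conf.setInt(SHUFFLE_PORT_CONFIG_KEY, 0); // 0 -> pick any free port
   *   ShuffleHandler handler = new ShuffleHandler();
   *   handler.init(conf);
   *   handler.start();
   *   int boundPort = deserializeMetaData(handler.getMetaData());
   */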
@Override
protected void serviceStop() throws Exception {
accepted.close().awaitUninterruptibly(10, TimeUnit.SECONDS);
if (selector != null) {
ServerBootstrap bootstrap = new ServerBootstrap(selector);
bootstrap.releaseExternalResources();
}
if (pipelineFact != null) {
pipelineFact.destroy();
}
if (stateDb != null) {
stateDb.close();
}
super.serviceStop();
}
@Override
public synchronized ByteBuffer getMetaData() {
try {
return serializeMetaData(port);
} catch (IOException e) {
LOG.error("Error during getMeta", e);
// TODO add API to AuxiliaryServices to report failures
return null;
}
}
protected Shuffle getShuffle(Configuration conf) {
return new Shuffle(conf);
}
private void recoverState(Configuration conf) throws IOException {
Path recoveryRoot = getRecoveryPath();
if (recoveryRoot != null) {
startStore(recoveryRoot);
Pattern jobPattern = Pattern.compile(JobID.JOBID_REGEX);
LeveldbIterator iter = null;
try {
iter = new LeveldbIterator(stateDb);
iter.seek(bytes(JobID.JOB));
while (iter.hasNext()) {
Map.Entry<byte[],byte[]> entry = iter.next();
String key = asString(entry.getKey());
if (!jobPattern.matcher(key).matches()) {
break;
}
recoverJobShuffleInfo(key, entry.getValue());
}
} catch (DBException e) {
throw new IOException("Database error during recovery", e);
} finally {
if (iter != null) {
iter.close();
}
}
}
}
private void startStore(Path recoveryRoot) throws IOException {
Options options = new Options();
options.createIfMissing(false);
options.logger(new LevelDBLogger());
Path dbPath = new Path(recoveryRoot, STATE_DB_NAME);
LOG.info("Using state database at " + dbPath + " for recovery");
File dbfile = new File(dbPath.toString());
try {
stateDb = JniDBFactory.factory.open(dbfile, options);
} catch (NativeDB.DBException e) {
if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
LOG.info("Creating state database at " + dbfile);
options.createIfMissing(true);
try {
stateDb = JniDBFactory.factory.open(dbfile, options);
storeVersion();
} catch (DBException dbExc) {
throw new IOException("Unable to create state store", dbExc);
}
} else {
throw e;
}
}
checkVersion();
}
@VisibleForTesting
Version loadVersion() throws IOException {
byte[] data = stateDb.get(bytes(STATE_DB_SCHEMA_VERSION_KEY));
// if version is not stored previously, treat it as CURRENT_VERSION_INFO.
if (data == null || data.length == 0) {
return getCurrentVersion();
}
Version version =
new VersionPBImpl(VersionProto.parseFrom(data));
return version;
}
private void storeSchemaVersion(Version version) throws IOException {
String key = STATE_DB_SCHEMA_VERSION_KEY;
byte[] data =
((VersionPBImpl) version).getProto().toByteArray();
try {
stateDb.put(bytes(key), data);
} catch (DBException e) {
throw new IOException(e.getMessage(), e);
}
}
private void storeVersion() throws IOException {
storeSchemaVersion(CURRENT_VERSION_INFO);
}
// Only used for test
@VisibleForTesting
void storeVersion(Version version) throws IOException {
storeSchemaVersion(version);
}
protected Version getCurrentVersion() {
return CURRENT_VERSION_INFO;
}
/**
   * 1) Versioning scheme: major.minor, e.g. 1.0, 1.1, 1.2 ... 1.25, 2.0, etc.
* 2) Any incompatible change of DB schema is a major upgrade, and any
* compatible change of DB schema is a minor upgrade.
* 3) Within a minor upgrade, say 1.1 to 1.2:
* overwrite the version info and proceed as normal.
* 4) Within a major upgrade, say 1.2 to 2.0:
* throw exception and indicate user to use a separate upgrade tool to
* upgrade shuffle info or remove incompatible old state.
*/
private void checkVersion() throws IOException {
Version loadedVersion = loadVersion();
LOG.info("Loaded state DB schema version info " + loadedVersion);
if (loadedVersion.equals(getCurrentVersion())) {
return;
}
if (loadedVersion.isCompatibleTo(getCurrentVersion())) {
LOG.info("Storing state DB schedma version info " + getCurrentVersion());
storeVersion();
} else {
throw new IOException(
"Incompatible version for state DB schema: expecting DB schema version "
+ getCurrentVersion() + ", but loading version " + loadedVersion);
}
}
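  /*
   * Worked example of the policy above (illustrative, not part of the
   * original source), with CURRENT_VERSION_INFO = 1.0:
   *
   *   loaded 1.0 -> equal to the current version, nothing to do
   *   loaded 1.1 -> compatible minor change, the current version info is
   *                 stored over the old one and recovery proceeds
   *   loaded 2.0 -> incompatible major change, checkVersion() throws an
   *                 IOException and the old state must be upgraded or
   *                 removed with a separate tool
   */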
private void addJobToken(JobID jobId, String user,
Token<JobTokenIdentifier> jobToken) {
userRsrc.put(jobId.toString(), user);
secretManager.addTokenForJob(jobId.toString(), jobToken);
LOG.info("Added token for " + jobId.toString());
}
private void recoverJobShuffleInfo(String jobIdStr, byte[] data)
throws IOException {
JobID jobId;
try {
jobId = JobID.forName(jobIdStr);
} catch (IllegalArgumentException e) {
throw new IOException("Bad job ID " + jobIdStr + " in state store", e);
}
JobShuffleInfoProto proto = JobShuffleInfoProto.parseFrom(data);
String user = proto.getUser();
TokenProto tokenProto = proto.getJobToken();
Token<JobTokenIdentifier> jobToken = new Token<JobTokenIdentifier>(
tokenProto.getIdentifier().toByteArray(),
tokenProto.getPassword().toByteArray(),
new Text(tokenProto.getKind()), new Text(tokenProto.getService()));
addJobToken(jobId, user, jobToken);
}
private void recordJobShuffleInfo(JobID jobId, String user,
Token<JobTokenIdentifier> jobToken) throws IOException {
if (stateDb != null) {
TokenProto tokenProto = TokenProto.newBuilder()
.setIdentifier(ByteString.copyFrom(jobToken.getIdentifier()))
.setPassword(ByteString.copyFrom(jobToken.getPassword()))
.setKind(jobToken.getKind().toString())
.setService(jobToken.getService().toString())
.build();
JobShuffleInfoProto proto = JobShuffleInfoProto.newBuilder()
.setUser(user).setJobToken(tokenProto).build();
try {
stateDb.put(bytes(jobId.toString()), proto.toByteArray());
} catch (DBException e) {
throw new IOException("Error storing " + jobId, e);
}
}
addJobToken(jobId, user, jobToken);
}
private void removeJobShuffleInfo(JobID jobId) throws IOException {
String jobIdStr = jobId.toString();
secretManager.removeTokenForJob(jobIdStr);
userRsrc.remove(jobIdStr);
if (stateDb != null) {
try {
stateDb.delete(bytes(jobIdStr));
} catch (DBException e) {
throw new IOException("Unable to remove " + jobId
+ " from state store", e);
}
}
}
private static class LevelDBLogger implements Logger {
private static final Log LOG = LogFactory.getLog(LevelDBLogger.class);
@Override
public void log(String message) {
LOG.info(message);
}
}
class HttpPipelineFactory implements ChannelPipelineFactory {
final Shuffle SHUFFLE;
private SSLFactory sslFactory;
public HttpPipelineFactory(Configuration conf) throws Exception {
SHUFFLE = getShuffle(conf);
if (conf.getBoolean(MRConfig.SHUFFLE_SSL_ENABLED_KEY,
MRConfig.SHUFFLE_SSL_ENABLED_DEFAULT)) {
LOG.info("Encrypted shuffle is enabled.");
sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
sslFactory.init();
}
}
public void destroy() {
if (sslFactory != null) {
sslFactory.destroy();
}
}
@Override
public ChannelPipeline getPipeline() throws Exception {
ChannelPipeline pipeline = Channels.pipeline();
if (sslFactory != null) {
pipeline.addLast("ssl", new SslHandler(sslFactory.createSSLEngine()));
}
pipeline.addLast("decoder", new HttpRequestDecoder());
pipeline.addLast("aggregator", new HttpChunkAggregator(1 << 16));
pipeline.addLast("encoder", new HttpResponseEncoder());
pipeline.addLast("chunking", new ChunkedWriteHandler());
pipeline.addLast("shuffle", SHUFFLE);
return pipeline;
// TODO factor security manager into pipeline
// TODO factor out encode/decode to permit binary shuffle
// TODO factor out decode of index to permit alt. models
}
}
class Shuffle extends SimpleChannelUpstreamHandler {
private final Configuration conf;
private final IndexCache indexCache;
private final LocalDirAllocator lDirAlloc =
new LocalDirAllocator(YarnConfiguration.NM_LOCAL_DIRS);
private int port;
public Shuffle(Configuration conf) {
this.conf = conf;
indexCache = new IndexCache(new JobConf(conf));
this.port = conf.getInt(SHUFFLE_PORT_CONFIG_KEY, DEFAULT_SHUFFLE_PORT);
}
public void setPort(int port) {
this.port = port;
}
private List<String> splitMaps(List<String> mapq) {
if (null == mapq) {
return null;
}
final List<String> ret = new ArrayList<String>();
for (String s : mapq) {
Collections.addAll(ret, s.split(","));
}
return ret;
}
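    /*
     * Illustrative example (not part of the original source): for a request
     * such as ...?map=attempt_m_1,attempt_m_2&map=attempt_m_3 the query
     * decoder yields ["attempt_m_1,attempt_m_2", "attempt_m_3"], which
     * splitMaps() flattens into
     * ["attempt_m_1", "attempt_m_2", "attempt_m_3"].
     */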
@Override
public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent evt)
throws Exception {
if ((maxShuffleConnections > 0) && (accepted.size() >= maxShuffleConnections)) {
LOG.info(String.format("Current number of shuffle connections (%d) is " +
"greater than or equal to the max allowed shuffle connections (%d)",
accepted.size(), maxShuffleConnections));
evt.getChannel().close();
return;
}
accepted.add(evt.getChannel());
super.channelOpen(ctx, evt);
}
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent evt)
throws Exception {
HttpRequest request = (HttpRequest) evt.getMessage();
if (request.getMethod() != GET) {
sendError(ctx, METHOD_NOT_ALLOWED);
return;
}
// Check whether the shuffle version is compatible
if (!ShuffleHeader.DEFAULT_HTTP_HEADER_NAME.equals(
request.getHeader(ShuffleHeader.HTTP_HEADER_NAME))
|| !ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION.equals(
request.getHeader(ShuffleHeader.HTTP_HEADER_VERSION))) {
sendError(ctx, "Incompatible shuffle request version", BAD_REQUEST);
}
final Map<String,List<String>> q =
new QueryStringDecoder(request.getUri()).getParameters();
final List<String> keepAliveList = q.get("keepAlive");
boolean keepAliveParam = false;
if (keepAliveList != null && keepAliveList.size() == 1) {
keepAliveParam = Boolean.valueOf(keepAliveList.get(0));
if (LOG.isDebugEnabled()) {
LOG.debug("KeepAliveParam : " + keepAliveList
+ " : " + keepAliveParam);
}
}
final List<String> mapIds = splitMaps(q.get("map"));
final List<String> reduceQ = q.get("reduce");
final List<String> jobQ = q.get("job");
if (LOG.isDebugEnabled()) {
LOG.debug("RECV: " + request.getUri() +
"\n mapId: " + mapIds +
"\n reduceId: " + reduceQ +
"\n jobId: " + jobQ +
"\n keepAlive: " + keepAliveParam);
}
if (mapIds == null || reduceQ == null || jobQ == null) {
sendError(ctx, "Required param job, map and reduce", BAD_REQUEST);
return;
}
if (reduceQ.size() != 1 || jobQ.size() != 1) {
sendError(ctx, "Too many job/reduce parameters", BAD_REQUEST);
return;
}
      // This audit log is disabled by default. To enable it, uncomment the
      // corresponding audit log setting in log4j.properties.
if (AUDITLOG.isDebugEnabled()) {
AUDITLOG.debug("shuffle for " + jobQ.get(0) +
" reducer " + reduceQ.get(0));
}
int reduceId;
String jobId;
try {
reduceId = Integer.parseInt(reduceQ.get(0));
jobId = jobQ.get(0);
} catch (NumberFormatException e) {
sendError(ctx, "Bad reduce parameter", BAD_REQUEST);
return;
} catch (IllegalArgumentException e) {
sendError(ctx, "Bad job parameter", BAD_REQUEST);
return;
}
final String reqUri = request.getUri();
if (null == reqUri) {
// TODO? add upstream?
sendError(ctx, FORBIDDEN);
return;
}
HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
try {
verifyRequest(jobId, ctx, request, response,
new URL("http", "", this.port, reqUri));
} catch (IOException e) {
LOG.warn("Shuffle failure ", e);
sendError(ctx, e.getMessage(), UNAUTHORIZED);
return;
}
Map<String, MapOutputInfo> mapOutputInfoMap =
new HashMap<String, MapOutputInfo>();
Channel ch = evt.getChannel();
String user = userRsrc.get(jobId);
// $x/$user/appcache/$appId/output/$mapId
// TODO: Once Shuffle is out of NM, this can use MR APIs to convert
// between App and Job
String outputBasePathStr = getBaseLocation(jobId, user);
try {
populateHeaders(mapIds, outputBasePathStr, user, reduceId, request,
response, keepAliveParam, mapOutputInfoMap);
} catch(IOException e) {
ch.write(response);
LOG.error("Shuffle error in populating headers :", e);
String errorMessage = getErrorMessage(e);
        sendError(ctx, errorMessage, INTERNAL_SERVER_ERROR);
return;
}
ch.write(response);
// TODO refactor the following into the pipeline
ChannelFuture lastMap = null;
for (String mapId : mapIds) {
try {
MapOutputInfo info = mapOutputInfoMap.get(mapId);
if (info == null) {
info = getMapOutputInfo(outputBasePathStr + mapId,
mapId, reduceId, user);
}
lastMap =
sendMapOutput(ctx, ch, user, mapId,
reduceId, info);
if (null == lastMap) {
sendError(ctx, NOT_FOUND);
return;
}
} catch (IOException e) {
LOG.error("Shuffle error :", e);
String errorMessage = getErrorMessage(e);
          sendError(ctx, errorMessage, INTERNAL_SERVER_ERROR);
return;
}
}
lastMap.addListener(metrics);
lastMap.addListener(ChannelFutureListener.CLOSE);
}
private String getErrorMessage(Throwable t) {
StringBuffer sb = new StringBuffer(t.getMessage());
while (t.getCause() != null) {
sb.append(t.getCause().getMessage());
t = t.getCause();
}
return sb.toString();
}
private String getBaseLocation(String jobId, String user) {
final JobID jobID = JobID.forName(jobId);
final ApplicationId appID =
ApplicationId.newInstance(Long.parseLong(jobID.getJtIdentifier()),
jobID.getId());
final String baseStr =
ContainerLocalizer.USERCACHE + "/" + user + "/"
+ ContainerLocalizer.APPCACHE + "/"
+ ConverterUtils.toString(appID) + "/output" + "/";
return baseStr;
}
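    /*
     * Illustrative example (not part of the original source): for user
     * "alice" and job "job_1408135816999_0001" the method above returns the
     * relative base
     *
     *   usercache/alice/appcache/application_1408135816999_0001/output/
     *
     * which getMapOutputInfo() later resolves against the configured NM
     * local directories via the LocalDirAllocator.
     */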
protected MapOutputInfo getMapOutputInfo(String base, String mapId,
int reduce, String user) throws IOException {
// Index file
Path indexFileName =
lDirAlloc.getLocalPathToRead(base + "/file.out.index", conf);
IndexRecord info =
indexCache.getIndexInformation(mapId, reduce, indexFileName, user);
Path mapOutputFileName =
lDirAlloc.getLocalPathToRead(base + "/file.out", conf);
if (LOG.isDebugEnabled()) {
LOG.debug(base + " : " + mapOutputFileName + " : " + indexFileName);
}
MapOutputInfo outputInfo = new MapOutputInfo(mapOutputFileName, info);
return outputInfo;
}
protected void populateHeaders(List<String> mapIds, String outputBaseStr,
String user, int reduce, HttpRequest request, HttpResponse response,
boolean keepAliveParam, Map<String, MapOutputInfo> mapOutputInfoMap)
throws IOException {
long contentLength = 0;
for (String mapId : mapIds) {
String base = outputBaseStr + mapId;
MapOutputInfo outputInfo = getMapOutputInfo(base, mapId, reduce, user);
if (mapOutputInfoMap.size() < mapOutputMetaInfoCacheSize) {
mapOutputInfoMap.put(mapId, outputInfo);
}
// Index file
Path indexFileName =
lDirAlloc.getLocalPathToRead(base + "/file.out.index", conf);
IndexRecord info =
indexCache.getIndexInformation(mapId, reduce, indexFileName, user);
ShuffleHeader header =
new ShuffleHeader(mapId, info.partLength, info.rawLength, reduce);
DataOutputBuffer dob = new DataOutputBuffer();
header.write(dob);
contentLength += info.partLength;
contentLength += dob.getLength();
}
// Now set the response headers.
setResponseHeaders(response, keepAliveParam, contentLength);
}
protected void setResponseHeaders(HttpResponse response,
boolean keepAliveParam, long contentLength) {
if (!connectionKeepAliveEnabled && !keepAliveParam) {
if (LOG.isDebugEnabled()) {
LOG.debug("Setting connection close header...");
}
response.setHeader(HttpHeaders.CONNECTION, CONNECTION_CLOSE);
} else {
response.setHeader(HttpHeaders.CONTENT_LENGTH,
String.valueOf(contentLength));
response.setHeader(HttpHeaders.CONNECTION, HttpHeaders.KEEP_ALIVE);
response.setHeader(HttpHeaders.KEEP_ALIVE, "timeout="
+ connectionKeepAliveTimeOut);
LOG.info("Content Length in shuffle : " + contentLength);
}
}
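    /*
     * Illustrative response sketch (not part of the original source): with
     * keep-alive disabled and not requested, the response only carries
     * "Connection: close". With keep-alive enabled (or ?keepAlive=true) it
     * carries, for example:
     *
     *   Content-Length: <sum of all shuffle headers and partition lengths>
     *   Connection: Keep-Alive
     *   Keep-Alive: timeout=5   (connectionKeepAliveTimeOut, 5 by default)
     */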
class MapOutputInfo {
final Path mapOutputFileName;
final IndexRecord indexRecord;
MapOutputInfo(Path mapOutputFileName, IndexRecord indexRecord) {
this.mapOutputFileName = mapOutputFileName;
this.indexRecord = indexRecord;
}
}
protected void verifyRequest(String appid, ChannelHandlerContext ctx,
HttpRequest request, HttpResponse response, URL requestUri)
throws IOException {
SecretKey tokenSecret = secretManager.retrieveTokenSecret(appid);
if (null == tokenSecret) {
LOG.info("Request for unknown token " + appid);
throw new IOException("could not find jobid");
}
// string to encrypt
String enc_str = SecureShuffleUtils.buildMsgFrom(requestUri);
// hash from the fetcher
String urlHashStr =
request.getHeader(SecureShuffleUtils.HTTP_HEADER_URL_HASH);
if (urlHashStr == null) {
LOG.info("Missing header hash for " + appid);
throw new IOException("fetcher cannot be authenticated");
}
if (LOG.isDebugEnabled()) {
int len = urlHashStr.length();
LOG.debug("verifying request. enc_str=" + enc_str + "; hash=..." +
urlHashStr.substring(len-len/2, len-1));
}
// verify - throws exception
SecureShuffleUtils.verifyReply(urlHashStr, enc_str, tokenSecret);
// verification passed - encode the reply
String reply =
SecureShuffleUtils.generateHash(urlHashStr.getBytes(Charsets.UTF_8),
tokenSecret);
response.setHeader(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH, reply);
// Put shuffle version into http header
response.setHeader(ShuffleHeader.HTTP_HEADER_NAME,
ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
response.setHeader(ShuffleHeader.HTTP_HEADER_VERSION,
ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
if (LOG.isDebugEnabled()) {
int len = reply.length();
LOG.debug("Fetcher request verfied. enc_str=" + enc_str + ";reply=" +
reply.substring(len-len/2, len-1));
}
}
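    /*
     * Illustrative handshake sketch (not part of the original source): the
     * fetcher sends an HMAC of the request URI, computed with the per-job
     * secret, in the SecureShuffleUtils.HTTP_HEADER_URL_HASH header. The
     * method above rebuilds the message from the URI, verifies that hash via
     * SecureShuffleUtils.verifyReply(), and answers with an HMAC of the hash
     * in SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH, which the fetcher
     * checks in turn before trusting the response.
     */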
protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch,
String user, String mapId, int reduce, MapOutputInfo mapOutputInfo)
throws IOException {
final IndexRecord info = mapOutputInfo.indexRecord;
final ShuffleHeader header =
new ShuffleHeader(mapId, info.partLength, info.rawLength, reduce);
final DataOutputBuffer dob = new DataOutputBuffer();
header.write(dob);
ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
final File spillfile =
new File(mapOutputInfo.mapOutputFileName.toString());
RandomAccessFile spill;
try {
spill = SecureIOUtils.openForRandomRead(spillfile, "r", user, null);
} catch (FileNotFoundException e) {
LOG.info(spillfile + " not found");
return null;
}
ChannelFuture writeFuture;
if (ch.getPipeline().get(SslHandler.class) == null) {
final FadvisedFileRegion partition = new FadvisedFileRegion(spill,
info.startOffset, info.partLength, manageOsCache, readaheadLength,
readaheadPool, spillfile.getAbsolutePath(),
shuffleBufferSize, shuffleTransferToAllowed);
writeFuture = ch.write(partition);
writeFuture.addListener(new ChannelFutureListener() {
// TODO error handling; distinguish IO/connection failures,
// attribute to appropriate spill output
@Override
public void operationComplete(ChannelFuture future) {
if (future.isSuccess()) {
partition.transferSuccessful();
}
partition.releaseExternalResources();
}
});
} else {
// HTTPS cannot be done with zero copy.
final FadvisedChunkedFile chunk = new FadvisedChunkedFile(spill,
info.startOffset, info.partLength, sslFileBufferSize,
manageOsCache, readaheadLength, readaheadPool,
spillfile.getAbsolutePath());
writeFuture = ch.write(chunk);
}
metrics.shuffleConnections.incr();
metrics.shuffleOutputBytes.incr(info.partLength); // optimistic
return writeFuture;
}
protected void sendError(ChannelHandlerContext ctx,
HttpResponseStatus status) {
sendError(ctx, "", status);
}
protected void sendError(ChannelHandlerContext ctx, String message,
HttpResponseStatus status) {
HttpResponse response = new DefaultHttpResponse(HTTP_1_1, status);
response.setHeader(CONTENT_TYPE, "text/plain; charset=UTF-8");
// Put shuffle version into http header
response.setHeader(ShuffleHeader.HTTP_HEADER_NAME,
ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
response.setHeader(ShuffleHeader.HTTP_HEADER_VERSION,
ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
response.setContent(
ChannelBuffers.copiedBuffer(message, CharsetUtil.UTF_8));
// Close the connection as soon as the error message is sent.
ctx.getChannel().write(response).addListener(ChannelFutureListener.CLOSE);
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
throws Exception {
Channel ch = e.getChannel();
Throwable cause = e.getCause();
if (cause instanceof TooLongFrameException) {
sendError(ctx, BAD_REQUEST);
return;
} else if (cause instanceof IOException) {
if (cause instanceof ClosedChannelException) {
LOG.debug("Ignoring closed channel error", cause);
return;
}
String message = String.valueOf(cause.getMessage());
if (IGNORABLE_ERROR_MESSAGE.matcher(message).matches()) {
LOG.debug("Ignoring client socket close", cause);
return;
}
}
LOG.error("Shuffle error: ", cause);
if (ch.isConnected()) {
LOG.error("Shuffle error " + e);
sendError(ctx, INTERNAL_SERVER_ERROR);
}
}
}
}
| 41,709 | 37.872321 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedChunkedFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.FileDescriptor;
import java.io.IOException;
import java.io.RandomAccessFile;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.ReadaheadPool;
import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
import org.apache.hadoop.io.nativeio.NativeIO;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED;
import org.jboss.netty.handler.stream.ChunkedFile;
public class FadvisedChunkedFile extends ChunkedFile {
private static final Log LOG = LogFactory.getLog(FadvisedChunkedFile.class);
private final boolean manageOsCache;
private final int readaheadLength;
private final ReadaheadPool readaheadPool;
private final FileDescriptor fd;
private final String identifier;
private ReadaheadRequest readaheadRequest;
public FadvisedChunkedFile(RandomAccessFile file, long position, long count,
int chunkSize, boolean manageOsCache, int readaheadLength,
ReadaheadPool readaheadPool, String identifier) throws IOException {
super(file, position, count, chunkSize);
this.manageOsCache = manageOsCache;
this.readaheadLength = readaheadLength;
this.readaheadPool = readaheadPool;
this.fd = file.getFD();
this.identifier = identifier;
}
@Override
public Object nextChunk() throws Exception {
if (manageOsCache && readaheadPool != null) {
readaheadRequest = readaheadPool
.readaheadStream(identifier, fd, getCurrentOffset(), readaheadLength,
getEndOffset(), readaheadRequest);
}
return super.nextChunk();
}
@Override
public void close() throws Exception {
if (readaheadRequest != null) {
readaheadRequest.cancel();
}
if (manageOsCache && getEndOffset() - getStartOffset() > 0) {
try {
NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(identifier,
fd,
getStartOffset(), getEndOffset() - getStartOffset(),
POSIX_FADV_DONTNEED);
} catch (Throwable t) {
LOG.warn("Failed to manage OS cache for " + identifier, t);
}
}
super.close();
}
}
| 2,990 | 33.77907 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.FileDescriptor;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.ReadaheadPool;
import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
import org.apache.hadoop.io.nativeio.NativeIO;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED;
import org.jboss.netty.channel.DefaultFileRegion;
import com.google.common.annotations.VisibleForTesting;
public class FadvisedFileRegion extends DefaultFileRegion {
private static final Log LOG = LogFactory.getLog(FadvisedFileRegion.class);
private final boolean manageOsCache;
private final int readaheadLength;
private final ReadaheadPool readaheadPool;
private final FileDescriptor fd;
private final String identifier;
private final long count;
private final long position;
private final int shuffleBufferSize;
private final boolean shuffleTransferToAllowed;
private final FileChannel fileChannel;
private ReadaheadRequest readaheadRequest;
public FadvisedFileRegion(RandomAccessFile file, long position, long count,
boolean manageOsCache, int readaheadLength, ReadaheadPool readaheadPool,
String identifier, int shuffleBufferSize,
boolean shuffleTransferToAllowed) throws IOException {
super(file.getChannel(), position, count);
this.manageOsCache = manageOsCache;
this.readaheadLength = readaheadLength;
this.readaheadPool = readaheadPool;
this.fd = file.getFD();
this.identifier = identifier;
this.fileChannel = file.getChannel();
this.count = count;
this.position = position;
this.shuffleBufferSize = shuffleBufferSize;
this.shuffleTransferToAllowed = shuffleTransferToAllowed;
}
@Override
public long transferTo(WritableByteChannel target, long position)
throws IOException {
if (manageOsCache && readaheadPool != null) {
readaheadRequest = readaheadPool.readaheadStream(identifier, fd,
getPosition() + position, readaheadLength,
getPosition() + getCount(), readaheadRequest);
}
    if (this.shuffleTransferToAllowed) {
return super.transferTo(target, position);
} else {
return customShuffleTransfer(target, position);
}
}
/**
   * This method transfers data using a local buffer. It reads data from disk
   * into an in-memory buffer and then writes it from the buffer to the
   * target. It is used only if transferTo is disallowed in the configuration.
   * super.transferTo does not perform well on Windows because it generates
   * small IO requests; customShuffleTransfer can control the IO request size
   * by changing the size of the intermediate buffer.
*/
@VisibleForTesting
long customShuffleTransfer(WritableByteChannel target, long position)
throws IOException {
long actualCount = this.count - position;
if (actualCount < 0 || position < 0) {
throw new IllegalArgumentException(
"position out of range: " + position +
" (expected: 0 - " + (this.count - 1) + ')');
}
if (actualCount == 0) {
return 0L;
}
long trans = actualCount;
int readSize;
ByteBuffer byteBuffer = ByteBuffer.allocate(this.shuffleBufferSize);
while(trans > 0L &&
(readSize = fileChannel.read(byteBuffer, this.position+position)) > 0) {
//adjust counters and buffer limit
if(readSize < trans) {
trans -= readSize;
position += readSize;
byteBuffer.flip();
} else {
        // We can read more than we need if actualCount is not a multiple of
        // the byteBuffer size and the file is big enough. In that case we
        // cannot use the flip method; instead we set the buffer limit
        // manually to trans.
byteBuffer.limit((int)trans);
byteBuffer.position(0);
position += trans;
trans = 0;
}
//write data to the target
while(byteBuffer.hasRemaining()) {
target.write(byteBuffer);
}
byteBuffer.clear();
}
return actualCount - trans;
}
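  /*
   * Worked example of the buffered path above (illustrative, not part of the
   * original source): with count = 300 KiB, position = 0 and
   * shuffleBufferSize = 128 KiB, and assuming full reads, the loop copies
   * 128 KiB + 128 KiB + 44 KiB. The last iteration may read more than is
   * needed, so the buffer limit is set manually to the remaining 44 KiB
   * instead of flipping, and the method returns 300 KiB.
   */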
@Override
public void releaseExternalResources() {
if (readaheadRequest != null) {
readaheadRequest.cancel();
}
super.releaseExternalResources();
}
/**
* Call when the transfer completes successfully so we can advise the OS that
* we don't need the region to be cached anymore.
*/
public void transferSuccessful() {
if (manageOsCache && getCount() > 0) {
try {
NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(identifier,
fd, getPosition(), getCount(), POSIX_FADV_DONTNEED);
} catch (Throwable t) {
LOG.warn("Failed to manage OS cache for " + identifier, t);
}
}
}
}
| 5,814 | 33.613095 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import org.apache.hadoop.util.StringUtils;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
public class TestTypeConverter {
@Test
public void testEnums() throws Exception {
for (YarnApplicationState applicationState : YarnApplicationState.values()) {
TypeConverter.fromYarn(applicationState, FinalApplicationStatus.FAILED);
}
// ad hoc test of NEW_SAVING, which is newly added
Assert.assertEquals(State.PREP, TypeConverter.fromYarn(
YarnApplicationState.NEW_SAVING, FinalApplicationStatus.FAILED));
for (TaskType taskType : TaskType.values()) {
TypeConverter.fromYarn(taskType);
}
for (JobState jobState : JobState.values()) {
TypeConverter.fromYarn(jobState);
}
for (QueueState queueState : QueueState.values()) {
TypeConverter.fromYarn(queueState);
}
for (TaskState taskState : TaskState.values()) {
TypeConverter.fromYarn(taskState);
}
}
@Test
public void testFromYarn() throws Exception {
int appStartTime = 612354;
int appFinishTime = 612355;
YarnApplicationState state = YarnApplicationState.RUNNING;
ApplicationId applicationId = ApplicationId.newInstance(0, 0);
ApplicationReport applicationReport = Records
.newRecord(ApplicationReport.class);
applicationReport.setApplicationId(applicationId);
applicationReport.setYarnApplicationState(state);
applicationReport.setStartTime(appStartTime);
applicationReport.setFinishTime(appFinishTime);
applicationReport.setUser("TestTypeConverter-user");
ApplicationResourceUsageReport appUsageRpt = Records
.newRecord(ApplicationResourceUsageReport.class);
Resource r = Records.newRecord(Resource.class);
r.setMemory(2048);
appUsageRpt.setNeededResources(r);
appUsageRpt.setNumReservedContainers(1);
appUsageRpt.setNumUsedContainers(3);
appUsageRpt.setReservedResources(r);
appUsageRpt.setUsedResources(r);
applicationReport.setApplicationResourceUsageReport(appUsageRpt);
JobStatus jobStatus = TypeConverter.fromYarn(applicationReport, "dummy-jobfile");
Assert.assertEquals(appStartTime, jobStatus.getStartTime());
Assert.assertEquals(appFinishTime, jobStatus.getFinishTime());
Assert.assertEquals(state.toString(), jobStatus.getState().toString());
}
@Test
public void testFromYarnApplicationReport() {
ApplicationId mockAppId = mock(ApplicationId.class);
when(mockAppId.getClusterTimestamp()).thenReturn(12345L);
when(mockAppId.getId()).thenReturn(6789);
ApplicationReport mockReport = mock(ApplicationReport.class);
when(mockReport.getTrackingUrl()).thenReturn("dummy-tracking-url");
when(mockReport.getApplicationId()).thenReturn(mockAppId);
when(mockReport.getYarnApplicationState()).thenReturn(YarnApplicationState.KILLED);
when(mockReport.getUser()).thenReturn("dummy-user");
when(mockReport.getQueue()).thenReturn("dummy-queue");
String jobFile = "dummy-path/job.xml";
try {
JobStatus status = TypeConverter.fromYarn(mockReport, jobFile);
} catch (NullPointerException npe) {
Assert.fail("Type converstion from YARN fails for jobs without " +
"ApplicationUsageReport");
}
ApplicationResourceUsageReport appUsageRpt = Records
.newRecord(ApplicationResourceUsageReport.class);
Resource r = Records.newRecord(Resource.class);
r.setMemory(2048);
appUsageRpt.setNeededResources(r);
appUsageRpt.setNumReservedContainers(1);
appUsageRpt.setNumUsedContainers(3);
appUsageRpt.setReservedResources(r);
appUsageRpt.setUsedResources(r);
when(mockReport.getApplicationResourceUsageReport()).thenReturn(appUsageRpt);
JobStatus status = TypeConverter.fromYarn(mockReport, jobFile);
Assert.assertNotNull("fromYarn returned null status", status);
Assert.assertEquals("jobFile set incorrectly", "dummy-path/job.xml", status.getJobFile());
Assert.assertEquals("queue set incorrectly", "dummy-queue", status.getQueue());
Assert.assertEquals("trackingUrl set incorrectly", "dummy-tracking-url", status.getTrackingUrl());
Assert.assertEquals("user set incorrectly", "dummy-user", status.getUsername());
Assert.assertEquals("schedulingInfo set incorrectly", "dummy-tracking-url", status.getSchedulingInfo());
Assert.assertEquals("jobId set incorrectly", 6789, status.getJobID().getId());
Assert.assertEquals("state set incorrectly", JobStatus.State.KILLED, status.getState());
Assert.assertEquals("needed mem info set incorrectly", 2048, status.getNeededMem());
Assert.assertEquals("num rsvd slots info set incorrectly", 1, status.getNumReservedSlots());
Assert.assertEquals("num used slots info set incorrectly", 3, status.getNumUsedSlots());
Assert.assertEquals("rsvd mem info set incorrectly", 2048, status.getReservedMem());
Assert.assertEquals("used mem info set incorrectly", 2048, status.getUsedMem());
}
@Test
public void testFromYarnQueueInfo() {
org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = Records
.newRecord(org.apache.hadoop.yarn.api.records.QueueInfo.class);
queueInfo.setQueueState(org.apache.hadoop.yarn.api.records.QueueState.STOPPED);
org.apache.hadoop.mapreduce.QueueInfo returned =
TypeConverter.fromYarn(queueInfo, new Configuration());
Assert.assertEquals("queueInfo translation didn't work.",
returned.getState().toString(),
StringUtils.toLowerCase(queueInfo.getQueueState().toString()));
}
/**
* Test that child queues are converted too during conversion of the parent
* queue
*/
@Test
public void testFromYarnQueue() {
//Define child queue
org.apache.hadoop.yarn.api.records.QueueInfo child =
Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
Mockito.when(child.getQueueState()).thenReturn(QueueState.RUNNING);
//Define parent queue
org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
Mockito.mock(org.apache.hadoop.yarn.api.records.QueueInfo.class);
List<org.apache.hadoop.yarn.api.records.QueueInfo> children =
new ArrayList<org.apache.hadoop.yarn.api.records.QueueInfo>();
children.add(child); //Add one child
Mockito.when(queueInfo.getChildQueues()).thenReturn(children);
Mockito.when(queueInfo.getQueueState()).thenReturn(QueueState.RUNNING);
//Call the function we're testing
org.apache.hadoop.mapreduce.QueueInfo returned =
TypeConverter.fromYarn(queueInfo, new Configuration());
//Verify that the converted queue has the 1 child we had added
Assert.assertEquals("QueueInfo children weren't properly converted",
returned.getQueueChildren().size(), 1);
}
@Test
public void testFromYarnJobReport() throws Exception {
int jobStartTime = 612354;
int jobFinishTime = 612355;
JobState state = JobState.RUNNING;
JobId jobId = Records.newRecord(JobId.class);
JobReport jobReport = Records.newRecord(JobReport.class);
ApplicationId applicationId = ApplicationId.newInstance(0, 0);
jobId.setAppId(applicationId);
jobId.setId(0);
jobReport.setJobId(jobId);
jobReport.setJobState(state);
jobReport.setStartTime(jobStartTime);
jobReport.setFinishTime(jobFinishTime);
jobReport.setUser("TestTypeConverter-user");
JobStatus jobStatus = TypeConverter.fromYarn(jobReport, "dummy-jobfile");
Assert.assertEquals(jobStartTime, jobStatus.getStartTime());
Assert.assertEquals(jobFinishTime, jobStatus.getFinishTime());
Assert.assertEquals(state.toString(), jobStatus.getState().toString());
}
}
| 9,510 | 44.075829 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRecordFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import org.junit.Assert;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factories.impl.pb.RecordFactoryPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.CounterGroupPBImpl;
import org.junit.Test;
public class TestRecordFactory {
@Test
public void testPbRecordFactory() {
RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
try {
CounterGroup response = pbRecordFactory.newRecordInstance(CounterGroup.class);
Assert.assertEquals(CounterGroupPBImpl.class, response.getClass());
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to crete record");
}
try {
GetCountersRequest response = pbRecordFactory.newRecordInstance(GetCountersRequest.class);
Assert.assertEquals(GetCountersRequestPBImpl.class, response.getClass());
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to crete record");
}
}
}
| 2,150 | 37.410714 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.impl.pb.RpcClientFactoryPBImpl;
import org.apache.hadoop.yarn.factories.impl.pb.RpcServerFactoryPBImpl;
import org.junit.Test;
public class TestRPCFactories {
@Test
public void test() {
testPbServerFactory();
testPbClientFactory();
}
private void testPbServerFactory() {
InetSocketAddress addr = new InetSocketAddress(0);
Configuration conf = new Configuration();
MRClientProtocol instance = new MRClientProtocolTestImpl();
Server server = null;
try {
server =
RpcServerFactoryPBImpl.get().getServer(
MRClientProtocol.class, instance, addr, conf, null, 1);
server.start();
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to crete server");
} finally {
server.stop();
}
}
private void testPbClientFactory() {
InetSocketAddress addr = new InetSocketAddress(0);
System.err.println(addr.getHostName() + addr.getPort());
Configuration conf = new Configuration();
MRClientProtocol instance = new MRClientProtocolTestImpl();
Server server = null;
try {
server =
RpcServerFactoryPBImpl.get().getServer(
MRClientProtocol.class, instance, addr, conf, null, 1);
server.start();
System.err.println(server.getListenerAddress());
System.err.println(NetUtils.getConnectAddress(server));
MRClientProtocol client = null;
try {
client = (MRClientProtocol) RpcClientFactoryPBImpl.get().getClient(MRClientProtocol.class, 1, NetUtils.getConnectAddress(server), conf);
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to crete client");
}
} catch (YarnRuntimeException e) {
e.printStackTrace();
Assert.fail("Failed to crete server");
} finally {
server.stop();
}
}
public class MRClientProtocolTestImpl implements MRClientProtocol {
@Override
public InetSocketAddress getConnectAddress() {
return null;
}
@Override
public GetJobReportResponse getJobReport(GetJobReportRequest request)
throws IOException {
return null;
}
@Override
public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
throws IOException {
return null;
}
@Override
public GetTaskAttemptReportResponse getTaskAttemptReport(
GetTaskAttemptReportRequest request) throws IOException {
return null;
}
@Override
public GetCountersResponse getCounters(GetCountersRequest request)
throws IOException {
return null;
}
@Override
public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(
GetTaskAttemptCompletionEventsRequest request)
throws IOException {
return null;
}
@Override
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
throws IOException {
return null;
}
@Override
public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request)
throws IOException {
return null;
}
@Override
public KillJobResponse killJob(KillJobRequest request)
throws IOException {
return null;
}
@Override
public KillTaskResponse killTask(KillTaskRequest request)
throws IOException {
return null;
}
@Override
public KillTaskAttemptResponse killTaskAttempt(
KillTaskAttemptRequest request) throws IOException {
return null;
}
@Override
public FailTaskAttemptResponse failTaskAttempt(
FailTaskAttemptRequest request) throws IOException {
return null;
}
@Override
public GetDelegationTokenResponse getDelegationToken(
GetDelegationTokenRequest request) throws IOException {
return null;
}
@Override
public RenewDelegationTokenResponse renewDelegationToken(
RenewDelegationTokenRequest request) throws IOException {
return null;
}
@Override
public CancelDelegationTokenResponse cancelDelegationToken(
CancelDelegationTokenRequest request) throws IOException {
return null;
}
}
}
| 7,784 | 34.386364 | 144 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/jobhistory/TestJobHistoryUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.jobhistory;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Calendar;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.junit.Assert;
import org.junit.Test;
public class TestJobHistoryUtils {
final static String TEST_DIR = new File(System.getProperty("test.build.data"))
.getAbsolutePath();
@Test
@SuppressWarnings("unchecked")
public void testGetHistoryDirsForCleaning() throws IOException {
Path pRoot = new Path(TEST_DIR, "org.apache.hadoop.mapreduce.v2.jobhistory."
+ "TestJobHistoryUtils.testGetHistoryDirsForCleaning");
FileContext fc = FileContext.getFileContext();
Calendar cCal = Calendar.getInstance();
int year = 2013;
int month = 7;
int day = 21;
cCal.set(year, month - 1, day, 1, 0);
long cutoff = cCal.getTimeInMillis();
clearDir(fc, pRoot);
Path pId00 = createPath(fc, pRoot, year, month, day, "000000");
Path pId01 = createPath(fc, pRoot, year, month, day+1, "000001");
Path pId02 = createPath(fc, pRoot, year, month, day-1, "000002");
Path pId03 = createPath(fc, pRoot, year, month+1, day, "000003");
Path pId04 = createPath(fc, pRoot, year, month+1, day+1, "000004");
Path pId05 = createPath(fc, pRoot, year, month+1, day-1, "000005");
Path pId06 = createPath(fc, pRoot, year, month-1, day, "000006");
Path pId07 = createPath(fc, pRoot, year, month-1, day+1, "000007");
Path pId08 = createPath(fc, pRoot, year, month-1, day-1, "000008");
Path pId09 = createPath(fc, pRoot, year+1, month, day, "000009");
Path pId10 = createPath(fc, pRoot, year+1, month, day+1, "000010");
Path pId11 = createPath(fc, pRoot, year+1, month, day-1, "000011");
Path pId12 = createPath(fc, pRoot, year+1, month+1, day, "000012");
Path pId13 = createPath(fc, pRoot, year+1, month+1, day+1, "000013");
Path pId14 = createPath(fc, pRoot, year+1, month+1, day-1, "000014");
Path pId15 = createPath(fc, pRoot, year+1, month-1, day, "000015");
Path pId16 = createPath(fc, pRoot, year+1, month-1, day+1, "000016");
Path pId17 = createPath(fc, pRoot, year+1, month-1, day-1, "000017");
Path pId18 = createPath(fc, pRoot, year-1, month, day, "000018");
Path pId19 = createPath(fc, pRoot, year-1, month, day+1, "000019");
Path pId20 = createPath(fc, pRoot, year-1, month, day-1, "000020");
Path pId21 = createPath(fc, pRoot, year-1, month+1, day, "000021");
Path pId22 = createPath(fc, pRoot, year-1, month+1, day+1, "000022");
Path pId23 = createPath(fc, pRoot, year-1, month+1, day-1, "000023");
Path pId24 = createPath(fc, pRoot, year-1, month-1, day, "000024");
Path pId25 = createPath(fc, pRoot, year-1, month-1, day+1, "000025");
Path pId26 = createPath(fc, pRoot, year-1, month-1, day-1, "000026");
// unexpected directory names should be ignored without causing errors
Path pId27 = createPath(fc, pRoot, "foo", "" + month, "" + day, "000027");
Path pId28 = createPath(fc, pRoot, "" + year, "foo", "" + day, "000028");
Path pId29 = createPath(fc, pRoot, "" + year, "" + month, "foo", "000029");
List<FileStatus> dirs = JobHistoryUtils
.getHistoryDirsForCleaning(fc, pRoot, cutoff);
Collections.sort(dirs);
Assert.assertEquals(14, dirs.size());
Assert.assertEquals(pId26.toUri().getPath(),
dirs.get(0).getPath().toUri().getPath());
Assert.assertEquals(pId24.toUri().getPath(),
dirs.get(1).getPath().toUri().getPath());
Assert.assertEquals(pId25.toUri().getPath(),
dirs.get(2).getPath().toUri().getPath());
Assert.assertEquals(pId20.toUri().getPath(),
dirs.get(3).getPath().toUri().getPath());
Assert.assertEquals(pId18.toUri().getPath(),
dirs.get(4).getPath().toUri().getPath());
Assert.assertEquals(pId19.toUri().getPath(),
dirs.get(5).getPath().toUri().getPath());
Assert.assertEquals(pId23.toUri().getPath(),
dirs.get(6).getPath().toUri().getPath());
Assert.assertEquals(pId21.toUri().getPath(),
dirs.get(7).getPath().toUri().getPath());
Assert.assertEquals(pId22.toUri().getPath(),
dirs.get(8).getPath().toUri().getPath());
Assert.assertEquals(pId08.toUri().getPath(),
dirs.get(9).getPath().toUri().getPath());
Assert.assertEquals(pId06.toUri().getPath(),
dirs.get(10).getPath().toUri().getPath());
Assert.assertEquals(pId07.toUri().getPath(),
dirs.get(11).getPath().toUri().getPath());
Assert.assertEquals(pId02.toUri().getPath(),
dirs.get(12).getPath().toUri().getPath());
Assert.assertEquals(pId00.toUri().getPath(),
dirs.get(13).getPath().toUri().getPath());
}
private void clearDir(FileContext fc, Path p) throws IOException {
try {
fc.delete(p, true);
} catch (FileNotFoundException e) {
// ignore
}
fc.mkdir(p, FsPermission.getDirDefault(), false);
}
private Path createPath(FileContext fc, Path root, int year, int month,
int day, String id) throws IOException {
Path path = new Path(root, year + Path.SEPARATOR + month + Path.SEPARATOR +
day + Path.SEPARATOR + id);
fc.mkdir(path, FsPermission.getDirDefault(), true);
return path;
}
private Path createPath(FileContext fc, Path root, String year, String month,
String day, String id) throws IOException {
Path path = new Path(root, year + Path.SEPARATOR + month + Path.SEPARATOR +
day + Path.SEPARATOR + id);
fc.mkdir(path, FsPermission.getDirDefault(), true);
return path;
}
}
| 6,632 | 45.0625 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/jobhistory/TestFileNameIndexUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.jobhistory;
import java.io.IOException;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.junit.Assert;
import org.junit.Test;
public class TestFileNameIndexUtils {
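// Legacy 8-field history file name format (predates the queue name and job start time fields).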
private static final String OLD_JOB_HISTORY_FILE_FORMATTER = "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ JobHistoryUtils.JOB_HISTORY_FILE_EXTENSION;
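// 9-field format that includes the queue name but predates the job start time field.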
private static final String OLD_FORMAT_BEFORE_ADD_START_TIME = "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ JobHistoryUtils.JOB_HISTORY_FILE_EXTENSION;
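// Current 10-field format with both queue name and job start time.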
private static final String JOB_HISTORY_FILE_FORMATTER = "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ FileNameIndexUtils.DELIMITER + "%s"
+ JobHistoryUtils.JOB_HISTORY_FILE_EXTENSION;
private static final String JOB_ID = "job_1317928501754_0001";
private static final String SUBMIT_TIME = "1317928742025";
private static final String USER_NAME = "username";
private static final String USER_NAME_WITH_DELIMITER = "user"
+ FileNameIndexUtils.DELIMITER + "name";
private static final String USER_NAME_WITH_DELIMITER_ESCAPE = "user"
+ FileNameIndexUtils.DELIMITER_ESCAPE + "name";
private static final String JOB_NAME = "mapreduce";
private static final String JOB_NAME_WITH_DELIMITER = "map"
+ FileNameIndexUtils.DELIMITER + "reduce";
private static final String JOB_NAME_WITH_DELIMITER_ESCAPE = "map"
+ FileNameIndexUtils.DELIMITER_ESCAPE + "reduce";
private static final String FINISH_TIME = "1317928754958";
private static final String NUM_MAPS = "1";
private static final String NUM_REDUCES = "1";
private static final String JOB_STATUS = "SUCCEEDED";
private static final String QUEUE_NAME = "default";
private static final String QUEUE_NAME_WITH_DELIMITER = "test"
+ FileNameIndexUtils.DELIMITER + "queue";
private static final String QUEUE_NAME_WITH_DELIMITER_ESCAPE = "test"
+ FileNameIndexUtils.DELIMITER_ESCAPE + "queue";
private static final String JOB_START_TIME = "1317928742060";
@Test
public void testEncodingDecodingEquivalence() throws IOException {
JobIndexInfo info = new JobIndexInfo();
JobID oldJobId = JobID.forName(JOB_ID);
JobId jobId = TypeConverter.toYarn(oldJobId);
info.setJobId(jobId);
info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
info.setUser(USER_NAME);
info.setJobName(JOB_NAME);
info.setFinishTime(Long.parseLong(FINISH_TIME));
info.setNumMaps(Integer.parseInt(NUM_MAPS));
info.setNumReduces(Integer.parseInt(NUM_REDUCES));
info.setJobStatus(JOB_STATUS);
info.setQueueName(QUEUE_NAME);
info.setJobStartTime(Long.parseLong(JOB_START_TIME));
String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
JobIndexInfo parsedInfo = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
Assert.assertEquals("Job id different after encoding and decoding",
info.getJobId(), parsedInfo.getJobId());
Assert.assertEquals("Submit time different after encoding and decoding",
info.getSubmitTime(), parsedInfo.getSubmitTime());
Assert.assertEquals("User different after encoding and decoding",
info.getUser(), parsedInfo.getUser());
Assert.assertEquals("Job name different after encoding and decoding",
info.getJobName(), parsedInfo.getJobName());
Assert.assertEquals("Finish time different after encoding and decoding",
info.getFinishTime(), parsedInfo.getFinishTime());
Assert.assertEquals("Num maps different after encoding and decoding",
info.getNumMaps(), parsedInfo.getNumMaps());
Assert.assertEquals("Num reduces different after encoding and decoding",
info.getNumReduces(), parsedInfo.getNumReduces());
Assert.assertEquals("Job status different after encoding and decoding",
info.getJobStatus(), parsedInfo.getJobStatus());
Assert.assertEquals("Queue name different after encoding and decoding",
info.getQueueName(), parsedInfo.getQueueName());
Assert.assertEquals("Job start time different after encoding and decoding",
info.getJobStartTime(), parsedInfo.getJobStartTime());
}
@Test
public void testUserNamePercentEncoding() throws IOException {
JobIndexInfo info = new JobIndexInfo();
JobID oldJobId = JobID.forName(JOB_ID);
JobId jobId = TypeConverter.toYarn(oldJobId);
info.setJobId(jobId);
info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
info.setUser(USER_NAME_WITH_DELIMITER);
info.setJobName(JOB_NAME);
info.setFinishTime(Long.parseLong(FINISH_TIME));
info.setNumMaps(Integer.parseInt(NUM_MAPS));
info.setNumReduces(Integer.parseInt(NUM_REDUCES));
info.setJobStatus(JOB_STATUS);
info.setQueueName(QUEUE_NAME);
info.setJobStartTime(Long.parseLong(JOB_START_TIME));
String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
Assert.assertTrue("User name not encoded correctly into job history file",
jobHistoryFile.contains(USER_NAME_WITH_DELIMITER_ESCAPE));
}
@Test
public void testTrimJobName() throws IOException {
int jobNameTrimLength = 5;
JobIndexInfo info = new JobIndexInfo();
JobID oldJobId = JobID.forName(JOB_ID);
JobId jobId = TypeConverter.toYarn(oldJobId);
info.setJobId(jobId);
info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
info.setUser(USER_NAME);
info.setJobName(JOB_NAME);
info.setFinishTime(Long.parseLong(FINISH_TIME));
info.setNumMaps(Integer.parseInt(NUM_MAPS));
info.setNumReduces(Integer.parseInt(NUM_REDUCES));
info.setJobStatus(JOB_STATUS);
info.setQueueName(QUEUE_NAME);
info.setJobStartTime(Long.parseLong(JOB_START_TIME));
String jobHistoryFile =
FileNameIndexUtils.getDoneFileName(info, jobNameTrimLength);
JobIndexInfo parsedInfo = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
Assert.assertEquals("Job name did not get trimmed correctly",
info.getJobName().substring(0, jobNameTrimLength),
parsedInfo.getJobName());
}
@Test
public void testUserNamePercentDecoding() throws IOException {
String jobHistoryFile = String.format(JOB_HISTORY_FILE_FORMATTER,
JOB_ID,
SUBMIT_TIME,
USER_NAME_WITH_DELIMITER_ESCAPE,
JOB_NAME,
FINISH_TIME,
NUM_MAPS,
NUM_REDUCES,
JOB_STATUS,
QUEUE_NAME,
JOB_START_TIME);
JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
Assert.assertEquals("User name doesn't match",
USER_NAME_WITH_DELIMITER, info.getUser());
}
@Test
public void testJobNamePercentEncoding() throws IOException {
JobIndexInfo info = new JobIndexInfo();
JobID oldJobId = JobID.forName(JOB_ID);
JobId jobId = TypeConverter.toYarn(oldJobId);
info.setJobId(jobId);
info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
info.setUser(USER_NAME);
info.setJobName(JOB_NAME_WITH_DELIMITER);
info.setFinishTime(Long.parseLong(FINISH_TIME));
info.setNumMaps(Integer.parseInt(NUM_MAPS));
info.setNumReduces(Integer.parseInt(NUM_REDUCES));
info.setJobStatus(JOB_STATUS);
info.setQueueName(QUEUE_NAME);
info.setJobStartTime(Long.parseLong(JOB_START_TIME));
String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
Assert.assertTrue("Job name not encoded correctly into job history file",
jobHistoryFile.contains(JOB_NAME_WITH_DELIMITER_ESCAPE));
}
@Test
public void testJobNamePercentDecoding() throws IOException {
String jobHistoryFile = String.format(JOB_HISTORY_FILE_FORMATTER,
JOB_ID,
SUBMIT_TIME,
USER_NAME,
JOB_NAME_WITH_DELIMITER_ESCAPE,
FINISH_TIME,
NUM_MAPS,
NUM_REDUCES,
JOB_STATUS,
QUEUE_NAME,
JOB_START_TIME );
JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
Assert.assertEquals("Job name doesn't match",
JOB_NAME_WITH_DELIMITER, info.getJobName());
}
@Test
public void testQueueNamePercentEncoding() throws IOException {
JobIndexInfo info = new JobIndexInfo();
JobID oldJobId = JobID.forName(JOB_ID);
JobId jobId = TypeConverter.toYarn(oldJobId);
info.setJobId(jobId);
info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
info.setUser(USER_NAME);
info.setJobName(JOB_NAME);
info.setFinishTime(Long.parseLong(FINISH_TIME));
info.setNumMaps(Integer.parseInt(NUM_MAPS));
info.setNumReduces(Integer.parseInt(NUM_REDUCES));
info.setJobStatus(JOB_STATUS);
info.setQueueName(QUEUE_NAME_WITH_DELIMITER);
info.setJobStartTime(Long.parseLong(JOB_START_TIME));
String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
Assert.assertTrue("Queue name not encoded correctly into job history file",
jobHistoryFile.contains(QUEUE_NAME_WITH_DELIMITER_ESCAPE));
}
@Test
public void testQueueNamePercentDecoding() throws IOException {
String jobHistoryFile = String.format(JOB_HISTORY_FILE_FORMATTER,
JOB_ID,
SUBMIT_TIME,
USER_NAME,
JOB_NAME,
FINISH_TIME,
NUM_MAPS,
NUM_REDUCES,
JOB_STATUS,
QUEUE_NAME_WITH_DELIMITER_ESCAPE,
JOB_START_TIME );
JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
Assert.assertEquals("Queue name doesn't match",
QUEUE_NAME_WITH_DELIMITER, info.getQueueName());
}
@Test
public void testJobStartTimeBackwardsCompatible() throws IOException{
String jobHistoryFile = String.format(OLD_FORMAT_BEFORE_ADD_START_TIME,
JOB_ID,
SUBMIT_TIME,
USER_NAME,
JOB_NAME_WITH_DELIMITER_ESCAPE,
FINISH_TIME,
NUM_MAPS,
NUM_REDUCES,
JOB_STATUS,
QUEUE_NAME );
JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
Assert.assertEquals(info.getJobStartTime(), info.getSubmitTime());
}
@Test
public void testJobHistoryFileNameBackwardsCompatible() throws IOException {
JobID oldJobId = JobID.forName(JOB_ID);
JobId jobId = TypeConverter.toYarn(oldJobId);
long submitTime = Long.parseLong(SUBMIT_TIME);
long finishTime = Long.parseLong(FINISH_TIME);
int numMaps = Integer.parseInt(NUM_MAPS);
int numReduces = Integer.parseInt(NUM_REDUCES);
String jobHistoryFile = String.format(OLD_JOB_HISTORY_FILE_FORMATTER,
JOB_ID,
SUBMIT_TIME,
USER_NAME,
JOB_NAME,
FINISH_TIME,
NUM_MAPS,
NUM_REDUCES,
JOB_STATUS);
JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
Assert.assertEquals("Job id incorrect after decoding old history file",
jobId, info.getJobId());
Assert.assertEquals("Submit time incorrect after decoding old history file",
submitTime, info.getSubmitTime());
Assert.assertEquals("User incorrect after decoding old history file",
USER_NAME, info.getUser());
Assert.assertEquals("Job name incorrect after decoding old history file",
JOB_NAME, info.getJobName());
Assert.assertEquals("Finish time incorrect after decoding old history file",
finishTime, info.getFinishTime());
Assert.assertEquals("Num maps incorrect after decoding old history file",
numMaps, info.getNumMaps());
Assert.assertEquals("Num reduces incorrect after decoding old history file",
numReduces, info.getNumReduces());
Assert.assertEquals("Job status incorrect after decoding old history file",
JOB_STATUS, info.getJobStatus());
Assert.assertNull("Queue name incorrect after decoding old history file",
info.getQueueName());
}
}
| 13,392 | 39.340361 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/api/records/TestIds.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.records;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.junit.Test;
public class TestIds {
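// Exercises equals(), hashCode(), compareTo() and toString() for the
// JobId, TaskId and TaskAttemptId records.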
@Test
public void testJobId() {
long ts1 = 1315890136000l;
long ts2 = 1315890136001l;
JobId j1 = createJobId(ts1, 2);
JobId j2 = createJobId(ts1, 1);
JobId j3 = createJobId(ts2, 1);
JobId j4 = createJobId(ts1, 2);
assertTrue(j1.equals(j4));
assertFalse(j1.equals(j2));
assertFalse(j1.equals(j3));
assertTrue(j1.compareTo(j4) == 0);
assertTrue(j1.compareTo(j2) > 0);
assertTrue(j1.compareTo(j3) < 0);
assertTrue(j1.hashCode() == j4.hashCode());
assertFalse(j1.hashCode() == j2.hashCode());
assertFalse(j1.hashCode() == j3.hashCode());
JobId j5 = createJobId(ts1, 231415);
assertEquals("job_" + ts1 + "_0002", j1.toString());
assertEquals("job_" + ts1 + "_231415", j5.toString());
}
@Test
public void testTaskId() {
long ts1 = 1315890136000l;
long ts2 = 1315890136001l;
TaskId t1 = createTaskId(ts1, 1, 2, TaskType.MAP);
TaskId t2 = createTaskId(ts1, 1, 2, TaskType.REDUCE);
TaskId t3 = createTaskId(ts1, 1, 1, TaskType.MAP);
TaskId t4 = createTaskId(ts1, 1, 2, TaskType.MAP);
TaskId t5 = createTaskId(ts2, 1, 1, TaskType.MAP);
assertTrue(t1.equals(t4));
assertFalse(t1.equals(t2));
assertFalse(t1.equals(t3));
assertFalse(t1.equals(t5));
assertTrue(t1.compareTo(t4) == 0);
assertTrue(t1.compareTo(t2) < 0);
assertTrue(t1.compareTo(t3) > 0);
assertTrue(t1.compareTo(t5) < 0);
assertTrue(t1.hashCode() == t4.hashCode());
assertFalse(t1.hashCode() == t2.hashCode());
assertFalse(t1.hashCode() == t3.hashCode());
assertFalse(t1.hashCode() == t5.hashCode());
TaskId t6 = createTaskId(ts1, 324151, 54643747, TaskType.REDUCE);
assertEquals("task_" + ts1 + "_0001_m_000002", t1.toString());
assertEquals("task_" + ts1 + "_324151_r_54643747", t6.toString());
}
@Test
public void testTaskAttemptId() {
long ts1 = 1315890136000l;
long ts2 = 1315890136001l;
TaskAttemptId t1 = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 2);
TaskAttemptId t2 = createTaskAttemptId(ts1, 2, 2, TaskType.REDUCE, 2);
TaskAttemptId t3 = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 3);
TaskAttemptId t4 = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 1);
TaskAttemptId t5 = createTaskAttemptId(ts1, 2, 1, TaskType.MAP, 3);
TaskAttemptId t6 = createTaskAttemptId(ts1, 2, 2, TaskType.MAP, 2);
assertTrue(t1.equals(t6));
assertFalse(t1.equals(t2));
assertFalse(t1.equals(t3));
assertFalse(t1.equals(t5));
assertTrue(t1.compareTo(t6) == 0);
assertTrue(t1.compareTo(t2) < 0);
assertTrue(t1.compareTo(t3) < 0);
assertTrue(t1.compareTo(t4) > 0);
assertTrue(t1.compareTo(t5) > 0);
assertTrue(t1.hashCode() == t6.hashCode());
assertFalse(t1.hashCode() == t2.hashCode());
assertFalse(t1.hashCode() == t3.hashCode());
assertFalse(t1.hashCode() == t5.hashCode());
TaskAttemptId t7 =
createTaskAttemptId(ts2, 5463346, 4326575, TaskType.REDUCE, 54375);
assertEquals("attempt_" + ts1 + "_0002_m_000002_2", t1.toString());
assertEquals("attempt_" + ts2 + "_5463346_r_4326575_54375", t7.toString());
}
private JobId createJobId(long clusterTimestamp, int idInt) {
return MRBuilderUtils.newJobId(
ApplicationId.newInstance(clusterTimestamp, idInt), idInt);
}
private TaskId createTaskId(long clusterTimestamp, int jobIdInt,
int taskIdInt, TaskType taskType) {
return MRBuilderUtils.newTaskId(createJobId(clusterTimestamp, jobIdInt),
taskIdInt, taskType);
}
private TaskAttemptId createTaskAttemptId(long clusterTimestamp,
int jobIdInt, int taskIdInt, TaskType taskType, int taskAttemptIdInt) {
return MRBuilderUtils.newTaskAttemptId(
createTaskId(clusterTimestamp, jobIdInt, taskIdInt, taskType),
taskAttemptIdInt);
}
}
| 4,993 | 34.928058 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.util.ApplicationClassLoader;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestMRApps {
private static File testWorkDir = null;
@BeforeClass
public static void setupTestDirs() throws IOException {
testWorkDir = new File("target", TestMRApps.class.getCanonicalName());
delete(testWorkDir);
testWorkDir.mkdirs();
testWorkDir = testWorkDir.getAbsoluteFile();
}
@AfterClass
public static void cleanupTestDirs() throws IOException {
if (testWorkDir != null) {
delete(testWorkDir);
}
}
private static void delete(File dir) throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
Path p = fs.makeQualified(new Path(dir.getAbsolutePath()));
fs.delete(p, true);
}
@Test (timeout = 120000)
public void testJobIDtoString() {
JobId jid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class);
jid.setAppId(ApplicationId.newInstance(0, 0));
assertEquals("job_0_0000", MRApps.toString(jid));
}
@Test (timeout = 120000)
public void testToJobID() {
JobId jid = MRApps.toJobID("job_1_1");
assertEquals(1, jid.getAppId().getClusterTimestamp());
assertEquals(1, jid.getAppId().getId());
assertEquals(1, jid.getId()); // tests against some proto.id and not a job.id field
}
@Test (timeout = 120000, expected=IllegalArgumentException.class)
public void testJobIDShort() {
MRApps.toJobID("job_0_0_0");
}
//TODO_get.set
@Test (timeout = 120000)
public void testTaskIDtoString() {
TaskId tid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class);
tid.setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class));
tid.getJobId().setAppId(ApplicationId.newInstance(0, 0));
tid.setTaskType(TaskType.MAP);
TaskType type = tid.getTaskType();
System.err.println(type);
type = TaskType.REDUCE;
System.err.println(type);
System.err.println(tid.getTaskType());
assertEquals("task_0_0000_m_000000", MRApps.toString(tid));
tid.setTaskType(TaskType.REDUCE);
assertEquals("task_0_0000_r_000000", MRApps.toString(tid));
}
@Test (timeout = 120000)
public void testToTaskID() {
TaskId tid = MRApps.toTaskID("task_1_2_r_3");
assertEquals(1, tid.getJobId().getAppId().getClusterTimestamp());
assertEquals(2, tid.getJobId().getAppId().getId());
assertEquals(2, tid.getJobId().getId());
assertEquals(TaskType.REDUCE, tid.getTaskType());
assertEquals(3, tid.getId());
tid = MRApps.toTaskID("task_1_2_m_3");
assertEquals(TaskType.MAP, tid.getTaskType());
}
@Test(timeout = 120000, expected=IllegalArgumentException.class)
public void testTaskIDShort() {
MRApps.toTaskID("task_0_0000_m");
}
@Test(timeout = 120000, expected=IllegalArgumentException.class)
public void testTaskIDBadType() {
MRApps.toTaskID("task_0_0000_x_000000");
}
//TODO_get.set
@Test (timeout = 120000)
public void testTaskAttemptIDtoString() {
TaskAttemptId taid = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptId.class);
taid.setTaskId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskId.class));
taid.getTaskId().setTaskType(TaskType.MAP);
taid.getTaskId().setJobId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(JobId.class));
taid.getTaskId().getJobId().setAppId(ApplicationId.newInstance(0, 0));
assertEquals("attempt_0_0000_m_000000_0", MRApps.toString(taid));
}
@Test (timeout = 120000)
public void testToTaskAttemptID() {
TaskAttemptId taid = MRApps.toTaskAttemptID("attempt_0_1_m_2_3");
assertEquals(0, taid.getTaskId().getJobId().getAppId().getClusterTimestamp());
assertEquals(1, taid.getTaskId().getJobId().getAppId().getId());
assertEquals(1, taid.getTaskId().getJobId().getId());
assertEquals(2, taid.getTaskId().getId());
assertEquals(3, taid.getId());
}
@Test(timeout = 120000, expected=IllegalArgumentException.class)
public void testTaskAttemptIDShort() {
MRApps.toTaskAttemptID("attempt_0_0_0_m_0");
}
@Test (timeout = 120000)
public void testGetJobFileWithUser() {
Configuration conf = new Configuration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/my/path/to/staging");
String jobFile = MRApps.getJobFile(conf, "dummy-user",
new JobID("dummy-job", 12345));
assertNotNull("getJobFile results in null.", jobFile);
assertEquals("jobFile with specified user is not as expected.",
"/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml", jobFile);
}
@Test (timeout = 120000)
public void testSetClasspath() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
Job job = Job.getInstance(conf);
Map<String, String> environment = new HashMap<String, String>();
MRApps.setClasspath(environment, job.getConfiguration());
assertTrue(environment.get("CLASSPATH").startsWith(
ApplicationConstants.Environment.PWD.$$()
+ ApplicationConstants.CLASS_PATH_SEPARATOR));
String yarnAppClasspath = job.getConfiguration().get(
YarnConfiguration.YARN_APPLICATION_CLASSPATH,
StringUtils.join(",",
YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH));
if (yarnAppClasspath != null) {
yarnAppClasspath =
yarnAppClasspath.replaceAll(",\\s*",
ApplicationConstants.CLASS_PATH_SEPARATOR).trim();
}
assertTrue(environment.get("CLASSPATH").contains(yarnAppClasspath));
String mrAppClasspath =
job.getConfiguration().get(
MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,
MRJobConfig.DEFAULT_MAPREDUCE_CROSS_PLATFORM_APPLICATION_CLASSPATH);
if (mrAppClasspath != null) {
mrAppClasspath =
mrAppClasspath.replaceAll(",\\s*",
ApplicationConstants.CLASS_PATH_SEPARATOR).trim();
}
assertTrue(environment.get("CLASSPATH").contains(mrAppClasspath));
}
@Test (timeout = 120000)
public void testSetClasspathWithArchives () throws IOException {
File testTGZ = new File(testWorkDir, "test.tgz");
FileOutputStream out = new FileOutputStream(testTGZ);
out.write(0);
out.close();
Configuration conf = new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
Job job = Job.getInstance(conf);
conf = job.getConfiguration();
String testTGZQualifiedPath = FileSystem.getLocal(conf).makeQualified(new Path(
testTGZ.getAbsolutePath())).toString();
conf.set(MRJobConfig.CLASSPATH_ARCHIVES, testTGZQualifiedPath);
conf.set(MRJobConfig.CACHE_ARCHIVES, testTGZQualifiedPath + "#testTGZ");
Map<String, String> environment = new HashMap<String, String>();
MRApps.setClasspath(environment, conf);
assertTrue(environment.get("CLASSPATH").startsWith(
ApplicationConstants.Environment.PWD.$$() + ApplicationConstants.CLASS_PATH_SEPARATOR));
String confClasspath = job.getConfiguration().get(
YarnConfiguration.YARN_APPLICATION_CLASSPATH,
StringUtils.join(",",
YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH));
if (confClasspath != null) {
confClasspath = confClasspath.replaceAll(",\\s*", ApplicationConstants.CLASS_PATH_SEPARATOR)
.trim();
}
assertTrue(environment.get("CLASSPATH").contains(confClasspath));
assertTrue(environment.get("CLASSPATH").contains("testTGZ"));
}
@Test (timeout = 120000)
public void testSetClasspathWithUserPrecendence() {
Configuration conf = new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, true);
Map<String, String> env = new HashMap<String, String>();
try {
MRApps.setClasspath(env, conf);
} catch (Exception e) {
fail("Got exception while setting classpath");
}
String env_str = env.get("CLASSPATH");
String expectedClasspath = StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,
Arrays.asList(ApplicationConstants.Environment.PWD.$$(), "job.jar/job.jar",
"job.jar/classes/", "job.jar/lib/*",
ApplicationConstants.Environment.PWD.$$() + "/*"));
assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST set, but not taking effect!",
env_str.startsWith(expectedClasspath));
}
@Test (timeout = 120000)
public void testSetClasspathWithNoUserPrecendence() {
Configuration conf = new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, false);
Map<String, String> env = new HashMap<String, String>();
try {
MRApps.setClasspath(env, conf);
} catch (Exception e) {
fail("Got exception while setting classpath");
}
String env_str = env.get("CLASSPATH");
String expectedClasspath = StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,
Arrays.asList("job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
ApplicationConstants.Environment.PWD.$$() + "/*"));
assertTrue("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, and job.jar is not in"
+ " the classpath!", env_str.contains(expectedClasspath));
assertFalse("MAPREDUCE_JOB_USER_CLASSPATH_FIRST false, but taking effect!",
env_str.startsWith(expectedClasspath));
}
@Test (timeout = 120000)
public void testSetClasspathWithJobClassloader() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_CLASSLOADER, true);
Map<String, String> env = new HashMap<String, String>();
MRApps.setClasspath(env, conf);
String cp = env.get("CLASSPATH");
String appCp = env.get("APP_CLASSPATH");
assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is in the"
+ " classpath!", cp.contains("jar" + ApplicationConstants.CLASS_PATH_SEPARATOR + "job"));
assertFalse("MAPREDUCE_JOB_CLASSLOADER true, but PWD is in the classpath!",
cp.contains("PWD"));
String expectedAppClasspath = StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,
Arrays.asList(ApplicationConstants.Environment.PWD.$$(), "job.jar/job.jar",
"job.jar/classes/", "job.jar/lib/*",
ApplicationConstants.Environment.PWD.$$() + "/*"));
assertEquals("MAPREDUCE_JOB_CLASSLOADER true, but job.jar is not in the app"
+ " classpath!", expectedAppClasspath, appCp);
}
@Test (timeout = 3000000)
public void testSetClasspathWithFramework() throws IOException {
final String FRAMEWORK_NAME = "some-framework-name";
final String FRAMEWORK_PATH = "some-framework-path#" + FRAMEWORK_NAME;
Configuration conf = new Configuration();
conf.setBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM, true);
conf.set(MRJobConfig.MAPREDUCE_APPLICATION_FRAMEWORK_PATH, FRAMEWORK_PATH);
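// With only the framework path set and no matching classpath entry,
// building the classpath is expected to fail below.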
Map<String, String> env = new HashMap<String, String>();
try {
MRApps.setClasspath(env, conf);
fail("Failed to catch framework path set without classpath change");
} catch (IllegalArgumentException e) {
assertTrue("Unexpected IllegalArgumentException",
e.getMessage().contains("Could not locate MapReduce framework name '"
+ FRAMEWORK_NAME + "'"));
}
env.clear();
final String FRAMEWORK_CLASSPATH = FRAMEWORK_NAME + "/*.jar";
conf.set(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH, FRAMEWORK_CLASSPATH);
MRApps.setClasspath(env, conf);
final String stdClasspath = StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,
Arrays.asList("job.jar/job.jar", "job.jar/classes/", "job.jar/lib/*",
ApplicationConstants.Environment.PWD.$$() + "/*"));
String expectedClasspath = StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,
Arrays.asList(ApplicationConstants.Environment.PWD.$$(),
FRAMEWORK_CLASSPATH, stdClasspath));
assertEquals("Incorrect classpath with framework and no user precedence",
expectedClasspath, env.get("CLASSPATH"));
env.clear();
conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, true);
MRApps.setClasspath(env, conf);
expectedClasspath = StringUtils.join(ApplicationConstants.CLASS_PATH_SEPARATOR,
Arrays.asList(ApplicationConstants.Environment.PWD.$$(),
stdClasspath, FRAMEWORK_CLASSPATH));
assertEquals("Incorrect classpath with framework and user precedence",
expectedClasspath, env.get("CLASSPATH"));
}
@Test (timeout = 30000)
public void testSetupDistributedCacheEmpty() throws IOException {
Configuration conf = new Configuration();
Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
MRApps.setupDistributedCache(conf, localResources);
assertTrue("Empty Config did not produce an empty list of resources",
localResources.isEmpty());
}
@SuppressWarnings("deprecation")
public void testSetupDistributedCacheConflicts() throws Exception {
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI mockUri = URI.create("mockfs://mock/");
FileSystem mockFs = ((FilterFileSystem)FileSystem.get(mockUri, conf))
.getRawFileSystem();
URI archive = new URI("mockfs://mock/tmp/something.zip#something");
Path archivePath = new Path(archive);
URI file = new URI("mockfs://mock/tmp/something.txt#something");
Path filePath = new Path(file);
when(mockFs.resolvePath(archivePath)).thenReturn(archivePath);
when(mockFs.resolvePath(filePath)).thenReturn(filePath);
DistributedCache.addCacheArchive(archive, conf);
conf.set(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS, "10");
conf.set(MRJobConfig.CACHE_ARCHIVES_SIZES, "10");
conf.set(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES, "true");
DistributedCache.addCacheFile(file, conf);
conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS, "11");
conf.set(MRJobConfig.CACHE_FILES_SIZES, "11");
conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, "true");
Map<String, LocalResource> localResources =
new HashMap<String, LocalResource>();
MRApps.setupDistributedCache(conf, localResources);
assertEquals(1, localResources.size());
LocalResource lr = localResources.get("something");
//Archive wins
assertNotNull(lr);
assertEquals(10l, lr.getSize());
assertEquals(10l, lr.getTimestamp());
assertEquals(LocalResourceType.ARCHIVE, lr.getType());
}
@SuppressWarnings("deprecation")
public void testSetupDistributedCacheConflictsFiles() throws Exception {
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI mockUri = URI.create("mockfs://mock/");
FileSystem mockFs = ((FilterFileSystem)FileSystem.get(mockUri, conf))
.getRawFileSystem();
URI file = new URI("mockfs://mock/tmp/something.zip#something");
Path filePath = new Path(file);
URI file2 = new URI("mockfs://mock/tmp/something.txt#something");
Path file2Path = new Path(file2);
when(mockFs.resolvePath(filePath)).thenReturn(filePath);
when(mockFs.resolvePath(file2Path)).thenReturn(file2Path);
DistributedCache.addCacheFile(file, conf);
DistributedCache.addCacheFile(file2, conf);
conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS, "10,11");
conf.set(MRJobConfig.CACHE_FILES_SIZES, "10,11");
conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, "true,true");
Map<String, LocalResource> localResources =
new HashMap<String, LocalResource>();
MRApps.setupDistributedCache(conf, localResources);
assertEquals(1, localResources.size());
LocalResource lr = localResources.get("something");
//First one wins
assertNotNull(lr);
assertEquals(10l, lr.getSize());
assertEquals(10l, lr.getTimestamp());
assertEquals(LocalResourceType.FILE, lr.getType());
}
@SuppressWarnings("deprecation")
@Test (timeout = 30000)
public void testSetupDistributedCache() throws Exception {
Configuration conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
URI mockUri = URI.create("mockfs://mock/");
FileSystem mockFs = ((FilterFileSystem)FileSystem.get(mockUri, conf))
.getRawFileSystem();
URI archive = new URI("mockfs://mock/tmp/something.zip");
Path archivePath = new Path(archive);
URI file = new URI("mockfs://mock/tmp/something.txt#something");
Path filePath = new Path(file);
when(mockFs.resolvePath(archivePath)).thenReturn(archivePath);
when(mockFs.resolvePath(filePath)).thenReturn(filePath);
DistributedCache.addCacheArchive(archive, conf);
conf.set(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS, "10");
conf.set(MRJobConfig.CACHE_ARCHIVES_SIZES, "10");
conf.set(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES, "true");
DistributedCache.addCacheFile(file, conf);
conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS, "11");
conf.set(MRJobConfig.CACHE_FILES_SIZES, "11");
conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, "true");
Map<String, LocalResource> localResources =
new HashMap<String, LocalResource>();
MRApps.setupDistributedCache(conf, localResources);
assertEquals(2, localResources.size());
LocalResource lr = localResources.get("something.zip");
assertNotNull(lr);
assertEquals(10l, lr.getSize());
assertEquals(10l, lr.getTimestamp());
assertEquals(LocalResourceType.ARCHIVE, lr.getType());
lr = localResources.get("something");
assertNotNull(lr);
assertEquals(11l, lr.getSize());
assertEquals(11l, lr.getTimestamp());
assertEquals(LocalResourceType.FILE, lr.getType());
}
static class MockFileSystem extends FilterFileSystem {
MockFileSystem() {
super(mock(FileSystem.class));
}
public void initialize(URI name, Configuration conf) throws IOException {}
}
@Test
public void testLogSystemProperties() throws Exception {
Configuration conf = new Configuration();
// test no logging
conf.set(MRJobConfig.MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG, " ");
String value = MRApps.getSystemPropertiesToLog(conf);
assertNull(value);
// test logging of selected keys
String classpath = "java.class.path";
String os = "os.name";
String version = "java.version";
conf.set(MRJobConfig.MAPREDUCE_JVM_SYSTEM_PROPERTIES_TO_LOG, classpath + ", " + os);
value = MRApps.getSystemPropertiesToLog(conf);
assertNotNull(value);
assertTrue(value.contains(classpath));
assertTrue(value.contains(os));
assertFalse(value.contains(version));
}
@Test
public void testTaskStateUI() {
assertTrue(MRApps.TaskStateUI.PENDING.correspondsTo(TaskState.SCHEDULED));
assertTrue(MRApps.TaskStateUI.COMPLETED.correspondsTo(TaskState.SUCCEEDED));
assertTrue(MRApps.TaskStateUI.COMPLETED.correspondsTo(TaskState.FAILED));
assertTrue(MRApps.TaskStateUI.COMPLETED.correspondsTo(TaskState.KILLED));
assertTrue(MRApps.TaskStateUI.RUNNING.correspondsTo(TaskState.RUNNING));
}
private static final String[] SYS_CLASSES = new String[] {
"/java/fake/Klass",
"/javax/management/fake/Klass",
"/org/apache/commons/logging/fake/Klass",
"/org/apache/log4j/fake/Klass",
"/org/apache/hadoop/fake/Klass"
};
private static final String[] DEFAULT_XMLS = new String[] {
"core-default.xml",
"mapred-default.xml",
"hdfs-default.xml",
"yarn-default.xml"
};
@Test
public void testSystemClasses() {
final List<String> systemClasses =
Arrays.asList(StringUtils.getTrimmedStrings(
ApplicationClassLoader.SYSTEM_CLASSES_DEFAULT));
for (String defaultXml : DEFAULT_XMLS) {
assertTrue(defaultXml + " must be system resource",
ApplicationClassLoader.isSystemClass(defaultXml, systemClasses));
}
for (String klass : SYS_CLASSES) {
assertTrue(klass + " must be system class",
ApplicationClassLoader.isSystemClass(klass, systemClasses));
}
assertFalse("/fake/Klass must not be a system class",
ApplicationClassLoader.isSystemClass("/fake/Klass", systemClasses));
}
}
| 23,226 | 41.540293 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClientGetJob.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertNotNull;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
public class TestJobClientGetJob {
private static Path TEST_ROOT_DIR =
new Path(System.getProperty("test.build.data","/tmp"));
private Path createTempFile(String filename, String contents)
throws IOException {
Path path = new Path(TEST_ROOT_DIR, filename);
Configuration conf = new Configuration();
FSDataOutputStream os = FileSystem.getLocal(conf).create(path);
os.writeBytes(contents);
os.close();
return path;
}
@SuppressWarnings("deprecation")
@Test
public void testGetRunningJobFromJobClient() throws Exception {
JobConf conf = new JobConf();
conf.set("mapreduce.framework.name", "local");
FileInputFormat.addInputPath(conf, createTempFile("in", "hello"));
Path outputDir = new Path(TEST_ROOT_DIR, getClass().getSimpleName());
outputDir.getFileSystem(conf).delete(outputDir, true);
FileOutputFormat.setOutputPath(conf, outputDir);
JobClient jc = new JobClient(conf);
RunningJob runningJob = jc.submitJob(conf);
assertNotNull("Running job", runningJob);
// Check that the running job can be retrieved by ID
RunningJob newRunningJob = jc.getJob(runningJob.getID());
assertNotNull("New running job", newRunningJob);
}
}
| 2,338 | 35.546875 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.jar.JarOutputStream;
import java.util.zip.ZipEntry;
import org.junit.Assert;
import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.junit.Test;
/**
* Tests the use of the
* {@link org.apache.hadoop.mapreduce.filecache.DistributedCache} within the
* full MR flow as well as the LocalJobRunner. This ought to be part of the
* filecache package, but that package is not currently in mapred, so it
* cannot depend on MR for testing.
*
* We use the distributed.* namespace for temporary files.
*
* See {@link TestMiniMRLocalFS}, {@link TestMiniMRDFSCaching}, and
* {@link MRCaching} for other tests that test the distributed cache.
*
* This test is not fast: it uses MiniMRCluster.
*/
@SuppressWarnings("deprecation")
public class TestMRWithDistributedCache extends TestCase {
private static Path TEST_ROOT_DIR =
new Path(System.getProperty("test.build.data","/tmp"));
private static File symlinkFile = new File("distributed.first.symlink");
private static File expectedAbsentSymlinkFile =
new File("distributed.second.jar");
private static Configuration conf = new Configuration();
private static FileSystem localFs;
static {
try {
localFs = FileSystem.getLocal(conf);
} catch (IOException io) {
throw new RuntimeException("problem getting local fs", io);
}
}
private static final Log LOG =
LogFactory.getLog(TestMRWithDistributedCache.class);
private static class DistributedCacheChecker {
public void setup(TaskInputOutputContext<?, ?, ?, ?> context)
throws IOException {
Configuration conf = context.getConfiguration();
Path[] localFiles = context.getLocalCacheFiles();
URI[] files = context.getCacheFiles();
Path[] localArchives = context.getLocalCacheArchives();
URI[] archives = context.getCacheArchives();
FileSystem fs = LocalFileSystem.get(conf);
// Check that 2 files and 2 archives are present
TestCase.assertEquals(2, localFiles.length);
TestCase.assertEquals(2, localArchives.length);
TestCase.assertEquals(2, files.length);
TestCase.assertEquals(2, archives.length);
// Check the file name
TestCase.assertTrue(files[0].getPath().endsWith("distributed.first"));
TestCase.assertTrue(files[1].getPath().endsWith("distributed.second.jar"));
// Check lengths of the files
TestCase.assertEquals(1, fs.getFileStatus(localFiles[0]).getLen());
TestCase.assertTrue(fs.getFileStatus(localFiles[1]).getLen() > 1);
// Check extraction of the archive
TestCase.assertTrue(fs.exists(new Path(localArchives[0],
"distributed.jar.inside3")));
TestCase.assertTrue(fs.exists(new Path(localArchives[1],
"distributed.jar.inside4")));
// Check the class loaders
LOG.info("Java Classpath: " + System.getProperty("java.class.path"));
ClassLoader cl = Thread.currentThread().getContextClassLoader();
// Both the file and the archive were added to classpath, so both
// should be reachable via the class loader.
TestCase.assertNotNull(cl.getResource("distributed.jar.inside2"));
TestCase.assertNotNull(cl.getResource("distributed.jar.inside3"));
TestCase.assertNull(cl.getResource("distributed.jar.inside4"));
// Check that the symlink created by the #fragment rename exists in the cwd
TestCase.assertTrue("symlink distributed.first.symlink doesn't exist",
symlinkFile.exists());
TestCase.assertEquals("symlink distributed.first.symlink length not 1", 1,
symlinkFile.length());
// This last check highlights a difference between MRv2 and MRv1
TestCase.assertTrue("second file should be symlinked too",
expectedAbsentSymlinkFile.exists());
}
}
public static class DistributedCacheCheckerMapper extends
Mapper<LongWritable, Text, NullWritable, NullWritable> {
@Override
protected void setup(Context context) throws IOException,
InterruptedException {
new DistributedCacheChecker().setup(context);
}
}
public static class DistributedCacheCheckerReducer extends
Reducer<LongWritable, Text, NullWritable, NullWritable> {
@Override
public void setup(Context context) throws IOException {
new DistributedCacheChecker().setup(context);
}
}
private void testWithConf(Configuration conf) throws IOException,
InterruptedException, ClassNotFoundException, URISyntaxException {
// Create a temporary file of length 1.
Path first = createTempFile("distributed.first", "x");
// Create two jars with a single file inside them.
Path second =
makeJar(new Path(TEST_ROOT_DIR, "distributed.second.jar"), 2);
Path third =
makeJar(new Path(TEST_ROOT_DIR, "distributed.third.jar"), 3);
Path fourth =
makeJar(new Path(TEST_ROOT_DIR, "distributed.fourth.jar"), 4);
Job job = Job.getInstance(conf);
job.setMapperClass(DistributedCacheCheckerMapper.class);
job.setReducerClass(DistributedCacheCheckerReducer.class);
job.setOutputFormatClass(NullOutputFormat.class);
FileInputFormat.setInputPaths(job, first);
// Creates the Job Configuration
job.addCacheFile(
new URI(first.toUri().toString() + "#distributed.first.symlink"));
job.addFileToClassPath(second);
job.addArchiveToClassPath(third);
job.addCacheArchive(fourth.toUri());
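// The fourth jar is cached as an archive but not added to the classpath,
// so its contents should be extracted yet remain invisible to the class loader.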
job.setMaxMapAttempts(1); // speed up failures
job.submit();
assertTrue(job.waitForCompletion(false));
}
/** Tests using the local job runner. */
public void testLocalJobRunner() throws Exception {
symlinkFile.delete(); // ensure symlink is not present (e.g. if test is
// killed part way through)
Configuration c = new Configuration();
c.set(JTConfig.JT_IPC_ADDRESS, "local");
c.set("fs.defaultFS", "file:///");
testWithConf(c);
assertFalse("Symlink not removed by local job runner",
// Symlink target will have gone so can't use File.exists()
Arrays.asList(new File(".").list()).contains(symlinkFile.getName()));
}
private Path createTempFile(String filename, String contents)
throws IOException {
Path path = new Path(TEST_ROOT_DIR, filename);
FSDataOutputStream os = localFs.create(path);
os.writeBytes(contents);
os.close();
return path;
}
private Path makeJar(Path p, int index) throws FileNotFoundException,
IOException {
FileOutputStream fos = new FileOutputStream(new File(p.toString()));
JarOutputStream jos = new JarOutputStream(fos);
ZipEntry ze = new ZipEntry("distributed.jar.inside" + index);
jos.putNextEntry(ze);
jos.write(("inside the jar!" + index).getBytes());
jos.closeEntry();
jos.close();
return p;
}
@Test (timeout = 1000)
public void testDeprecatedFunctions() throws Exception {
DistributedCache.addLocalArchives(conf, "Test Local Archives 1");
Assert.assertEquals("Test Local Archives 1",
conf.get(DistributedCache.CACHE_LOCALARCHIVES));
Assert.assertEquals(1,
DistributedCache.getLocalCacheArchives(conf).length);
Assert.assertEquals("Test Local Archives 1",
DistributedCache.getLocalCacheArchives(conf)[0].getName());
DistributedCache.addLocalArchives(conf, "Test Local Archives 2");
Assert.assertEquals("Test Local Archives 1,Test Local Archives 2",
conf.get(DistributedCache.CACHE_LOCALARCHIVES));
Assert.assertEquals(2,
DistributedCache.getLocalCacheArchives(conf).length);
Assert.assertEquals("Test Local Archives 2",
DistributedCache.getLocalCacheArchives(conf)[1].getName());
DistributedCache.setLocalArchives(conf, "Test Local Archives 3");
Assert.assertEquals("Test Local Archives 3",
conf.get(DistributedCache.CACHE_LOCALARCHIVES));
Assert.assertEquals(1,
DistributedCache.getLocalCacheArchives(conf).length);
Assert.assertEquals("Test Local Archives 3",
DistributedCache.getLocalCacheArchives(conf)[0].getName());
DistributedCache.addLocalFiles(conf, "Test Local Files 1");
Assert.assertEquals("Test Local Files 1",
conf.get(DistributedCache.CACHE_LOCALFILES));
Assert.assertEquals(1,
DistributedCache.getLocalCacheFiles(conf).length);
Assert.assertEquals("Test Local Files 1",
DistributedCache.getLocalCacheFiles(conf)[0].getName());
DistributedCache.addLocalFiles(conf, "Test Local Files 2");
Assert.assertEquals("Test Local Files 1,Test Local Files 2",
conf.get(DistributedCache.CACHE_LOCALFILES));
Assert.assertEquals(2,
DistributedCache.getLocalCacheFiles(conf).length);
Assert.assertEquals("Test Local Files 2",
DistributedCache.getLocalCacheFiles(conf)[1].getName());
DistributedCache.setLocalFiles(conf, "Test Local Files 3");
Assert.assertEquals("Test Local Files 3",
conf.get(DistributedCache.CACHE_LOCALFILES));
Assert.assertEquals(1,
DistributedCache.getLocalCacheFiles(conf).length);
Assert.assertEquals("Test Local Files 3",
DistributedCache.getLocalCacheFiles(conf)[0].getName());
DistributedCache.setArchiveTimestamps(conf, "1234567890");
Assert.assertEquals(1234567890,
conf.getLong(DistributedCache.CACHE_ARCHIVES_TIMESTAMPS, 0));
Assert.assertEquals(1,
DistributedCache.getArchiveTimestamps(conf).length);
Assert.assertEquals(1234567890,
DistributedCache.getArchiveTimestamps(conf)[0]);
DistributedCache.setFileTimestamps(conf, "1234567890");
Assert.assertEquals(1234567890,
conf.getLong(DistributedCache.CACHE_FILES_TIMESTAMPS, 0));
Assert.assertEquals(1,
DistributedCache.getFileTimestamps(conf).length);
Assert.assertEquals(1234567890,
DistributedCache.getFileTimestamps(conf)[0]);
DistributedCache.createAllSymlink(conf, new File("Test Job Cache Dir"),
new File("Test Work Dir"));
Assert.assertNull(conf.get(DistributedCache.CACHE_SYMLINK));
Assert.assertTrue(DistributedCache.getSymlink(conf));
Assert.assertTrue(symlinkFile.createNewFile());
FileStatus fileStatus =
DistributedCache.getFileStatus(conf, symlinkFile.toURI());
Assert.assertNotNull(fileStatus);
Assert.assertEquals(fileStatus.getModificationTime(),
DistributedCache.getTimestamp(conf, symlinkFile.toURI()));
Assert.assertTrue(symlinkFile.delete());
DistributedCache.addCacheArchive(symlinkFile.toURI(), conf);
Assert.assertEquals(symlinkFile.toURI().toString(),
conf.get(DistributedCache.CACHE_ARCHIVES));
Assert.assertEquals(1, DistributedCache.getCacheArchives(conf).length);
Assert.assertEquals(symlinkFile.toURI(),
DistributedCache.getCacheArchives(conf)[0]);
DistributedCache.addCacheFile(symlinkFile.toURI(), conf);
Assert.assertEquals(symlinkFile.toURI().toString(),
conf.get(DistributedCache.CACHE_FILES));
Assert.assertEquals(1, DistributedCache.getCacheFiles(conf).length);
Assert.assertEquals(symlinkFile.toURI(),
DistributedCache.getCacheFiles(conf)[0]);
}
}
| 13,063 | 40.473016 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestLocalModeWithNewApis.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.*;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Random;
import java.util.StringTokenizer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestLocalModeWithNewApis {
public static final Log LOG =
LogFactory.getLog(TestLocalModeWithNewApis.class);
Configuration conf;
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
}
@After
public void tearDown() throws Exception {
}
@Test
public void testNewApis() throws Exception {
Random r = new Random(System.currentTimeMillis());
Path tmpBaseDir = new Path("/tmp/wc-" + r.nextInt());
final Path inDir = new Path(tmpBaseDir, "input");
final Path outDir = new Path(tmpBaseDir, "output");
String input = "The quick brown fox\nhas many silly\nred fox sox\n";
FileSystem inFs = inDir.getFileSystem(conf);
FileSystem outFs = outDir.getFileSystem(conf);
outFs.delete(outDir, true);
if (!inFs.mkdirs(inDir)) {
throw new IOException("Mkdirs failed to create " + inDir.toString());
}
{
DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
file.writeBytes(input);
file.close();
}
Job job = Job.getInstance(conf, "word count");
job.setJarByClass(TestLocalModeWithNewApis.class);
job.setMapperClass(TokenizerMapper.class);
job.setCombinerClass(IntSumReducer.class);
job.setReducerClass(IntSumReducer.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
FileInputFormat.addInputPath(job, inDir);
FileOutputFormat.setOutputPath(job, outDir);
    assertTrue(job.waitForCompletion(true));
String output = readOutput(outDir, conf);
assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" +
"quick\t1\nred\t1\nsilly\t1\nsox\t1\n", output);
outFs.delete(tmpBaseDir, true);
}
static String readOutput(Path outDir, Configuration conf)
throws IOException {
FileSystem fs = outDir.getFileSystem(conf);
StringBuffer result = new StringBuffer();
Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
new Utils.OutputFileUtils.OutputFilesFilter()));
for (Path outputFile : fileList) {
LOG.info("Path" + ": "+ outputFile);
BufferedReader file =
new BufferedReader(new InputStreamReader(fs.open(outputFile)));
String line = file.readLine();
while (line != null) {
result.append(line);
result.append("\n");
line = file.readLine();
}
file.close();
}
return result.toString();
}
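  /** Splits each input line into tokens and emits a (word, 1) pair per token. */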
public static class TokenizerMapper
extends Mapper<Object, Text, Text, IntWritable>{
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(Object key, Text value, Context context
) throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
context.write(word, one);
}
}
}
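  /** Sums the counts for each word and emits the total per key. */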
public static class IntSumReducer
extends Reducer<Text,IntWritable,Text,IntWritable> {
private IntWritable result = new IntWritable();
public void reduce(Text key, Iterable<IntWritable> values,
Context context
) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable val : values) {
sum += val.get();
}
result.set(sum);
context.write(key, result);
}
}
}
| 5,240 | 32.170886 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestJobClient.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.ClusterStatus.BlackListInfo;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
public class TestJobClient {
final static String TEST_DIR = new File("target",
TestJobClient.class.getSimpleName()).getAbsolutePath();
@After
public void tearDown() {
FileUtil.fullyDelete(new File(TEST_DIR));
}
@Test
public void testGetClusterStatusWithLocalJobRunner() throws Exception {
Configuration conf = new Configuration();
conf.set(JTConfig.JT_IPC_ADDRESS, MRConfig.LOCAL_FRAMEWORK_NAME);
conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
JobClient client = new JobClient(conf);
ClusterStatus clusterStatus = client.getClusterStatus(true);
Collection<String> activeTrackerNames = clusterStatus
.getActiveTrackerNames();
Assert.assertEquals(0, activeTrackerNames.size());
int blacklistedTrackers = clusterStatus.getBlacklistedTrackers();
Assert.assertEquals(0, blacklistedTrackers);
Collection<BlackListInfo> blackListedTrackersInfo = clusterStatus
.getBlackListedTrackersInfo();
Assert.assertEquals(0, blackListedTrackersInfo.size());
}
@Test(timeout = 10000)
public void testIsJobDirValid() throws IOException {
Configuration conf = new Configuration();
FileSystem fs = FileSystem.getLocal(conf);
Path testDir = new Path(TEST_DIR);
fs.mkdirs(testDir);
Assert.assertFalse(JobClient.isJobDirValid(testDir, fs));
Path jobconf = new Path(testDir, "job.xml");
Path jobsplit = new Path(testDir, "job.split");
fs.create(jobconf);
fs.create(jobsplit);
Assert.assertTrue(JobClient.isJobDirValid(testDir, fs));
fs.delete(jobconf, true);
fs.delete(jobsplit, true);
}
@Test(timeout = 10000)
public void testGetStagingAreaDir() throws IOException, InterruptedException {
Configuration conf = new Configuration();
JobClient client = new JobClient(conf);
Assert.assertTrue(
"Mismatch in paths",
client.getClusterHandle().getStagingAreaDir().toString()
.equals(client.getStagingAreaDir().toString()));
}
}
| 3,331 | 35.615385 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestLocalDistributedCacheManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@SuppressWarnings("deprecation")
public class TestLocalDistributedCacheManager {
private static FileSystem mockfs;
public static class MockFileSystem extends FilterFileSystem {
public MockFileSystem() {
super(mockfs);
}
}
private File localDir;
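  /**
   * Recursively deletes a file or directory, refusing to act on
   * suspiciously short paths as a safety check.
   */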
private static void delete(File file) throws IOException {
if (file.getAbsolutePath().length() < 5) {
throw new IllegalArgumentException(
"Path [" + file + "] is too short, not deleting");
}
if (file.exists()) {
if (file.isDirectory()) {
File[] children = file.listFiles();
if (children != null) {
for (File child : children) {
delete(child);
}
}
}
if (!file.delete()) {
throw new RuntimeException(
"Could not delete path [" + file + "]");
}
}
}
@Before
public void setup() throws Exception {
mockfs = mock(FileSystem.class);
localDir = new File(System.getProperty("test.build.dir", "target/test-dir"),
TestLocalDistributedCacheManager.class.getName());
delete(localDir);
localDir.mkdirs();
}
@After
public void cleanup() throws Exception {
delete(localDir);
}
/**
* Mock input stream based on a byte array so that it can be used by a
* FSDataInputStream.
*/
private static class MockInputStream extends ByteArrayInputStream
implements Seekable, PositionedReadable {
public MockInputStream(byte[] buf) {
super(buf);
}
// empty implementation for unused methods
public int read(long position, byte[] buffer, int offset, int length) { return -1; }
public void readFully(long position, byte[] buffer, int offset, int length) {}
public void readFully(long position, byte[] buffer) {}
public void seek(long position) {}
public long getPos() { return 0; }
public boolean seekToNewSource(long targetPos) { return false; }
}
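  /**
   * Verifies that a cache file served by the mocked file system is localized
   * as a symlink during setup() and removed again when the manager is closed.
   */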
@Test
public void testDownload() throws Exception {
JobConf conf = new JobConf();
conf.setClass("fs.mock.impl", MockFileSystem.class, FileSystem.class);
URI mockBase = new URI("mock://test-nn1/");
when(mockfs.getUri()).thenReturn(mockBase);
Path working = new Path("mock://test-nn1/user/me/");
when(mockfs.getWorkingDirectory()).thenReturn(working);
when(mockfs.resolvePath(any(Path.class))).thenAnswer(new Answer<Path>() {
@Override
public Path answer(InvocationOnMock args) throws Throwable {
return (Path) args.getArguments()[0];
}
});
final URI file = new URI("mock://test-nn1/user/me/file.txt#link");
final Path filePath = new Path(file);
File link = new File("link");
when(mockfs.getFileStatus(any(Path.class))).thenAnswer(new Answer<FileStatus>() {
@Override
public FileStatus answer(InvocationOnMock args) throws Throwable {
Path p = (Path)args.getArguments()[0];
if("file.txt".equals(p.getName())) {
return new FileStatus(201, false, 1, 500, 101, 101,
FsPermission.getDefault(), "me", "me", filePath);
} else {
throw new FileNotFoundException(p+" not supported by mocking");
}
}
});
when(mockfs.getConf()).thenReturn(conf);
final FSDataInputStream in =
new FSDataInputStream(new MockInputStream("This is a test file\n".getBytes()));
when(mockfs.open(any(Path.class), anyInt())).thenAnswer(new Answer<FSDataInputStream>() {
@Override
public FSDataInputStream answer(InvocationOnMock args) throws Throwable {
Path src = (Path)args.getArguments()[0];
if ("file.txt".equals(src.getName())) {
return in;
} else {
throw new FileNotFoundException(src+" not supported by mocking");
}
}
});
DistributedCache.addCacheFile(file, conf);
conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS, "101");
conf.set(MRJobConfig.CACHE_FILES_SIZES, "201");
conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, "false");
conf.set(MRConfig.LOCAL_DIR, localDir.getAbsolutePath());
LocalDistributedCacheManager manager = new LocalDistributedCacheManager();
try {
manager.setup(conf);
assertTrue(link.exists());
} finally {
manager.close();
}
assertFalse(link.exists());
}
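  /** Verifies that setup() tolerates an empty cache file list. */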
@Test
public void testEmptyDownload() throws Exception {
JobConf conf = new JobConf();
conf.setClass("fs.mock.impl", MockFileSystem.class, FileSystem.class);
URI mockBase = new URI("mock://test-nn1/");
when(mockfs.getUri()).thenReturn(mockBase);
Path working = new Path("mock://test-nn1/user/me/");
when(mockfs.getWorkingDirectory()).thenReturn(working);
when(mockfs.resolvePath(any(Path.class))).thenAnswer(new Answer<Path>() {
@Override
public Path answer(InvocationOnMock args) throws Throwable {
return (Path) args.getArguments()[0];
}
});
when(mockfs.getFileStatus(any(Path.class))).thenAnswer(new Answer<FileStatus>() {
@Override
public FileStatus answer(InvocationOnMock args) throws Throwable {
Path p = (Path)args.getArguments()[0];
throw new FileNotFoundException(p+" not supported by mocking");
}
});
when(mockfs.getConf()).thenReturn(conf);
when(mockfs.open(any(Path.class), anyInt())).thenAnswer(new Answer<FSDataInputStream>() {
@Override
public FSDataInputStream answer(InvocationOnMock args) throws Throwable {
Path src = (Path)args.getArguments()[0];
throw new FileNotFoundException(src+" not supported by mocking");
}
});
conf.set(MRJobConfig.CACHE_FILES, "");
conf.set(MRConfig.LOCAL_DIR, localDir.getAbsolutePath());
LocalDistributedCacheManager manager = new LocalDistributedCacheManager();
try {
manager.setup(conf);
} finally {
manager.close();
}
}
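  /**
   * Verifies that registering the same cache file twice does not break
   * localization: the symlink still appears during setup() and is removed
   * when the manager is closed.
   */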
@Test
public void testDuplicateDownload() throws Exception {
JobConf conf = new JobConf();
conf.setClass("fs.mock.impl", MockFileSystem.class, FileSystem.class);
URI mockBase = new URI("mock://test-nn1/");
when(mockfs.getUri()).thenReturn(mockBase);
Path working = new Path("mock://test-nn1/user/me/");
when(mockfs.getWorkingDirectory()).thenReturn(working);
when(mockfs.resolvePath(any(Path.class))).thenAnswer(new Answer<Path>() {
@Override
public Path answer(InvocationOnMock args) throws Throwable {
return (Path) args.getArguments()[0];
}
});
final URI file = new URI("mock://test-nn1/user/me/file.txt#link");
final Path filePath = new Path(file);
File link = new File("link");
when(mockfs.getFileStatus(any(Path.class))).thenAnswer(new Answer<FileStatus>() {
@Override
public FileStatus answer(InvocationOnMock args) throws Throwable {
Path p = (Path)args.getArguments()[0];
if("file.txt".equals(p.getName())) {
return new FileStatus(201, false, 1, 500, 101, 101,
FsPermission.getDefault(), "me", "me", filePath);
} else {
throw new FileNotFoundException(p+" not supported by mocking");
}
}
});
when(mockfs.getConf()).thenReturn(conf);
final FSDataInputStream in =
new FSDataInputStream(new MockInputStream("This is a test file\n".getBytes()));
when(mockfs.open(any(Path.class), anyInt())).thenAnswer(new Answer<FSDataInputStream>() {
@Override
public FSDataInputStream answer(InvocationOnMock args) throws Throwable {
Path src = (Path)args.getArguments()[0];
if ("file.txt".equals(src.getName())) {
return in;
} else {
throw new FileNotFoundException(src+" not supported by mocking");
}
}
});
DistributedCache.addCacheFile(file, conf);
DistributedCache.addCacheFile(file, conf);
conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS, "101,101");
conf.set(MRJobConfig.CACHE_FILES_SIZES, "201,201");
conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, "false,false");
conf.set(MRConfig.LOCAL_DIR, localDir.getAbsolutePath());
LocalDistributedCacheManager manager = new LocalDistributedCacheManager();
try {
manager.setup(conf);
assertTrue(link.exists());
} finally {
manager.close();
}
assertFalse(link.exists());
}
}
| 10,235 | 34.418685 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobPriority;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.JobStatus.State;
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
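/**
 * Static helpers for converting between the classic mapred/mapreduce
 * record types and their YARN (MR v2) counterparts.
 */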
public class TypeConverter {
private static RecordFactory recordFactory;
static {
recordFactory = RecordFactoryProvider.getRecordFactory(null);
}
public static org.apache.hadoop.mapred.JobID fromYarn(JobId id) {
String identifier = fromClusterTimeStamp(id.getAppId().getClusterTimestamp());
return new org.apache.hadoop.mapred.JobID(identifier, id.getId());
}
//currently there is 1-1 mapping between appid and jobid
public static org.apache.hadoop.mapreduce.JobID fromYarn(ApplicationId appID) {
String identifier = fromClusterTimeStamp(appID.getClusterTimestamp());
return new org.apache.hadoop.mapred.JobID(identifier, appID.getId());
}
public static JobId toYarn(org.apache.hadoop.mapreduce.JobID id) {
JobId jobId = recordFactory.newRecordInstance(JobId.class);
jobId.setId(id.getId()); //currently there is 1-1 mapping between appid and jobid
ApplicationId appId = ApplicationId.newInstance(
toClusterTimeStamp(id.getJtIdentifier()), id.getId());
jobId.setAppId(appId);
return jobId;
}
private static String fromClusterTimeStamp(long clusterTimeStamp) {
return Long.toString(clusterTimeStamp);
}
private static long toClusterTimeStamp(String identifier) {
return Long.parseLong(identifier);
}
public static org.apache.hadoop.mapreduce.TaskType fromYarn(
TaskType taskType) {
switch (taskType) {
case MAP:
return org.apache.hadoop.mapreduce.TaskType.MAP;
case REDUCE:
return org.apache.hadoop.mapreduce.TaskType.REDUCE;
default:
throw new YarnRuntimeException("Unrecognized task type: " + taskType);
}
}
public static TaskType
toYarn(org.apache.hadoop.mapreduce.TaskType taskType) {
switch (taskType) {
case MAP:
return TaskType.MAP;
case REDUCE:
return TaskType.REDUCE;
default:
throw new YarnRuntimeException("Unrecognized task type: " + taskType);
}
}
public static org.apache.hadoop.mapred.TaskID fromYarn(TaskId id) {
return new org.apache.hadoop.mapred.TaskID(fromYarn(id.getJobId()),
fromYarn(id.getTaskType()), id.getId());
}
public static TaskId toYarn(org.apache.hadoop.mapreduce.TaskID id) {
TaskId taskId = recordFactory.newRecordInstance(TaskId.class);
taskId.setId(id.getId());
taskId.setTaskType(toYarn(id.getTaskType()));
taskId.setJobId(toYarn(id.getJobID()));
return taskId;
}
public static TaskAttemptState toYarn(
org.apache.hadoop.mapred.TaskStatus.State state) {
switch (state) {
case COMMIT_PENDING:
return TaskAttemptState.COMMIT_PENDING;
case FAILED:
case FAILED_UNCLEAN:
return TaskAttemptState.FAILED;
case KILLED:
case KILLED_UNCLEAN:
return TaskAttemptState.KILLED;
case RUNNING:
return TaskAttemptState.RUNNING;
case SUCCEEDED:
return TaskAttemptState.SUCCEEDED;
case UNASSIGNED:
return TaskAttemptState.STARTING;
default:
throw new YarnRuntimeException("Unrecognized State: " + state);
}
}
public static Phase toYarn(org.apache.hadoop.mapred.TaskStatus.Phase phase) {
switch (phase) {
case STARTING:
return Phase.STARTING;
case MAP:
return Phase.MAP;
case SHUFFLE:
return Phase.SHUFFLE;
case SORT:
return Phase.SORT;
case REDUCE:
return Phase.REDUCE;
case CLEANUP:
return Phase.CLEANUP;
}
throw new YarnRuntimeException("Unrecognized Phase: " + phase);
}
public static TaskCompletionEvent[] fromYarn(
TaskAttemptCompletionEvent[] newEvents) {
TaskCompletionEvent[] oldEvents =
new TaskCompletionEvent[newEvents.length];
int i = 0;
for (TaskAttemptCompletionEvent newEvent
: newEvents) {
oldEvents[i++] = fromYarn(newEvent);
}
return oldEvents;
}
public static TaskCompletionEvent fromYarn(
TaskAttemptCompletionEvent newEvent) {
return new TaskCompletionEvent(newEvent.getEventId(),
fromYarn(newEvent.getAttemptId()), newEvent.getAttemptId().getId(),
newEvent.getAttemptId().getTaskId().getTaskType().equals(TaskType.MAP),
fromYarn(newEvent.getStatus()),
newEvent.getMapOutputServerAddress());
}
public static TaskCompletionEvent.Status fromYarn(
TaskAttemptCompletionEventStatus newStatus) {
switch (newStatus) {
case FAILED:
return TaskCompletionEvent.Status.FAILED;
case KILLED:
return TaskCompletionEvent.Status.KILLED;
case OBSOLETE:
return TaskCompletionEvent.Status.OBSOLETE;
case SUCCEEDED:
return TaskCompletionEvent.Status.SUCCEEDED;
case TIPFAILED:
return TaskCompletionEvent.Status.TIPFAILED;
}
throw new YarnRuntimeException("Unrecognized status: " + newStatus);
}
public static org.apache.hadoop.mapred.TaskAttemptID fromYarn(
TaskAttemptId id) {
return new org.apache.hadoop.mapred.TaskAttemptID(fromYarn(id.getTaskId()),
id.getId());
}
public static TaskAttemptId toYarn(
org.apache.hadoop.mapred.TaskAttemptID id) {
TaskAttemptId taskAttemptId = recordFactory.newRecordInstance(TaskAttemptId.class);
taskAttemptId.setTaskId(toYarn(id.getTaskID()));
taskAttemptId.setId(id.getId());
return taskAttemptId;
}
public static TaskAttemptId toYarn(
org.apache.hadoop.mapreduce.TaskAttemptID id) {
TaskAttemptId taskAttemptId = recordFactory.newRecordInstance(TaskAttemptId.class);
taskAttemptId.setTaskId(toYarn(id.getTaskID()));
taskAttemptId.setId(id.getId());
return taskAttemptId;
}
public static org.apache.hadoop.mapreduce.Counters fromYarn(
Counters yCntrs) {
if (yCntrs == null) {
return null;
}
org.apache.hadoop.mapreduce.Counters counters =
new org.apache.hadoop.mapreduce.Counters();
for (CounterGroup yGrp : yCntrs.getAllCounterGroups().values()) {
counters.addGroup(yGrp.getName(), yGrp.getDisplayName());
for (Counter yCntr : yGrp.getAllCounters().values()) {
org.apache.hadoop.mapreduce.Counter c =
counters.findCounter(yGrp.getName(),
yCntr.getName());
        // if c is found, set its value; otherwise the counter is skipped.
if (c != null) {
c.setValue(yCntr.getValue());
}
}
}
return counters;
}
public static Counters toYarn(org.apache.hadoop.mapred.Counters counters) {
if (counters == null) {
return null;
}
Counters yCntrs = recordFactory.newRecordInstance(Counters.class);
yCntrs.addAllCounterGroups(new HashMap<String, CounterGroup>());
for (org.apache.hadoop.mapred.Counters.Group grp : counters) {
CounterGroup yGrp = recordFactory.newRecordInstance(CounterGroup.class);
yGrp.setName(grp.getName());
yGrp.setDisplayName(grp.getDisplayName());
yGrp.addAllCounters(new HashMap<String, Counter>());
for (org.apache.hadoop.mapred.Counters.Counter cntr : grp) {
Counter yCntr = recordFactory.newRecordInstance(Counter.class);
yCntr.setName(cntr.getName());
yCntr.setDisplayName(cntr.getDisplayName());
yCntr.setValue(cntr.getValue());
yGrp.setCounter(yCntr.getName(), yCntr);
}
yCntrs.setCounterGroup(yGrp.getName(), yGrp);
}
return yCntrs;
}
public static Counters toYarn(org.apache.hadoop.mapreduce.Counters counters) {
if (counters == null) {
return null;
}
Counters yCntrs = recordFactory.newRecordInstance(Counters.class);
yCntrs.addAllCounterGroups(new HashMap<String, CounterGroup>());
for (org.apache.hadoop.mapreduce.CounterGroup grp : counters) {
CounterGroup yGrp = recordFactory.newRecordInstance(CounterGroup.class);
yGrp.setName(grp.getName());
yGrp.setDisplayName(grp.getDisplayName());
yGrp.addAllCounters(new HashMap<String, Counter>());
for (org.apache.hadoop.mapreduce.Counter cntr : grp) {
Counter yCntr = recordFactory.newRecordInstance(Counter.class);
yCntr.setName(cntr.getName());
yCntr.setDisplayName(cntr.getDisplayName());
yCntr.setValue(cntr.getValue());
yGrp.setCounter(yCntr.getName(), yCntr);
}
yCntrs.setCounterGroup(yGrp.getName(), yGrp);
}
return yCntrs;
}
public static JobStatus fromYarn(JobReport jobreport, String trackingUrl) {
JobPriority jobPriority = JobPriority.NORMAL;
JobStatus jobStatus = new org.apache.hadoop.mapred.JobStatus(
fromYarn(jobreport.getJobId()), jobreport.getSetupProgress(), jobreport
.getMapProgress(), jobreport.getReduceProgress(), jobreport
.getCleanupProgress(), fromYarn(jobreport.getJobState()),
jobPriority, jobreport.getUser(), jobreport.getJobName(), jobreport
.getJobFile(), trackingUrl, jobreport.isUber());
jobStatus.setStartTime(jobreport.getStartTime());
jobStatus.setFinishTime(jobreport.getFinishTime());
jobStatus.setFailureInfo(jobreport.getDiagnostics());
return jobStatus;
}
public static org.apache.hadoop.mapreduce.QueueState fromYarn(
QueueState state) {
org.apache.hadoop.mapreduce.QueueState qState =
org.apache.hadoop.mapreduce.QueueState.getState(
StringUtils.toLowerCase(state.toString()));
return qState;
}
public static int fromYarn(JobState state) {
switch (state) {
case NEW:
case INITED:
return org.apache.hadoop.mapred.JobStatus.PREP;
case RUNNING:
return org.apache.hadoop.mapred.JobStatus.RUNNING;
case KILLED:
return org.apache.hadoop.mapred.JobStatus.KILLED;
case SUCCEEDED:
return org.apache.hadoop.mapred.JobStatus.SUCCEEDED;
case FAILED:
case ERROR:
return org.apache.hadoop.mapred.JobStatus.FAILED;
}
throw new YarnRuntimeException("Unrecognized job state: " + state);
}
public static org.apache.hadoop.mapred.TIPStatus fromYarn(
TaskState state) {
switch (state) {
case NEW:
case SCHEDULED:
return org.apache.hadoop.mapred.TIPStatus.PENDING;
case RUNNING:
return org.apache.hadoop.mapred.TIPStatus.RUNNING;
case KILLED:
return org.apache.hadoop.mapred.TIPStatus.KILLED;
case SUCCEEDED:
return org.apache.hadoop.mapred.TIPStatus.COMPLETE;
case FAILED:
return org.apache.hadoop.mapred.TIPStatus.FAILED;
}
throw new YarnRuntimeException("Unrecognized task state: " + state);
}
public static TaskReport fromYarn(org.apache.hadoop.mapreduce.v2.api.records.TaskReport report) {
String[] diagnostics = null;
if (report.getDiagnosticsList() != null) {
diagnostics = new String[report.getDiagnosticsCount()];
int i = 0;
for (String cs : report.getDiagnosticsList()) {
diagnostics[i++] = cs.toString();
}
} else {
diagnostics = new String[0];
}
TaskReport rep = new TaskReport(fromYarn(report.getTaskId()),
report.getProgress(), report.getTaskState().toString(),
diagnostics, fromYarn(report.getTaskState()), report.getStartTime(), report.getFinishTime(),
fromYarn(report.getCounters()));
List<org.apache.hadoop.mapreduce.TaskAttemptID> runningAtts
= new ArrayList<org.apache.hadoop.mapreduce.TaskAttemptID>();
for (org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId id
: report.getRunningAttemptsList()) {
runningAtts.add(fromYarn(id));
}
rep.setRunningTaskAttemptIds(runningAtts);
if (report.getSuccessfulAttempt() != null) {
rep.setSuccessfulAttemptId(fromYarn(report.getSuccessfulAttempt()));
}
return rep;
}
public static List<TaskReport> fromYarn(
List<org.apache.hadoop.mapreduce.v2.api.records.TaskReport> taskReports) {
List<TaskReport> reports = new ArrayList<TaskReport>();
for (org.apache.hadoop.mapreduce.v2.api.records.TaskReport r : taskReports) {
reports.add(fromYarn(r));
}
return reports;
}
public static State fromYarn(YarnApplicationState yarnApplicationState,
FinalApplicationStatus finalApplicationStatus) {
switch (yarnApplicationState) {
case NEW:
case NEW_SAVING:
case SUBMITTED:
case ACCEPTED:
return State.PREP;
case RUNNING:
return State.RUNNING;
case FINISHED:
if (finalApplicationStatus == FinalApplicationStatus.SUCCEEDED) {
return State.SUCCEEDED;
} else if (finalApplicationStatus == FinalApplicationStatus.KILLED) {
return State.KILLED;
}
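      // FINISHED with any other final status is reported as FAILED (fall through)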
case FAILED:
return State.FAILED;
case KILLED:
return State.KILLED;
}
throw new YarnRuntimeException("Unrecognized application state: " + yarnApplicationState);
}
private static final String TT_NAME_PREFIX = "tracker_";
public static TaskTrackerInfo fromYarn(NodeReport node) {
TaskTrackerInfo taskTracker =
new TaskTrackerInfo(TT_NAME_PREFIX + node.getNodeId().toString());
return taskTracker;
}
public static TaskTrackerInfo[] fromYarnNodes(List<NodeReport> nodes) {
List<TaskTrackerInfo> taskTrackers = new ArrayList<TaskTrackerInfo>();
for (NodeReport node : nodes) {
taskTrackers.add(fromYarn(node));
}
return taskTrackers.toArray(new TaskTrackerInfo[nodes.size()]);
}
public static JobStatus fromYarn(ApplicationReport application,
String jobFile) {
String trackingUrl = application.getTrackingUrl();
trackingUrl = trackingUrl == null ? "" : trackingUrl;
JobStatus jobStatus =
new JobStatus(
TypeConverter.fromYarn(application.getApplicationId()),
0.0f, 0.0f, 0.0f, 0.0f,
TypeConverter.fromYarn(application.getYarnApplicationState(), application.getFinalApplicationStatus()),
org.apache.hadoop.mapreduce.JobPriority.NORMAL,
application.getUser(), application.getName(),
application.getQueue(), jobFile, trackingUrl, false
);
jobStatus.setSchedulingInfo(trackingUrl); // Set AM tracking url
jobStatus.setStartTime(application.getStartTime());
jobStatus.setFinishTime(application.getFinishTime());
jobStatus.setFailureInfo(application.getDiagnostics());
ApplicationResourceUsageReport resourceUsageReport =
application.getApplicationResourceUsageReport();
if (resourceUsageReport != null) {
jobStatus.setNeededMem(
resourceUsageReport.getNeededResources().getMemory());
jobStatus.setNumReservedSlots(
resourceUsageReport.getNumReservedContainers());
jobStatus.setNumUsedSlots(resourceUsageReport.getNumUsedContainers());
jobStatus.setReservedMem(
resourceUsageReport.getReservedResources().getMemory());
jobStatus.setUsedMem(resourceUsageReport.getUsedResources().getMemory());
}
return jobStatus;
}
public static JobStatus[] fromYarnApps(List<ApplicationReport> applications,
Configuration conf) {
List<JobStatus> jobStatuses = new ArrayList<JobStatus>();
for (ApplicationReport application : applications) {
// each applicationReport has its own jobFile
org.apache.hadoop.mapreduce.JobID jobId =
TypeConverter.fromYarn(application.getApplicationId());
jobStatuses.add(TypeConverter.fromYarn(application,
MRApps.getJobFile(conf, application.getUser(), jobId)));
}
return jobStatuses.toArray(new JobStatus[jobStatuses.size()]);
}
public static QueueInfo fromYarn(org.apache.hadoop.yarn.api.records.QueueInfo
queueInfo, Configuration conf) {
QueueInfo toReturn = new QueueInfo(queueInfo.getQueueName(), "Capacity: " +
queueInfo.getCapacity() * 100 + ", MaximumCapacity: " +
(queueInfo.getMaximumCapacity() < 0 ? "UNDEFINED" :
queueInfo.getMaximumCapacity() * 100) + ", CurrentCapacity: " +
queueInfo.getCurrentCapacity() * 100, fromYarn(queueInfo.getQueueState()),
TypeConverter.fromYarnApps(queueInfo.getApplications(), conf));
List<QueueInfo> childQueues = new ArrayList<QueueInfo>();
for(org.apache.hadoop.yarn.api.records.QueueInfo childQueue :
queueInfo.getChildQueues()) {
childQueues.add(fromYarn(childQueue, conf));
}
toReturn.setQueueChildren(childQueues);
return toReturn;
}
public static QueueInfo[] fromYarnQueueInfo(
List<org.apache.hadoop.yarn.api.records.QueueInfo> queues,
Configuration conf) {
List<QueueInfo> queueInfos = new ArrayList<QueueInfo>(queues.size());
for (org.apache.hadoop.yarn.api.records.QueueInfo queue : queues) {
queueInfos.add(TypeConverter.fromYarn(queue, conf));
}
return queueInfos.toArray(new QueueInfo[queueInfos.size()]);
}
public static QueueAclsInfo[] fromYarnQueueUserAclsInfo(
List<QueueUserACLInfo> userAcls) {
List<QueueAclsInfo> acls = new ArrayList<QueueAclsInfo>();
for (QueueUserACLInfo aclInfo : userAcls) {
List<String> operations = new ArrayList<String>();
for (QueueACL qAcl : aclInfo.getUserAcls()) {
operations.add(qAcl.toString());
}
QueueAclsInfo acl =
new QueueAclsInfo(aclInfo.getQueueName(),
operations.toArray(new String[operations.size()]));
acls.add(acl);
}
return acls.toArray(new QueueAclsInfo[acls.size()]);
}
}
| 20,048 | 36.971591 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/MRDelegationTokenRenewer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.mapreduce.v2.security;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedAction;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.util.Records;
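/**
 * Renews and cancels MapReduce JobHistoryServer delegation tokens by
 * contacting the history server through the HSClientProtocol.
 */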
@InterfaceAudience.Private
public class MRDelegationTokenRenewer extends TokenRenewer {
private static final Log LOG = LogFactory
.getLog(MRDelegationTokenRenewer.class);
@Override
public boolean handleKind(Text kind) {
return MRDelegationTokenIdentifier.KIND_NAME.equals(kind);
}
@Override
public long renew(Token<?> token, Configuration conf) throws IOException,
InterruptedException {
org.apache.hadoop.yarn.api.records.Token dToken =
org.apache.hadoop.yarn.api.records.Token.newInstance(
token.getIdentifier(), token.getKind().toString(),
token.getPassword(), token.getService().toString());
MRClientProtocol histProxy = instantiateHistoryProxy(conf,
SecurityUtil.getTokenServiceAddr(token));
try {
RenewDelegationTokenRequest request = Records
.newRecord(RenewDelegationTokenRequest.class);
request.setDelegationToken(dToken);
return histProxy.renewDelegationToken(request).getNextExpirationTime();
} finally {
stopHistoryProxy(histProxy);
}
}
@Override
public void cancel(Token<?> token, Configuration conf) throws IOException,
InterruptedException {
org.apache.hadoop.yarn.api.records.Token dToken =
org.apache.hadoop.yarn.api.records.Token.newInstance(
token.getIdentifier(), token.getKind().toString(),
token.getPassword(), token.getService().toString());
MRClientProtocol histProxy = instantiateHistoryProxy(conf,
SecurityUtil.getTokenServiceAddr(token));
try {
CancelDelegationTokenRequest request = Records
.newRecord(CancelDelegationTokenRequest.class);
request.setDelegationToken(dToken);
histProxy.cancelDelegationToken(request);
} finally {
stopHistoryProxy(histProxy);
}
}
@Override
public boolean isManaged(Token<?> token) throws IOException {
return true;
}
protected void stopHistoryProxy(MRClientProtocol proxy) {
RPC.stopProxy(proxy);
}
protected MRClientProtocol instantiateHistoryProxy(final Configuration conf,
final InetSocketAddress hsAddress) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to MRHistoryServer at: " + hsAddress);
}
final YarnRPC rpc = YarnRPC.create(conf);
UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
return currentUser.doAs(new PrivilegedAction<MRClientProtocol>() {
@Override
public MRClientProtocol run() {
return (MRClientProtocol) rpc.getProxy(HSClientProtocol.class,
hsAddress, conf);
}
});
}
}
| 4,498 | 36.181818 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.security.client;
import org.apache.hadoop.classification.InterfaceAudience;
| 948 | 44.190476 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.security.client;
import java.lang.annotation.Annotation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
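/**
 * Supplies the Kerberos principal and delegation token metadata used when
 * establishing secure connections over the history server client protocol.
 */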
public class ClientHSSecurityInfo extends SecurityInfo {
@Override
public KerberosInfo getKerberosInfo(Class<?> protocol, Configuration conf) {
    if (!protocol.equals(HSClientProtocolPB.class)) {
return null;
}
return new KerberosInfo() {
@Override
public Class<? extends Annotation> annotationType() {
return null;
}
@Override
public String serverPrincipal() {
return JHAdminConfig.MR_HISTORY_PRINCIPAL;
}
@Override
public String clientPrincipal() {
return null;
}
};
}
@Override
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
    if (!protocol.equals(HSClientProtocolPB.class)) {
return null;
}
return new TokenInfo() {
@Override
public Class<? extends Annotation> annotationType() {
return null;
}
@Override
public Class<? extends TokenSelector<? extends TokenIdentifier>>
value() {
return ClientHSTokenSelector.class;
}
    };
  }
}
| 2,395 | 28.95 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSTokenSelector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.security.client;
import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenSelector;
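/**
 * Selects the MR history server delegation token matching a given service
 * address from a collection of credentials.
 */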
public class ClientHSTokenSelector implements
TokenSelector<MRDelegationTokenIdentifier> {
private static final Log LOG = LogFactory
.getLog(ClientHSTokenSelector.class);
@SuppressWarnings("unchecked")
public Token<MRDelegationTokenIdentifier> selectToken(Text service,
Collection<Token<? extends TokenIdentifier>> tokens) {
if (service == null) {
return null;
}
LOG.debug("Looking for a token with service " + service.toString());
for (Token<? extends TokenIdentifier> token : tokens) {
if (LOG.isDebugEnabled()) {
LOG.debug("Token kind is " + token.getKind().toString()
+ " and the token's service name is " + token.getService());
}
if (MRDelegationTokenIdentifier.KIND_NAME.equals(token.getKind())
&& service.equals(token.getService())) {
return (Token<MRDelegationTokenIdentifier>) token;
}
}
return null;
}
}
| 2,161 | 36.929825 | 74 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobIndexInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.jobhistory;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
/**
* Maintains information which may be used by the jobHistory indexing
* system.
*/
public class JobIndexInfo {
private long submitTime;
private long finishTime;
private String user;
private String queueName;
private String jobName;
private JobId jobId;
private int numMaps;
private int numReduces;
private String jobStatus;
private long jobStartTime;
public JobIndexInfo() {
}
public JobIndexInfo(long submitTime, long finishTime, String user,
String jobName, JobId jobId, int numMaps, int numReduces, String jobStatus) {
this(submitTime, finishTime, user, jobName, jobId, numMaps, numReduces,
jobStatus, JobConf.DEFAULT_QUEUE_NAME);
}
public JobIndexInfo(long submitTime, long finishTime, String user,
String jobName, JobId jobId, int numMaps, int numReduces,
String jobStatus, String queueName) {
this.submitTime = submitTime;
this.finishTime = finishTime;
this.user = user;
this.jobName = jobName;
this.jobId = jobId;
this.numMaps = numMaps;
this.numReduces = numReduces;
this.jobStatus = jobStatus;
this.jobStartTime = -1;
this.queueName = queueName;
}
public long getSubmitTime() {
return submitTime;
}
public void setSubmitTime(long submitTime) {
this.submitTime = submitTime;
}
public long getFinishTime() {
return finishTime;
}
public void setFinishTime(long finishTime) {
this.finishTime = finishTime;
}
public String getUser() {
return user;
}
public void setUser(String user) {
this.user = user;
}
public String getQueueName() {
return queueName;
}
public void setQueueName(String queueName) {
this.queueName = queueName;
}
public String getJobName() {
return jobName;
}
public void setJobName(String jobName) {
this.jobName = jobName;
}
public JobId getJobId() {
return jobId;
}
public void setJobId(JobId jobId) {
this.jobId = jobId;
}
public int getNumMaps() {
return numMaps;
}
public void setNumMaps(int numMaps) {
this.numMaps = numMaps;
}
public int getNumReduces() {
return numReduces;
}
public void setNumReduces(int numReduces) {
this.numReduces = numReduces;
}
public String getJobStatus() {
return jobStatus;
}
public void setJobStatus(String jobStatus) {
this.jobStatus = jobStatus;
}
public long getJobStartTime() {
return jobStartTime;
}
public void setJobStartTime(long lTime) {
this.jobStartTime = lTime;
}
@Override
public String toString() {
return "JobIndexInfo [submitTime=" + submitTime + ", finishTime="
+ finishTime + ", user=" + user + ", jobName=" + jobName + ", jobId="
+ jobId + ", numMaps=" + numMaps + ", numReduces=" + numReduces
+ ", jobStatus=" + jobStatus + "]";
}
}
| 3,809 | 27.222222 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
| 943 | 43.952381 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.jobhistory;
import java.io.File;
import java.io.IOException;
import java.util.Calendar;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class JobHistoryUtils {
/**
* Permissions for the history staging dir while JobInProgress.
*/
public static final FsPermission HISTORY_STAGING_DIR_PERMISSIONS =
FsPermission.createImmutable( (short) 0700);
/**
* Permissions for the user directory under the staging directory.
*/
public static final FsPermission HISTORY_STAGING_USER_DIR_PERMISSIONS =
FsPermission.createImmutable((short) 0700);
/**
* Permissions for the history done dir and derivatives.
*/
public static final FsPermission HISTORY_DONE_DIR_PERMISSION =
FsPermission.createImmutable((short) 0770);
public static final FsPermission HISTORY_DONE_FILE_PERMISSION =
      FsPermission.createImmutable((short) 0770); // rwxrwx---
/**
* Umask for the done dir and derivatives.
*/
public static final FsPermission HISTORY_DONE_DIR_UMASK = FsPermission
.createImmutable((short) (0770 ^ 0777));
/**
* Permissions for the intermediate done directory.
*/
public static final FsPermission HISTORY_INTERMEDIATE_DONE_DIR_PERMISSIONS =
FsPermission.createImmutable((short) 01777);
/**
* Permissions for the user directory under the intermediate done directory.
*/
public static final FsPermission HISTORY_INTERMEDIATE_USER_DIR_PERMISSIONS =
FsPermission.createImmutable((short) 0770);
public static final FsPermission HISTORY_INTERMEDIATE_FILE_PERMISSIONS =
      FsPermission.createImmutable((short) 0770); // rwxrwx---
/**
* Suffix for configuration files.
*/
public static final String CONF_FILE_NAME_SUFFIX = "_conf.xml";
/**
* Suffix for summary files.
*/
public static final String SUMMARY_FILE_NAME_SUFFIX = ".summary";
/**
* Job History File extension.
*/
public static final String JOB_HISTORY_FILE_EXTENSION = ".jhist";
public static final int VERSION = 4;
public static final int SERIAL_NUMBER_DIRECTORY_DIGITS = 6;
public static final String TIMESTAMP_DIR_REGEX = "\\d{4}" + "\\" + Path.SEPARATOR + "\\d{2}" + "\\" + Path.SEPARATOR + "\\d{2}";
public static final Pattern TIMESTAMP_DIR_PATTERN = Pattern.compile(TIMESTAMP_DIR_REGEX);
private static final String TIMESTAMP_DIR_FORMAT = "%04d" + File.separator + "%02d" + File.separator + "%02d";
private static final Log LOG = LogFactory.getLog(JobHistoryUtils.class);
private static final PathFilter CONF_FILTER = new PathFilter() {
@Override
public boolean accept(Path path) {
return path.getName().endsWith(CONF_FILE_NAME_SUFFIX);
}
};
private static final PathFilter JOB_HISTORY_FILE_FILTER = new PathFilter() {
@Override
public boolean accept(Path path) {
return path.getName().endsWith(JOB_HISTORY_FILE_EXTENSION);
}
};
/**
* Checks whether the provided path string is a valid job history file.
* @param pathString the path to be checked.
   * @return true if the path is a valid job history filename, false otherwise
*/
public static boolean isValidJobHistoryFileName(String pathString) {
return pathString.endsWith(JOB_HISTORY_FILE_EXTENSION);
}
/**
* Returns the jobId from a job history file name.
* @param pathString the path string.
* @return the JobId
* @throws IOException if the filename format is invalid.
*/
public static JobID getJobIDFromHistoryFilePath(String pathString) throws IOException {
String [] parts = pathString.split(Path.SEPARATOR);
String fileNamePart = parts[parts.length -1];
JobIndexInfo jobIndexInfo = FileNameIndexUtils.getIndexInfo(fileNamePart);
return TypeConverter.fromYarn(jobIndexInfo.getJobId());
}
/**
* Gets a PathFilter which would match configuration files.
* @return the patch filter {@link PathFilter} for matching conf files.
*/
public static PathFilter getConfFileFilter() {
return CONF_FILTER;
}
/**
* Gets a PathFilter which would match job history file names.
* @return the path filter {@link PathFilter} matching job history files.
*/
public static PathFilter getHistoryFileFilter() {
return JOB_HISTORY_FILE_FILTER;
}
/**
* Gets the configured directory prefix for In Progress history files.
   * @param conf the configuration for the job
* @param jobId the id of the job the history file is for.
* @return A string representation of the prefix.
*/
public static String
getConfiguredHistoryStagingDirPrefix(Configuration conf, String jobId)
throws IOException {
String user = UserGroupInformation.getCurrentUser().getShortUserName();
Path stagingPath = MRApps.getStagingAreaDir(conf, user);
Path path = new Path(stagingPath, jobId);
String logDir = path.toString();
return ensurePathInDefaultFileSystem(logDir, conf);
}
/**
* Gets the configured directory prefix for intermediate done history files.
   * @param conf the job configuration
* @return A string representation of the prefix.
*/
public static String getConfiguredHistoryIntermediateDoneDirPrefix(
Configuration conf) {
String doneDirPrefix = conf
.get(JHAdminConfig.MR_HISTORY_INTERMEDIATE_DONE_DIR);
if (doneDirPrefix == null) {
doneDirPrefix = conf.get(MRJobConfig.MR_AM_STAGING_DIR,
MRJobConfig.DEFAULT_MR_AM_STAGING_DIR)
+ "/history/done_intermediate";
}
return ensurePathInDefaultFileSystem(doneDirPrefix, conf);
}
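  // Editorial note (added for illustration, not part of the original source):
  // with the default staging directory (MRJobConfig.DEFAULT_MR_AM_STAGING_DIR,
  // typically "/tmp/hadoop-yarn/staging") and no explicit
  // mapreduce.jobhistory.intermediate-done-dir setting, this resolves to
  // "/tmp/hadoop-yarn/staging/history/done_intermediate", qualified against
  // the cluster's default file system.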
/**
* Gets the configured directory prefix for Done history files.
* @param conf the configuration object
* @return the done history directory
*/
public static String getConfiguredHistoryServerDoneDirPrefix(
Configuration conf) {
String doneDirPrefix = conf.get(JHAdminConfig.MR_HISTORY_DONE_DIR);
if (doneDirPrefix == null) {
doneDirPrefix = conf.get(MRJobConfig.MR_AM_STAGING_DIR,
MRJobConfig.DEFAULT_MR_AM_STAGING_DIR)
+ "/history/done";
}
return ensurePathInDefaultFileSystem(doneDirPrefix, conf);
}
/**
   * Gets the default file system for the cluster (used to ensure consistency
   * of history done/staging locations) across different contexts.
   *
   * @return the default file context, or null if it cannot be determined
*/
private static FileContext getDefaultFileContext() {
    // If FS_DEFAULT_NAME_KEY was set solely by core-default.xml then we
    // ignore it. This prevents defaulting history paths to the file system
    // specified by core-default.xml, which would not make sense in any case.
    // For a test case to exploit this functionality it should create
    // core-site.xml.
FileContext fc = null;
Configuration defaultConf = new Configuration();
String[] sources;
sources = defaultConf.getPropertySources(
CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
if (sources != null &&
(!Arrays.asList(sources).contains("core-default.xml") ||
sources.length > 1)) {
try {
fc = FileContext.getFileContext(defaultConf);
LOG.info("Default file system [" +
fc.getDefaultFileSystem().getUri() + "]");
} catch (UnsupportedFileSystemException e) {
LOG.error("Unable to create default file context [" +
defaultConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) +
"]",
e);
}
}
else {
LOG.info("Default file system is set solely " +
"by core-default.xml therefore - ignoring");
}
return fc;
}
/**
* Ensure that path belongs to cluster's default file system unless
* 1. it is already fully qualified.
* 2. current job configuration uses default file system
* 3. running from a test case without core-site.xml
*
* @param sourcePath source path
* @param conf the job configuration
   * @return the fully qualified path (if necessary) in the default file system
*/
private static String ensurePathInDefaultFileSystem(String sourcePath, Configuration conf) {
Path path = new Path(sourcePath);
FileContext fc = getDefaultFileContext();
if (fc == null ||
fc.getDefaultFileSystem().getUri().toString().equals(
conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "")) ||
path.toUri().getAuthority() != null ||
path.toUri().getScheme()!= null) {
return sourcePath;
}
return fc.makeQualified(path).toString();
}
/**
* Gets the user directory for intermediate done history files.
* @param conf the configuration object
   * @return the intermediate done directory for the current user's job history files.
*/
public static String getHistoryIntermediateDoneDirForUser(Configuration conf) throws IOException {
return new Path(getConfiguredHistoryIntermediateDoneDirPrefix(conf),
UserGroupInformation.getCurrentUser().getShortUserName()).toString();
}
public static boolean shouldCreateNonUserDirectory(Configuration conf) {
    // Returning true by default to allow non-secure single node clusters to
    // work without any configuration change.
return conf.getBoolean(MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR, true);
}
/**
   * Gets the job history file path for history files that are not yet done.
*/
public static Path getStagingJobHistoryFile(Path dir, JobId jobId, int attempt) {
return getStagingJobHistoryFile(dir, TypeConverter.fromYarn(jobId).toString(), attempt);
}
/**
   * Gets the job history file path for history files that are not yet done.
*/
public static Path getStagingJobHistoryFile(Path dir, String jobId, int attempt) {
return new Path(dir, jobId + "_" +
attempt + JOB_HISTORY_FILE_EXTENSION);
}
/**
   * Gets the intermediate configuration file name for a job.
* @param jobId the jobId.
* @return the conf file name.
*/
public static String getIntermediateConfFileName(JobId jobId) {
return TypeConverter.fromYarn(jobId).toString() + CONF_FILE_NAME_SUFFIX;
}
/**
   * Gets the intermediate summary file name for a job.
   * @param jobId the jobId.
   * @return the summary file name.
*/
public static String getIntermediateSummaryFileName(JobId jobId) {
return TypeConverter.fromYarn(jobId).toString() + SUMMARY_FILE_NAME_SUFFIX;
}
/**
* Gets the conf file path for jobs in progress.
*
* @param logDir the log directory prefix.
* @param jobId the jobId.
* @param attempt attempt number for this job.
* @return the conf file path for jobs in progress.
*/
public static Path getStagingConfFile(Path logDir, JobId jobId, int attempt) {
Path jobFilePath = null;
if (logDir != null) {
jobFilePath = new Path(logDir, TypeConverter.fromYarn(jobId).toString()
+ "_" + attempt + CONF_FILE_NAME_SUFFIX);
}
return jobFilePath;
}
/**
   * Gets the serial number part of the path based on the jobId and serial number format.
   * @param id the jobId.
   * @param serialNumberFormat the format string used to render the serial number.
   * @return the serial number part of the path based on the jobId and serial number format.
*/
public static String serialNumberDirectoryComponent(JobId id, String serialNumberFormat) {
return String.format(serialNumberFormat,
Integer.valueOf(jobSerialNumber(id))).substring(0,
SERIAL_NUMBER_DIRECTORY_DIGITS);
}
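  // Example (added for illustration; the "%09d" format is an assumption about
  // how callers render serial numbers): for a job with serial number 123,
  // String.format("%09d", 123) gives "000000123" and the first
  // SERIAL_NUMBER_DIRECTORY_DIGITS (6) characters, "000000", form the
  // serial number directory component.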
  /** Extracts the timestamp component from the path.
   * @param path the path containing the YYYY/MM/DD timestamp component.
* @return the timestamp component from the path
*/
public static String getTimestampPartFromPath(String path) {
Matcher matcher = TIMESTAMP_DIR_PATTERN.matcher(path);
if (matcher.find()) {
String matched = matcher.group();
String ret = matched.intern();
return ret;
} else {
return null;
}
}
/**
* Gets the history subdirectory based on the jobId, timestamp and serial number format.
   * @param id the jobId.
   * @param timestampComponent the timestamp (YYYY/MM/DD) component of the path.
   * @param serialNumberFormat the format string used to render the serial number.
   * @return the history sub directory based on the jobId, timestamp and serial number format
*/
public static String historyLogSubdirectory(JobId id, String timestampComponent, String serialNumberFormat) {
// String result = LOG_VERSION_STRING;
String result = "";
String serialNumberDirectory = serialNumberDirectoryComponent(id, serialNumberFormat);
result = result
+ timestampComponent
+ File.separator + serialNumberDirectory
+ File.separator;
return result;
}
/**
* Gets the timestamp component based on millisecond time.
   * @param millisecondTime the time in milliseconds.
* @return the timestamp component based on millisecond time
*/
public static String timestampDirectoryComponent(long millisecondTime) {
Calendar timestamp = Calendar.getInstance();
timestamp.setTimeInMillis(millisecondTime);
String dateString = null;
dateString = String
.format(TIMESTAMP_DIR_FORMAT,
timestamp.get(Calendar.YEAR),
// months are 0-based in Calendar, but people will expect January to
// be month #1.
timestamp.get(Calendar.MONTH) + 1,
timestamp.get(Calendar.DAY_OF_MONTH));
dateString = dateString.intern();
return dateString;
}
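  // Example (added for illustration): a millisecond timestamp falling on
  // 2013-07-22 in the JVM's default time zone produces
  // "2013" + File.separator + "07" + File.separator + "22",
  // i.e. "2013/07/22" on Unix-like systems.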
public static String doneSubdirsBeforeSerialTail() {
// date
String result = "/*/*/*"; // YYYY/MM/DD ;
return result;
}
/**
* Computes a serial number used as part of directory naming for the given jobId.
* @param id the jobId.
* @return the serial number used as part of directory naming for the given jobid
*/
public static int jobSerialNumber(JobId id) {
return id.getId();
}
public static List<FileStatus> localGlobber(FileContext fc, Path root, String tail)
throws IOException {
return localGlobber(fc, root, tail, null);
}
public static List<FileStatus> localGlobber(FileContext fc, Path root, String tail,
PathFilter filter) throws IOException {
return localGlobber(fc, root, tail, filter, null);
}
  // hasFlatFiles is just used to return a second value if you want
// one. I would have used MutableBoxedBoolean if such had been provided.
public static List<FileStatus> localGlobber(FileContext fc, Path root, String tail,
PathFilter filter, AtomicBoolean hasFlatFiles) throws IOException {
if (tail.equals("")) {
return (listFilteredStatus(fc, root, filter));
}
if (tail.startsWith("/*")) {
Path[] subdirs = filteredStat2Paths(
remoteIterToList(fc.listStatus(root)), true, hasFlatFiles);
List<List<FileStatus>> subsubdirs = new LinkedList<List<FileStatus>>();
int subsubdirCount = 0;
if (subdirs.length == 0) {
return new LinkedList<FileStatus>();
}
String newTail = tail.substring(2);
for (int i = 0; i < subdirs.length; ++i) {
subsubdirs.add(localGlobber(fc, subdirs[i], newTail, filter, null));
// subsubdirs.set(i, localGlobber(fc, subdirs[i], newTail, filter,
// null));
subsubdirCount += subsubdirs.get(i).size();
}
List<FileStatus> result = new LinkedList<FileStatus>();
for (int i = 0; i < subsubdirs.size(); ++i) {
result.addAll(subsubdirs.get(i));
}
return result;
}
if (tail.startsWith("/")) {
int split = tail.indexOf('/', 1);
if (split < 0) {
return listFilteredStatus(fc, new Path(root, tail.substring(1)), filter);
} else {
String thisSegment = tail.substring(1, split);
String newTail = tail.substring(split);
return localGlobber(fc, new Path(root, thisSegment), newTail, filter,
hasFlatFiles);
}
}
IOException e = new IOException("localGlobber: bad tail");
throw e;
}
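  // Usage sketch (added for illustration): callers typically combine this with
  // doneSubdirsBeforeSerialTail(), e.g.
  //   List<FileStatus> serialDirs = localGlobber(fc, doneDirPath, "/*/*/*");
  // where doneDirPath is a hypothetical done-directory Path. Each "/*" expands
  // one directory level (YYYY, MM, DD), so the call returns the entries found
  // directly under the date directories.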
private static List<FileStatus> listFilteredStatus(FileContext fc, Path root,
PathFilter filter) throws IOException {
List<FileStatus> fsList = remoteIterToList(fc.listStatus(root));
if (filter == null) {
return fsList;
} else {
List<FileStatus> filteredList = new LinkedList<FileStatus>();
for (FileStatus fs : fsList) {
if (filter.accept(fs.getPath())) {
filteredList.add(fs);
}
}
return filteredList;
}
}
private static List<FileStatus> remoteIterToList(
RemoteIterator<FileStatus> rIter) throws IOException {
List<FileStatus> fsList = new LinkedList<FileStatus>();
if (rIter == null)
return fsList;
while (rIter.hasNext()) {
fsList.add(rIter.next());
}
return fsList;
}
// hasMismatches is just used to return a second value if you want
// one. I would have used MutableBoxedBoolean if such had been provided.
private static Path[] filteredStat2Paths(List<FileStatus> stats, boolean dirs,
AtomicBoolean hasMismatches) {
int resultCount = 0;
if (hasMismatches == null) {
hasMismatches = new AtomicBoolean(false);
}
for (int i = 0; i < stats.size(); ++i) {
if (stats.get(i).isDirectory() == dirs) {
stats.set(resultCount++, stats.get(i));
} else {
hasMismatches.set(true);
}
}
Path[] result = new Path[resultCount];
for (int i = 0; i < resultCount; i++) {
result[i] = stats.get(i).getPath();
}
return result;
}
public static Path getPreviousJobHistoryPath(
Configuration conf, ApplicationAttemptId applicationAttemptId)
throws IOException {
String jobId =
TypeConverter.fromYarn(applicationAttemptId.getApplicationId())
.toString();
String jobhistoryDir =
JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId);
Path histDirPath = FileContext.getFileContext(conf).makeQualified(
new Path(jobhistoryDir));
FileContext fc = FileContext.getFileContext(histDirPath.toUri(), conf);
return fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(
        histDirPath, jobId, (applicationAttemptId.getAttemptId() - 1)));
}
/**
* Looks for the dirs to clean. The folder structure is YYYY/MM/DD/Serial so
* we can use that to more efficiently find the directories to clean by
* comparing the cutoff timestamp with the timestamp from the folder
* structure.
*
* @param fc done dir FileContext
* @param root folder for completed jobs
* @param cutoff The cutoff for the max history age
* @return The list of directories for cleaning
* @throws IOException
*/
public static List<FileStatus> getHistoryDirsForCleaning(FileContext fc,
Path root, long cutoff) throws IOException {
List<FileStatus> fsList = new ArrayList<FileStatus>();
Calendar cCal = Calendar.getInstance();
cCal.setTimeInMillis(cutoff);
int cYear = cCal.get(Calendar.YEAR);
int cMonth = cCal.get(Calendar.MONTH) + 1;
int cDate = cCal.get(Calendar.DATE);
RemoteIterator<FileStatus> yearDirIt = fc.listStatus(root);
while (yearDirIt.hasNext()) {
FileStatus yearDir = yearDirIt.next();
try {
int year = Integer.parseInt(yearDir.getPath().getName());
if (year <= cYear) {
RemoteIterator<FileStatus> monthDirIt =
fc.listStatus(yearDir.getPath());
while (monthDirIt.hasNext()) {
FileStatus monthDir = monthDirIt.next();
try {
int month = Integer.parseInt(monthDir.getPath().getName());
// If we only checked the month here, then something like 07/2013
// would incorrectly not pass when the cutoff is 06/2014
if (year < cYear || month <= cMonth) {
RemoteIterator<FileStatus> dateDirIt =
fc.listStatus(monthDir.getPath());
while (dateDirIt.hasNext()) {
FileStatus dateDir = dateDirIt.next();
try {
int date = Integer.parseInt(dateDir.getPath().getName());
// If we only checked the date here, then something like
// 07/21/2013 would incorrectly not pass when the cutoff is
// 08/20/2013 or 07/20/2012
if (year < cYear || month < cMonth || date <= cDate) {
fsList.addAll(remoteIterToList(
fc.listStatus(dateDir.getPath())));
}
} catch (NumberFormatException nfe) {
// the directory didn't fit the format we're looking for so
// skip the dir
}
}
}
} catch (NumberFormatException nfe) {
// the directory didn't fit the format we're looking for so skip
// the dir
}
}
}
} catch (NumberFormatException nfe) {
// the directory didn't fit the format we're looking for so skip the dir
}
}
return fsList;
}
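  // Usage sketch (added for illustration; variable names are hypothetical):
  //   long cutoff = System.currentTimeMillis() - maxHistoryAgeMs;
  //   List<FileStatus> toClean =
  //       getHistoryDirsForCleaning(doneDirFc, doneDirPath, cutoff);
  // The returned statuses are the contents of every date directory whose
  // YYYY/MM/DD component is on or before the cutoff date.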
}
| 22,873 | 34.796557 | 131 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.jobhistory;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class FileNameIndexUtils {
// Sanitize job history file for predictable parsing
static final String DELIMITER = "-";
static final String DELIMITER_ESCAPE = "%2D";
private static final Log LOG = LogFactory.getLog(FileNameIndexUtils.class);
// Job history file names need to be backwards compatible
// Only append new elements to the end of this list
private static final int JOB_ID_INDEX = 0;
private static final int SUBMIT_TIME_INDEX = 1;
private static final int USER_INDEX = 2;
private static final int JOB_NAME_INDEX = 3;
private static final int FINISH_TIME_INDEX = 4;
private static final int NUM_MAPS_INDEX = 5;
private static final int NUM_REDUCES_INDEX = 6;
private static final int JOB_STATUS_INDEX = 7;
private static final int QUEUE_NAME_INDEX = 8;
private static final int JOB_START_TIME_INDEX = 9;
/**
* Constructs the job history file name from the JobIndexInfo.
*
* @param indexInfo the index info.
* @return the done job history filename.
*/
public static String getDoneFileName(JobIndexInfo indexInfo) throws IOException {
return getDoneFileName(indexInfo,
JHAdminConfig.DEFAULT_MR_HS_JOBNAME_LIMIT);
}
public static String getDoneFileName(JobIndexInfo indexInfo,
int jobNameLimit) throws IOException {
StringBuilder sb = new StringBuilder();
//JobId
sb.append(escapeDelimiters(TypeConverter.fromYarn(indexInfo.getJobId()).toString()));
sb.append(DELIMITER);
//SubmitTime
sb.append(indexInfo.getSubmitTime());
sb.append(DELIMITER);
//UserName
sb.append(escapeDelimiters(getUserName(indexInfo)));
sb.append(DELIMITER);
//JobName
sb.append(escapeDelimiters(trimJobName(
getJobName(indexInfo), jobNameLimit)));
sb.append(DELIMITER);
//FinishTime
sb.append(indexInfo.getFinishTime());
sb.append(DELIMITER);
//NumMaps
sb.append(indexInfo.getNumMaps());
sb.append(DELIMITER);
//NumReduces
sb.append(indexInfo.getNumReduces());
sb.append(DELIMITER);
//JobStatus
sb.append(indexInfo.getJobStatus());
sb.append(DELIMITER);
//QueueName
sb.append(escapeDelimiters(getQueueName(indexInfo)));
sb.append(DELIMITER);
//JobStartTime
sb.append(indexInfo.getJobStartTime());
sb.append(JobHistoryUtils.JOB_HISTORY_FILE_EXTENSION);
return encodeJobHistoryFileName(sb.toString());
}
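  // Illustrative example of a resulting file name (added; field values are
  // hypothetical):
  //   job_1317928501754_0001-1317928742025-user-Sleep+job-1317928754958-1-1-SUCCEEDED-default-1317928742025.jhist
  // i.e. jobId-submitTime-user-jobName-finishTime-numMaps-numReduces-status-queue-startTime,
  // with any literal "-" inside a field escaped as "%2D" and the whole name
  // URL-encoded by encodeJobHistoryFileName().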
/**
* Parses the provided job history file name to construct a
* JobIndexInfo object which is returned.
*
* @param jhFileName the job history filename.
* @return a JobIndexInfo object built from the filename.
*/
public static JobIndexInfo getIndexInfo(String jhFileName) throws IOException {
String fileName = jhFileName.substring(0, jhFileName.indexOf(JobHistoryUtils.JOB_HISTORY_FILE_EXTENSION));
JobIndexInfo indexInfo = new JobIndexInfo();
String[] jobDetails = fileName.split(DELIMITER);
JobID oldJobId = JobID.forName(decodeJobHistoryFileName(jobDetails[JOB_ID_INDEX]));
JobId jobId = TypeConverter.toYarn(oldJobId);
indexInfo.setJobId(jobId);
// Do not fail if there are some minor parse errors
try {
try {
indexInfo.setSubmitTime(
Long.parseLong(decodeJobHistoryFileName(jobDetails[SUBMIT_TIME_INDEX])));
} catch (NumberFormatException e) {
LOG.warn("Unable to parse submit time from job history file "
+ jhFileName + " : " + e);
}
indexInfo.setUser(
decodeJobHistoryFileName(jobDetails[USER_INDEX]));
indexInfo.setJobName(
decodeJobHistoryFileName(jobDetails[JOB_NAME_INDEX]));
try {
indexInfo.setFinishTime(
Long.parseLong(decodeJobHistoryFileName(jobDetails[FINISH_TIME_INDEX])));
} catch (NumberFormatException e) {
LOG.warn("Unable to parse finish time from job history file "
+ jhFileName + " : " + e);
}
try {
indexInfo.setNumMaps(
Integer.parseInt(decodeJobHistoryFileName(jobDetails[NUM_MAPS_INDEX])));
} catch (NumberFormatException e) {
LOG.warn("Unable to parse num maps from job history file "
+ jhFileName + " : " + e);
}
try {
indexInfo.setNumReduces(
Integer.parseInt(decodeJobHistoryFileName(jobDetails[NUM_REDUCES_INDEX])));
} catch (NumberFormatException e) {
LOG.warn("Unable to parse num reduces from job history file "
+ jhFileName + " : " + e);
}
indexInfo.setJobStatus(
decodeJobHistoryFileName(jobDetails[JOB_STATUS_INDEX]));
indexInfo.setQueueName(
decodeJobHistoryFileName(jobDetails[QUEUE_NAME_INDEX]));
try{
if (jobDetails.length <= JOB_START_TIME_INDEX) {
indexInfo.setJobStartTime(indexInfo.getSubmitTime());
} else {
indexInfo.setJobStartTime(
Long.parseLong(decodeJobHistoryFileName(jobDetails[JOB_START_TIME_INDEX])));
}
} catch (NumberFormatException e){
LOG.warn("Unable to parse start time from job history file "
+ jhFileName + " : " + e);
}
} catch (IndexOutOfBoundsException e) {
LOG.warn("Parsing job history file with partial data encoded into name: "
+ jhFileName);
}
return indexInfo;
}
/**
   * Helper function to URL-encode the file name of the job-history
   * log file.
*
* @param logFileName file name of the job-history file
* @return URL encoded filename
* @throws IOException
*/
public static String encodeJobHistoryFileName(String logFileName)
throws IOException {
String replacementDelimiterEscape = null;
// Temporarily protect the escape delimiters from encoding
if (logFileName.contains(DELIMITER_ESCAPE)) {
replacementDelimiterEscape = nonOccursString(logFileName);
logFileName = logFileName.replaceAll(DELIMITER_ESCAPE, replacementDelimiterEscape);
}
String encodedFileName = null;
try {
encodedFileName = URLEncoder.encode(logFileName, "UTF-8");
} catch (UnsupportedEncodingException uee) {
IOException ioe = new IOException();
ioe.initCause(uee);
ioe.setStackTrace(uee.getStackTrace());
throw ioe;
}
// Restore protected escape delimiters after encoding
if (replacementDelimiterEscape != null) {
encodedFileName = encodedFileName.replaceAll(replacementDelimiterEscape, DELIMITER_ESCAPE);
}
return encodedFileName;
}
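  // Clarifying note (added): without the protection above, URLEncoder would
  // re-encode the "%" of the "%2D" sequences produced by escapeDelimiters()
  // into "%25", turning "%2D" into "%252D". Swapping "%2D" for a placeholder
  // that does not occur in the name (see nonOccursString) and restoring it
  // after encoding keeps the delimiter escapes intact.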
/**
   * Helper function to URL-decode the file name of the job-history
   * log file.
*
* @param logFileName file name of the job-history file
* @return URL decoded filename
* @throws IOException
*/
public static String decodeJobHistoryFileName(String logFileName)
throws IOException {
String decodedFileName = null;
try {
decodedFileName = URLDecoder.decode(logFileName, "UTF-8");
} catch (UnsupportedEncodingException uee) {
IOException ioe = new IOException();
ioe.initCause(uee);
ioe.setStackTrace(uee.getStackTrace());
throw ioe;
}
return decodedFileName;
}
static String nonOccursString(String logFileName) {
int adHocIndex = 0;
String unfoundString = "q" + adHocIndex;
while (logFileName.contains(unfoundString)) {
unfoundString = "q" + ++adHocIndex;
}
return unfoundString + "q";
}
private static String getUserName(JobIndexInfo indexInfo) {
return getNonEmptyString(indexInfo.getUser());
}
private static String getJobName(JobIndexInfo indexInfo) {
return getNonEmptyString(indexInfo.getJobName());
}
private static String getQueueName(JobIndexInfo indexInfo) {
return getNonEmptyString(indexInfo.getQueueName());
}
//TODO Maybe handle default values for longs and integers here?
private static String getNonEmptyString(String in) {
if (in == null || in.length() == 0) {
in = "NA";
}
return in;
}
private static String escapeDelimiters(String escapee) {
return escapee.replaceAll(DELIMITER, DELIMITER_ESCAPE);
}
/**
   * Trims the job name if it exceeds the given limit.
*/
private static String trimJobName(String jobName, int jobNameLimit) {
if (jobName.length() > jobNameLimit) {
jobName = jobName.substring(0, jobNameLimit);
}
return jobName;
}
}
| 9,643 | 31.039867 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.jobhistory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.http.HttpConfig;
/**
* Stores Job History configuration keys that can be set by administrators of
* the Job History server.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class JHAdminConfig {
/** The prefix to all Job History configuration properties.*/
public static final String MR_HISTORY_PREFIX = "mapreduce.jobhistory.";
/** host:port address for History Server API.*/
public static final String MR_HISTORY_ADDRESS = MR_HISTORY_PREFIX + "address";
public static final int DEFAULT_MR_HISTORY_PORT = 10020;
public static final String DEFAULT_MR_HISTORY_ADDRESS = "0.0.0.0:" +
DEFAULT_MR_HISTORY_PORT;
public static final String MR_HISTORY_BIND_HOST = MR_HISTORY_PREFIX
+ "bind-host";
/** The address of the History server admin interface. */
public static final String JHS_ADMIN_ADDRESS = MR_HISTORY_PREFIX
+ "admin.address";
public static final int DEFAULT_JHS_ADMIN_PORT = 10033;
public static final String DEFAULT_JHS_ADMIN_ADDRESS = "0.0.0.0:"
+ DEFAULT_JHS_ADMIN_PORT;
/** ACL of who can be admin of Job history server. */
public static final String JHS_ADMIN_ACL = MR_HISTORY_PREFIX + "admin.acl";
public static final String DEFAULT_JHS_ADMIN_ACL = "*";
  /** Whether history cleaning should be enabled or not.*/
public static final String MR_HISTORY_CLEANER_ENABLE =
MR_HISTORY_PREFIX + "cleaner.enable";
/** Run the History Cleaner every X ms.*/
public static final String MR_HISTORY_CLEANER_INTERVAL_MS =
MR_HISTORY_PREFIX + "cleaner.interval-ms";
public static final long DEFAULT_MR_HISTORY_CLEANER_INTERVAL_MS =
1 * 24 * 60 * 60 * 1000l; //1 day
/** The number of threads to handle client API requests.*/
public static final String MR_HISTORY_CLIENT_THREAD_COUNT =
MR_HISTORY_PREFIX + "client.thread-count";
public static final int DEFAULT_MR_HISTORY_CLIENT_THREAD_COUNT = 10;
/**
   * Size of the date string cache. Affects the number of directories
* which will be scanned to find a job.
*/
public static final String MR_HISTORY_DATESTRING_CACHE_SIZE =
MR_HISTORY_PREFIX + "datestring.cache.size";
public static final int DEFAULT_MR_HISTORY_DATESTRING_CACHE_SIZE = 200000;
/** Path where history files should be stored for DONE jobs. **/
public static final String MR_HISTORY_DONE_DIR =
MR_HISTORY_PREFIX + "done-dir";
/**
   * Maximum time the history server will wait for the file system holding
   * history files to become available. Default value is -1, meaning wait
   * forever.
*/
public static final String MR_HISTORY_MAX_START_WAIT_TIME =
MR_HISTORY_PREFIX + "maximum-start-wait-time-millis";
public static final long DEFAULT_MR_HISTORY_MAX_START_WAIT_TIME = -1;
/**
   * Path where history files should be stored after a job finishes and before
   * they are pulled into the job history server.
**/
public static final String MR_HISTORY_INTERMEDIATE_DONE_DIR =
MR_HISTORY_PREFIX + "intermediate-done-dir";
/** Size of the job list cache.*/
public static final String MR_HISTORY_JOBLIST_CACHE_SIZE =
MR_HISTORY_PREFIX + "joblist.cache.size";
public static final int DEFAULT_MR_HISTORY_JOBLIST_CACHE_SIZE = 20000;
/** The location of the Kerberos keytab file.*/
public static final String MR_HISTORY_KEYTAB = MR_HISTORY_PREFIX + "keytab";
/** Size of the loaded job cache.*/
public static final String MR_HISTORY_LOADED_JOB_CACHE_SIZE =
MR_HISTORY_PREFIX + "loadedjobs.cache.size";
public static final int DEFAULT_MR_HISTORY_LOADED_JOB_CACHE_SIZE = 5;
/**
* The maximum age of a job history file before it is deleted from the history
* server.
*/
public static final String MR_HISTORY_MAX_AGE_MS =
MR_HISTORY_PREFIX + "max-age-ms";
public static final long DEFAULT_MR_HISTORY_MAX_AGE =
7 * 24 * 60 * 60 * 1000L; //1 week
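  // Example (added for illustration): an administrator could keep job history
  // for 30 days by setting, in mapred-site.xml,
  //   <property>
  //     <name>mapreduce.jobhistory.max-age-ms</name>
  //     <value>2592000000</value>  <!-- 30 * 24 * 60 * 60 * 1000 -->
  //   </property>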
/**
   * Scan for history files to move from the intermediate done dir to the done dir
* every X ms.
*/
public static final String MR_HISTORY_MOVE_INTERVAL_MS =
MR_HISTORY_PREFIX + "move.interval-ms";
public static final long DEFAULT_MR_HISTORY_MOVE_INTERVAL_MS =
3 * 60 * 1000l; //3 minutes
/** The number of threads used to move files.*/
public static final String MR_HISTORY_MOVE_THREAD_COUNT =
MR_HISTORY_PREFIX + "move.thread-count";
public static final int DEFAULT_MR_HISTORY_MOVE_THREAD_COUNT = 3;
/** The Kerberos principal for the history server.*/
public static final String MR_HISTORY_PRINCIPAL =
MR_HISTORY_PREFIX + "principal";
/** To enable https in MR history server */
public static final String MR_HS_HTTP_POLICY = MR_HISTORY_PREFIX
+ "http.policy";
public static String DEFAULT_MR_HS_HTTP_POLICY =
HttpConfig.Policy.HTTP_ONLY.name();
/**The address the history server webapp is on.*/
public static final String MR_HISTORY_WEBAPP_ADDRESS =
MR_HISTORY_PREFIX + "webapp.address";
public static final int DEFAULT_MR_HISTORY_WEBAPP_PORT = 19888;
public static final String DEFAULT_MR_HISTORY_WEBAPP_ADDRESS =
"0.0.0.0:" + DEFAULT_MR_HISTORY_WEBAPP_PORT;
/**The https address the history server webapp is on.*/
public static final String MR_HISTORY_WEBAPP_HTTPS_ADDRESS =
MR_HISTORY_PREFIX + "webapp.https.address";
public static final int DEFAULT_MR_HISTORY_WEBAPP_HTTPS_PORT = 19890;
public static final String DEFAULT_MR_HISTORY_WEBAPP_HTTPS_ADDRESS =
"0.0.0.0:" + DEFAULT_MR_HISTORY_WEBAPP_HTTPS_PORT;
/**The kerberos principal to be used for spnego filter for history server*/
public static final String MR_WEBAPP_SPNEGO_USER_NAME_KEY =
MR_HISTORY_PREFIX + "webapp.spnego-principal";
/** The kerberos keytab to be used for spnego filter for history server*/
public static final String MR_WEBAPP_SPNEGO_KEYTAB_FILE_KEY =
MR_HISTORY_PREFIX + "webapp.spnego-keytab-file";
/*
* HS Service Authorization
*/
public static final String MR_HS_SECURITY_SERVICE_AUTHORIZATION =
"security.mrhs.client.protocol.acl";
public static final String MR_HS_SECURITY_SERVICE_AUTHORIZATION_ADMIN_REFRESH =
"security.mrhs.admin.refresh.protocol.acl";
/**
* The HistoryStorage class to use to cache history data.
*/
public static final String MR_HISTORY_STORAGE =
MR_HISTORY_PREFIX + "store.class";
/**
* Enable the history server to store server state and recover server state
* upon startup.
*/
public static final String MR_HS_RECOVERY_ENABLE =
MR_HISTORY_PREFIX + "recovery.enable";
public static final boolean DEFAULT_MR_HS_RECOVERY_ENABLE = false;
/**
* The HistoryServerStateStoreService class to store and recover server state
*/
public static final String MR_HS_STATE_STORE =
MR_HISTORY_PREFIX + "recovery.store.class";
/**
* The URI where server state will be stored when
* HistoryServerFileSystemStateStoreService is configured as the state store
*/
public static final String MR_HS_FS_STATE_STORE_URI =
MR_HISTORY_PREFIX + "recovery.store.fs.uri";
/**
* The local path where server state will be stored when
* HistoryServerLeveldbStateStoreService is configured as the state store
*/
public static final String MR_HS_LEVELDB_STATE_STORE_PATH =
MR_HISTORY_PREFIX + "recovery.store.leveldb.path";
/** Whether to use fixed ports with the minicluster. */
public static final String MR_HISTORY_MINICLUSTER_FIXED_PORTS = MR_HISTORY_PREFIX
+ "minicluster.fixed.ports";
/**
* Default is false to be able to run tests concurrently without port
* conflicts.
*/
public static boolean DEFAULT_MR_HISTORY_MINICLUSTER_FIXED_PORTS = false;
/**
* Number of characters allowed for job name in Job History Server web page.
*/
public static final String MR_HS_JOBNAME_LIMIT = MR_HISTORY_PREFIX
+ "jobname.limit";
public static final int DEFAULT_MR_HS_JOBNAME_LIMIT = 50;
/**
* Settings for .jhist file format.
*/
public static final String MR_HS_JHIST_FORMAT =
MR_HISTORY_PREFIX + "jhist.format";
public static final String DEFAULT_MR_HS_JHIST_FORMAT =
"json";
}
| 9,190 | 38.616379 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.api;
import org.apache.hadoop.classification.InterfaceAudience;
| 936 | 43.619048 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/HSClientProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api;
public interface HSClientProtocol extends MRClientProtocol {
}
| 917 | 37.25 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/HSAdminRefreshProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.KerberosInfo;
@KerberosInfo(serverPrincipal = CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@Private
@InterfaceStability.Evolving
public interface HSAdminRefreshProtocol {
/**
* Refresh admin acls.
*
* @throws IOException
*/
public void refreshAdminAcls() throws IOException;
/**
* Refresh loaded job cache
* @throws IOException
*/
public void refreshLoadedJobCache() throws IOException;
/**
* Refresh job retention settings.
*
* @throws IOException
*/
public void refreshJobRetentionSettings() throws IOException;
/**
* Refresh log retention settings.
*
* @throws IOException
*/
public void refreshLogRetentionSettings() throws IOException;
}
| 1,830 | 29.516667 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse;
public interface MRClientProtocol {
/**
* Address to which the client is connected
* @return InetSocketAddress
*/
public InetSocketAddress getConnectAddress();
public GetJobReportResponse getJobReport(GetJobReportRequest request) throws IOException;
public GetTaskReportResponse getTaskReport(GetTaskReportRequest request) throws IOException;
public GetTaskAttemptReportResponse getTaskAttemptReport(GetTaskAttemptReportRequest request) throws IOException;
public GetCountersResponse getCounters(GetCountersRequest request) throws IOException;
public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(GetTaskAttemptCompletionEventsRequest request) throws IOException;
public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request) throws IOException;
public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request) throws IOException;
public KillJobResponse killJob(KillJobRequest request) throws IOException;
public KillTaskResponse killTask(KillTaskRequest request) throws IOException;
public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request) throws IOException;
public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request) throws IOException;
public GetDelegationTokenResponse getDelegationToken(GetDelegationTokenRequest request) throws IOException;
/**
* Renew an existing delegation token.
*
* @param request the delegation token to be renewed.
* @return the new expiry time for the delegation token.
* @throws IOException
*/
public RenewDelegationTokenResponse renewDelegationToken(
RenewDelegationTokenRequest request) throws IOException;
/**
* Cancel an existing delegation token.
*
* @param request the delegation token to be cancelled.
* @return an empty response.
* @throws IOException
*/
public CancelDelegationTokenResponse cancelDelegationToken(
CancelDelegationTokenRequest request) throws IOException;
}
| 5,249 | 56.065217 | 145 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/HSAdminProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
@KerberosInfo(serverPrincipal = CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@Private
@InterfaceStability.Evolving
public interface HSAdminProtocol extends GetUserMappingsProtocol,
RefreshUserMappingsProtocol, HSAdminRefreshProtocol {
}
| 1,461 | 40.771429 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocolPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.yarn.proto.MRClientProtocol.MRClientProtocolService;
@ProtocolInfo(
protocolName = "org.apache.hadoop.mapreduce.v2.api.MRClientProtocolPB",
protocolVersion = 1)
public interface MRClientProtocolPB extends MRClientProtocolService.BlockingInterface {
}
| 1,183 | 39.827586 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/HSClientProtocolPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.yarn.proto.HSClientProtocol.HSClientProtocolService;
@ProtocolInfo(protocolName = "org.apache.hadoop.mapreduce.v2.api.HSClientProtocolPB",
protocolVersion = 1)
public interface HSClientProtocolPB extends HSClientProtocolService.BlockingInterface {
}
| 1,178 | 41.107143 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRDelegationTokenIdentifier.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
/**
* {@link TokenIdentifier} that identifies delegation tokens
* issued by JobHistoryServer to delegate
* MR tasks talking to the JobHistoryServer.
*/
@Private
// TODO Move to a different package.
public class MRDelegationTokenIdentifier extends AbstractDelegationTokenIdentifier {
public static final Text KIND_NAME = new Text("MR_DELEGATION_TOKEN");
public MRDelegationTokenIdentifier() {
}
/**
* Create a new delegation token identifier
* @param owner the effective username of the token owner
* @param renewer the username of the renewer
* @param realUser the real username of the token owner
*/
public MRDelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
super(owner, renewer, realUser);
}
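  // Usage sketch (added for illustration; the principal names are hypothetical):
  //   MRDelegationTokenIdentifier id = new MRDelegationTokenIdentifier(
  //       new Text("alice"), new Text("historyserver/host@REALM"), new Text("alice"));
  //   id.getKind(); // -> the Text "MR_DELEGATION_TOKEN"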
@Override
public Text getKind() {
return KIND_NAME;
}
@InterfaceAudience.Private
public static class Renewer extends Token.TrivialRenewer {
@Override
protected Text getKind() {
return KIND_NAME;
}
}
}
| 2,201 | 31.382353 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/HSAdminRefreshProtocolPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.mapreduce.v2.hs.proto.HSAdminRefreshProtocolProtos.HSAdminRefreshProtocolService;
import org.apache.hadoop.security.KerberosInfo;
@KerberosInfo(serverPrincipal = CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
@ProtocolInfo(protocolName = "org.apache.hadoop.mapreduce.v2.api.HSAdminRefreshProtocol", protocolVersion = 1)
@Private
@InterfaceStability.Evolving
public interface HSAdminRefreshProtocolPB extends
HSAdminRefreshProtocolService.BlockingInterface {
}
| 1,580 | 46.909091 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetJobReportResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
public interface GetJobReportResponse {
public abstract JobReport getJobReport();
public abstract void setJobReport(JobReport jobReport);
}
| 1,078 | 37.535714 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
| 866 | 44.631579 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDiagnosticsResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import java.util.List;
public interface GetDiagnosticsResponse {
public abstract List<String> getDiagnosticsList();
public abstract String getDiagnostics(int index);
public abstract int getDiagnosticsCount();
public abstract void addAllDiagnostics(List<String> diagnostics);
public abstract void addDiagnostics(String diagnostic);
public abstract void removeDiagnostics(int index);
public abstract void clearDiagnostics();
}
| 1,313 | 37.647059 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportsRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
public interface GetTaskReportsRequest {
public abstract JobId getJobId();
public abstract TaskType getTaskType();
public abstract void setJobId(JobId jobId);
public abstract void setTaskType(TaskType taskType);
}
| 1,215 | 37 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDelegationTokenResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.yarn.api.records.Token;
public interface GetDelegationTokenResponse {
void setDelegationToken(Token clientDToken);
Token getDelegationToken();
}
| 1,043 | 39.153846 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptCompletionEventsResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
public interface GetTaskAttemptCompletionEventsResponse {
public abstract List<TaskAttemptCompletionEvent> getCompletionEventList();
public abstract TaskAttemptCompletionEvent getCompletionEvent(int index);
public abstract int getCompletionEventCount();
public abstract void addAllCompletionEvents(List<TaskAttemptCompletionEvent> eventList);
public abstract void addCompletionEvent(TaskAttemptCompletionEvent event);
public abstract void removeCompletionEvent(int index);
public abstract void clearCompletionEvents();
}
| 1,512 | 42.228571 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDelegationTokenRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
@Public
@Evolving
public interface GetDelegationTokenRequest {
String getRenewer();
void setRenewer(String renewer);
}
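// Illustrative usage (sketch only, with assumed helper names): a client that
// needs a delegation token over this protocol would typically build the
// request with the records factory and read the token from the paired
// GetDelegationTokenResponse:
//
// GetDelegationTokenRequest request =
// Records.newRecord(GetDelegationTokenRequest.class);
// request.setRenewer(renewerName);
// Token token = clientProxy.getDelegationToken(request).getDelegationToken();
//
// clientProxy is assumed to be an MRClientProtocol instance and renewerName
// the short name of the principal allowed to renew the token.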
| 1,127 | 37.896552 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDiagnosticsRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
public interface GetDiagnosticsRequest {
public abstract TaskAttemptId getTaskAttemptId();
public abstract void setTaskAttemptId(TaskAttemptId taskAttemptId);
}
| 1,103 | 38.428571 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/RenewDelegationTokenResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* The response to a renewDelegationToken call to the {@code ResourceManager}.
*/
@Public
@Evolving
public interface RenewDelegationTokenResponse {
long getNextExpirationTime();
void setNextExpirationTime(long expTime);
}
| 1,236 | 36.484848 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
public interface KillTaskResponse {
}
| 908 | 36.875 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/CancelDelegationTokenRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.api.records.Token;
/**
* The request issued by the client to the {@code ResourceManager} to cancel a
* delegation token.
*/
@Public
@Evolving
public interface CancelDelegationTokenRequest {
Token getDelegationToken();
void setDelegationToken(Token dToken);
}
| 1,301 | 36.2 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/CancelDelegationTokenResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
/**
* The response from the {@code ResourceManager} to a cancelDelegationToken
* request.
*/
@Public
@Evolving
public interface CancelDelegationTokenResponse {
}
| 1,170 | 35.59375 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskAttemptRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
public interface KillTaskAttemptRequest {
public abstract TaskAttemptId getTaskAttemptId();
public abstract void setTaskAttemptId(TaskAttemptId taskAttemptId);
}
| 1,104 | 38.464286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/FailTaskAttemptRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
public interface FailTaskAttemptRequest {
public abstract TaskAttemptId getTaskAttemptId();
public abstract void setTaskAttemptId(TaskAttemptId taskAttemptId);
}
| 1,104 | 38.464286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
public interface GetTaskReportResponse {
public abstract TaskReport getTaskReport();
public abstract void setTaskReport(TaskReport taskReport);
}
| 1,085 | 37.785714 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/FailTaskAttemptResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
public interface FailTaskAttemptResponse {
}
| 915 | 37.166667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptReportResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
public interface GetTaskAttemptReportResponse {
public abstract TaskAttemptReport getTaskAttemptReport();
public abstract void setTaskAttemptReport(TaskAttemptReport taskAttemptReport);
}
| 1,134 | 39.535714 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetCountersRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public interface GetCountersRequest {
public abstract JobId getJobId();
public abstract void setJobId(JobId jobId);
}
| 1,053 | 35.344828 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
public interface KillTaskRequest {
public abstract TaskId getTaskId();
public abstract void setTaskId(TaskId taskId);
}
| 1,055 | 36.714286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetCountersResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
public interface GetCountersResponse {
public abstract Counters getCounters();
public abstract void setCounters(Counters counters);
}
| 1,071 | 37.285714 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptReportRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
public interface GetTaskAttemptReportRequest {
public abstract TaskAttemptId getTaskAttemptId();
public abstract void setTaskAttemptId(TaskAttemptId taskAttemptId);
}
| 1,109 | 38.642857 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetJobReportRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public interface GetJobReportRequest {
public abstract JobId getJobId();
public abstract void setJobId(JobId jobId);
}
| 1,053 | 36.642857 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillJobRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public interface KillJobRequest {
public abstract JobId getJobId();
public abstract void setJobId(JobId jobId);
}
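// Illustrative usage (sketch only): killing a job through the client protocol
// is a single call once the request record carries the JobId:
//
// KillJobRequest request = Records.newRecord(KillJobRequest.class);
// request.setJobId(jobId);
// clientProxy.killJob(request); // returns an empty KillJobResponse
//
// clientProxy is assumed to be an MRClientProtocol instance.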
| 1,048 | 36.464286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
public interface GetTaskReportRequest {
public abstract TaskId getTaskId();
public abstract void setTaskId(TaskId taskId);
}
| 1,061 | 36.928571 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskAttemptCompletionEventsRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public interface GetTaskAttemptCompletionEventsRequest {
public abstract JobId getJobId();
public abstract int getFromEventId();
public abstract int getMaxEvents();
public abstract void setJobId(JobId jobId);
public abstract void setFromEventId(int id);
public abstract void setMaxEvents(int maxEvents);
}
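// Illustrative usage (sketch only): the fromEventId/maxEvents pair lets a
// client page through completion events in batches, for example:
//
// GetTaskAttemptCompletionEventsRequest request =
// Records.newRecord(GetTaskAttemptCompletionEventsRequest.class);
// request.setJobId(jobId);
// request.setFromEventId(startIndex);
// request.setMaxEvents(50);
// List<TaskAttemptCompletionEvent> events =
// clientProxy.getTaskAttemptCompletionEvents(request)
// .getCompletionEventList();
//
// clientProxy is assumed to be an MRClientProtocol instance; the next poll
// would pass startIndex + events.size() as the new fromEventId.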
| 1,248 | 38.03125 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillTaskAttemptResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
public interface KillTaskAttemptResponse {
}
| 915 | 37.166667 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetTaskReportsResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
public interface GetTaskReportsResponse {
public abstract List<TaskReport> getTaskReportList();
public abstract TaskReport getTaskReport(int index);
public abstract int getTaskReportCount();
public abstract void addAllTaskReports(List<TaskReport> taskReports);
public abstract void addTaskReport(TaskReport taskReport);
public abstract void removeTaskReport(int index);
public abstract void clearTaskReports();
}
| 1,386 | 38.628571 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/RenewDelegationTokenRequest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Evolving;
import org.apache.hadoop.yarn.api.records.Token;
/**
* The request issued by the client to renew a delegation token from
* the {@code ResourceManager}.
*/
@Public
@Evolving
public interface RenewDelegationTokenRequest {
Token getDelegationToken();
void setDelegationToken(Token dToken);
}
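// Illustrative usage (sketch only): the caller wraps an existing token and
// reads the new expiry time from the paired RenewDelegationTokenResponse:
//
// RenewDelegationTokenRequest request =
// Records.newRecord(RenewDelegationTokenRequest.class);
// request.setDelegationToken(token);
// long nextExpiry =
// clientProxy.renewDelegationToken(request).getNextExpirationTime();
//
// clientProxy is assumed to be an MRClientProtocol instance.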
| 1,301 | 36.2 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/KillJobResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
public interface KillJobResponse {
}
| 907 | 36.833333 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetCountersRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
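/*
 * Implementation note (added comment): this class follows the protobuf record
 * pattern used throughout these *PBImpl classes. While viaProto is true the
 * immutable proto holds the record's state; the first mutation goes through
 * maybeInitBuilder(), which copies the proto into a builder and flips viaProto
 * to false. Converted record objects (here the JobId) are cached in local
 * fields, and getProto() merges them back into the builder before rebuilding
 * the proto.
 */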
public class GetCountersRequestPBImpl extends ProtoBase<GetCountersRequestProto> implements GetCountersRequest {
GetCountersRequestProto proto = GetCountersRequestProto.getDefaultInstance();
GetCountersRequestProto.Builder builder = null;
boolean viaProto = false;
private JobId jobId = null;
public GetCountersRequestPBImpl() {
builder = GetCountersRequestProto.newBuilder();
}
public GetCountersRequestPBImpl(GetCountersRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public GetCountersRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.jobId != null) {
builder.setJobId(convertToProtoFormat(this.jobId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetCountersRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public JobId getJobId() {
GetCountersRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.jobId != null) {
return this.jobId;
}
if (!p.hasJobId()) {
return null;
}
this.jobId = convertFromProtoFormat(p.getJobId());
return this.jobId;
}
@Override
public void setJobId(JobId jobId) {
maybeInitBuilder();
if (jobId == null)
builder.clearJobId();
this.jobId = jobId;
}
private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
return new JobIdPBImpl(p);
}
private JobIdProto convertToProtoFormat(JobId t) {
return ((JobIdPBImpl)t).getProto();
}
}
| 3,220 | 28.281818 | 112 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDiagnosticsRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptIdPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetDiagnosticsRequestPBImpl extends ProtoBase<GetDiagnosticsRequestProto> implements GetDiagnosticsRequest {
GetDiagnosticsRequestProto proto = GetDiagnosticsRequestProto.getDefaultInstance();
GetDiagnosticsRequestProto.Builder builder = null;
boolean viaProto = false;
private TaskAttemptId taskAttemptId = null;
public GetDiagnosticsRequestPBImpl() {
builder = GetDiagnosticsRequestProto.newBuilder();
}
public GetDiagnosticsRequestPBImpl(GetDiagnosticsRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public GetDiagnosticsRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskAttemptId != null) {
builder.setTaskAttemptId(convertToProtoFormat(this.taskAttemptId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetDiagnosticsRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public TaskAttemptId getTaskAttemptId() {
GetDiagnosticsRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskAttemptId != null) {
return this.taskAttemptId;
}
if (!p.hasTaskAttemptId()) {
return null;
}
this.taskAttemptId = convertFromProtoFormat(p.getTaskAttemptId());
return this.taskAttemptId;
}
@Override
public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
maybeInitBuilder();
if (taskAttemptId == null)
builder.clearTaskAttemptId();
this.taskAttemptId = taskAttemptId;
}
private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
return new TaskAttemptIdPBImpl(p);
}
private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
return ((TaskAttemptIdPBImpl)t).getProto();
}
}
| 3,500 | 30.827273 | 121 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class RenewDelegationTokenResponsePBImpl extends
ProtoBase<RenewDelegationTokenResponseProto> implements
RenewDelegationTokenResponse {
RenewDelegationTokenResponseProto proto =
RenewDelegationTokenResponseProto.getDefaultInstance();
RenewDelegationTokenResponseProto.Builder builder = null;
boolean viaProto = false;
public RenewDelegationTokenResponsePBImpl() {
this.builder = RenewDelegationTokenResponseProto.newBuilder();
}
public RenewDelegationTokenResponsePBImpl (
RenewDelegationTokenResponseProto proto) {
this.proto = proto;
this.viaProto = true;
}
@Override
public RenewDelegationTokenResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = RenewDelegationTokenResponseProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public long getNextExpirationTime() {
RenewDelegationTokenResponseProtoOrBuilder p = viaProto ? proto : builder;
return p.getNewExpiryTime();
}
@Override
public void setNextExpirationTime(long expTime) {
maybeInitBuilder();
builder.setNewExpiryTime(expTime);
}
}
| 2,471 | 33.816901 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetDelegationTokenRequestPBImpl extends
ProtoBase<GetDelegationTokenRequestProto> implements GetDelegationTokenRequest {
String renewer;
GetDelegationTokenRequestProto proto =
GetDelegationTokenRequestProto.getDefaultInstance();
GetDelegationTokenRequestProto.Builder builder = null;
boolean viaProto = false;
public GetDelegationTokenRequestPBImpl() {
builder = GetDelegationTokenRequestProto.newBuilder();
}
public GetDelegationTokenRequestPBImpl (
GetDelegationTokenRequestProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public String getRenewer(){
GetDelegationTokenRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.renewer != null) {
return this.renewer;
}
this.renewer = p.getRenewer();
return this.renewer;
}
@Override
public void setRenewer(String renewer) {
maybeInitBuilder();
if (renewer == null)
builder.clearRenewer();
this.renewer = renewer;
}
@Override
public GetDelegationTokenRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (renewer != null) {
builder.setRenewer(this.renewer);
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetDelegationTokenRequestProto.newBuilder(proto);
}
viaProto = false;
}
}
| 2,908 | 29.621053 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/package-info.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.classification.InterfaceAudience;
| 960 | 44.761905 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptReportRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptIdPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptReportRequestProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetTaskAttemptReportRequestPBImpl extends ProtoBase<GetTaskAttemptReportRequestProto> implements GetTaskAttemptReportRequest {
GetTaskAttemptReportRequestProto proto = GetTaskAttemptReportRequestProto.getDefaultInstance();
GetTaskAttemptReportRequestProto.Builder builder = null;
boolean viaProto = false;
private TaskAttemptId taskAttemptId = null;
public GetTaskAttemptReportRequestPBImpl() {
builder = GetTaskAttemptReportRequestProto.newBuilder();
}
public GetTaskAttemptReportRequestPBImpl(GetTaskAttemptReportRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public GetTaskAttemptReportRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskAttemptId != null) {
builder.setTaskAttemptId(convertToProtoFormat(this.taskAttemptId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetTaskAttemptReportRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public TaskAttemptId getTaskAttemptId() {
GetTaskAttemptReportRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskAttemptId != null) {
return this.taskAttemptId;
}
if (!p.hasTaskAttemptId()) {
return null;
}
this.taskAttemptId = convertFromProtoFormat(p.getTaskAttemptId());
return this.taskAttemptId;
}
@Override
public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
maybeInitBuilder();
if (taskAttemptId == null)
builder.clearTaskAttemptId();
this.taskAttemptId = taskAttemptId;
}
private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
return new TaskAttemptIdPBImpl(p);
}
private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
return ((TaskAttemptIdPBImpl)t).getProto();
}
}
| 3,596 | 31.7 | 139 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProtoOrBuilder;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
public class RenewDelegationTokenRequestPBImpl extends
ProtoBase<RenewDelegationTokenRequestProto> implements
RenewDelegationTokenRequest {
RenewDelegationTokenRequestProto proto = RenewDelegationTokenRequestProto
.getDefaultInstance();
RenewDelegationTokenRequestProto.Builder builder = null;
boolean viaProto = false;
public RenewDelegationTokenRequestPBImpl() {
this.builder = RenewDelegationTokenRequestProto.newBuilder();
}
public RenewDelegationTokenRequestPBImpl(
RenewDelegationTokenRequestProto proto) {
this.proto = proto;
this.viaProto = true;
}
Token token;
@Override
public Token getDelegationToken() {
RenewDelegationTokenRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.token != null) {
return this.token;
}
this.token = convertFromProtoFormat(p.getToken());
return this.token;
}
@Override
public void setDelegationToken(Token token) {
maybeInitBuilder();
if (token == null)
builder.clearToken();
this.token = token;
}
@Override
public RenewDelegationTokenRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (token != null) {
builder.setToken(convertToProtoFormat(this.token));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = RenewDelegationTokenRequestProto.newBuilder(proto);
}
viaProto = false;
}
private TokenPBImpl convertFromProtoFormat(TokenProto p) {
return new TokenPBImpl(p);
}
private TokenProto convertToProtoFormat(Token t) {
return ((TokenPBImpl) t).getProto();
}
}
| 3,296 | 30.701923 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillJobRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class KillJobRequestPBImpl extends ProtoBase<KillJobRequestProto> implements KillJobRequest {
KillJobRequestProto proto = KillJobRequestProto.getDefaultInstance();
KillJobRequestProto.Builder builder = null;
boolean viaProto = false;
private JobId jobId = null;
public KillJobRequestPBImpl() {
builder = KillJobRequestProto.newBuilder();
}
public KillJobRequestPBImpl(KillJobRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public KillJobRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.jobId != null) {
builder.setJobId(convertToProtoFormat(this.jobId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = KillJobRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public JobId getJobId() {
KillJobRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.jobId != null) {
return this.jobId;
}
if (!p.hasJobId()) {
return null;
}
this.jobId = convertFromProtoFormat(p.getJobId());
return this.jobId;
}
@Override
public void setJobId(JobId jobId) {
maybeInitBuilder();
if (jobId == null)
builder.clearJobId();
this.jobId = jobId;
}
private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
return new JobIdPBImpl(p);
}
private JobIdProto convertToProtoFormat(JobId t) {
return ((JobIdPBImpl)t).getProto();
}
}
| 3,156 | 27.7 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProtoOrBuilder;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
public class CancelDelegationTokenRequestPBImpl extends
ProtoBase<CancelDelegationTokenRequestProto> implements
CancelDelegationTokenRequest {
CancelDelegationTokenRequestProto proto =
CancelDelegationTokenRequestProto.getDefaultInstance();
CancelDelegationTokenRequestProto.Builder builder = null;
boolean viaProto = false;
public CancelDelegationTokenRequestPBImpl() {
this.builder = CancelDelegationTokenRequestProto.newBuilder();
}
public CancelDelegationTokenRequestPBImpl (
CancelDelegationTokenRequestProto proto) {
this.proto = proto;
this.viaProto = true;
}
Token token;
@Override
public Token getDelegationToken() {
CancelDelegationTokenRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.token != null) {
return this.token;
}
this.token = convertFromProtoFormat(p.getToken());
return this.token;
}
@Override
public void setDelegationToken(Token token) {
maybeInitBuilder();
if (token == null)
builder.clearToken();
this.token = token;
}
@Override
public CancelDelegationTokenRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (token != null) {
builder.setToken(convertToProtoFormat(this.token));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = CancelDelegationTokenRequestProto.newBuilder(proto);
}
viaProto = false;
}
private TokenPBImpl convertFromProtoFormat(TokenProto p) {
return new TokenPBImpl(p);
}
private TokenProto convertToProtoFormat(Token t) {
return ((TokenPBImpl)t).getProto();
}
}
| 3,322 | 30.349057 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDiagnosticsResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetDiagnosticsResponsePBImpl extends ProtoBase<GetDiagnosticsResponseProto> implements GetDiagnosticsResponse {
GetDiagnosticsResponseProto proto = GetDiagnosticsResponseProto.getDefaultInstance();
GetDiagnosticsResponseProto.Builder builder = null;
boolean viaProto = false;
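// Locally cached diagnostics; lazily copied out of the proto on first access
// and merged back into the builder when getProto() is invoked.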
private List<String> diagnostics = null;
public GetDiagnosticsResponsePBImpl() {
builder = GetDiagnosticsResponseProto.newBuilder();
}
public GetDiagnosticsResponsePBImpl(GetDiagnosticsResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public GetDiagnosticsResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.diagnostics != null) {
addDiagnosticsToProto();
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetDiagnosticsResponseProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public List<String> getDiagnosticsList() {
initDiagnostics();
return this.diagnostics;
}
@Override
public String getDiagnostics(int index) {
initDiagnostics();
return this.diagnostics.get(index);
}
@Override
public int getDiagnosticsCount() {
initDiagnostics();
return this.diagnostics.size();
}
private void initDiagnostics() {
if (this.diagnostics != null) {
return;
}
GetDiagnosticsResponseProtoOrBuilder p = viaProto ? proto : builder;
List<String> list = p.getDiagnosticsList();
this.diagnostics = new ArrayList<String>();
for (String c : list) {
this.diagnostics.add(c);
}
}
@Override
public void addAllDiagnostics(final List<String> diagnostics) {
if (diagnostics == null)
return;
initDiagnostics();
this.diagnostics.addAll(diagnostics);
}
private void addDiagnosticsToProto() {
maybeInitBuilder();
builder.clearDiagnostics();
if (diagnostics == null)
return;
builder.addAllDiagnostics(diagnostics);
}
@Override
public void addDiagnostics(String diagnostics) {
initDiagnostics();
this.diagnostics.add(diagnostics);
}
@Override
public void removeDiagnostics(int index) {
initDiagnostics();
this.diagnostics.remove(index);
}
@Override
public void clearDiagnostics() {
initDiagnostics();
this.diagnostics.clear();
}
}
| 3,896 | 27.035971 | 124 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/FailTaskAttemptRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptIdPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class FailTaskAttemptRequestPBImpl extends ProtoBase<FailTaskAttemptRequestProto> implements FailTaskAttemptRequest {
FailTaskAttemptRequestProto proto = FailTaskAttemptRequestProto.getDefaultInstance();
FailTaskAttemptRequestProto.Builder builder = null;
boolean viaProto = false;
private TaskAttemptId taskAttemptId = null;
public FailTaskAttemptRequestPBImpl() {
builder = FailTaskAttemptRequestProto.newBuilder();
}
public FailTaskAttemptRequestPBImpl(FailTaskAttemptRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public FailTaskAttemptRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskAttemptId != null) {
builder.setTaskAttemptId(convertToProtoFormat(this.taskAttemptId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = FailTaskAttemptRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public TaskAttemptId getTaskAttemptId() {
FailTaskAttemptRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskAttemptId != null) {
return this.taskAttemptId;
}
if (!p.hasTaskAttemptId()) {
return null;
}
this.taskAttemptId = convertFromProtoFormat(p.getTaskAttemptId());
return this.taskAttemptId;
}
@Override
public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
maybeInitBuilder();
if (taskAttemptId == null)
builder.clearTaskAttemptId();
this.taskAttemptId = taskAttemptId;
}
private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
return new TaskAttemptIdPBImpl(p);
}
private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
return ((TaskAttemptIdPBImpl)t).getProto();
}
}
| 3,516 | 30.972727 | 124 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProtoOrBuilder;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl;
public class GetDelegationTokenResponsePBImpl extends
ProtoBase<GetDelegationTokenResponseProto> implements GetDelegationTokenResponse {
Token mrToken;
GetDelegationTokenResponseProto proto =
GetDelegationTokenResponseProto.getDefaultInstance();
GetDelegationTokenResponseProto.Builder builder = null;
boolean viaProto = false;
public GetDelegationTokenResponsePBImpl() {
builder = GetDelegationTokenResponseProto.newBuilder();
}
public GetDelegationTokenResponsePBImpl (
GetDelegationTokenResponseProto proto) {
this.proto = proto;
viaProto = true;
}
@Override
public Token getDelegationToken() {
GetDelegationTokenResponseProtoOrBuilder p = viaProto ? proto : builder;
if (this.mrToken != null) {
return this.mrToken;
}
if (!p.hasToken()) {
return null;
}
this.mrToken = convertFromProtoFormat(p.getToken());
return this.mrToken;
}
@Override
public void setDelegationToken(Token mrToken) {
maybeInitBuilder();
if (mrToken == null)
builder.clearToken();
this.mrToken = mrToken;
}
@Override
public GetDelegationTokenResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (mrToken != null) {
builder.setToken(convertToProtoFormat(this.mrToken));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetDelegationTokenResponseProto.newBuilder(proto);
}
viaProto = false;
}
private TokenPBImpl convertFromProtoFormat(TokenProto p) {
return new TokenPBImpl(p);
}
private TokenProto convertToProtoFormat(Token t) {
return ((TokenPBImpl)t).getProto();
}
}
| 3,398 | 30.472222 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/KillTaskAttemptRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskAttemptIdPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class KillTaskAttemptRequestPBImpl extends ProtoBase<KillTaskAttemptRequestProto> implements KillTaskAttemptRequest {
KillTaskAttemptRequestProto proto = KillTaskAttemptRequestProto.getDefaultInstance();
KillTaskAttemptRequestProto.Builder builder = null;
boolean viaProto = false;
private TaskAttemptId taskAttemptId = null;
public KillTaskAttemptRequestPBImpl() {
builder = KillTaskAttemptRequestProto.newBuilder();
}
public KillTaskAttemptRequestPBImpl(KillTaskAttemptRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public KillTaskAttemptRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.taskAttemptId != null) {
builder.setTaskAttemptId(convertToProtoFormat(this.taskAttemptId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = KillTaskAttemptRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public TaskAttemptId getTaskAttemptId() {
KillTaskAttemptRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.taskAttemptId != null) {
return this.taskAttemptId;
}
if (!p.hasTaskAttemptId()) {
return null;
}
this.taskAttemptId = convertFromProtoFormat(p.getTaskAttemptId());
return this.taskAttemptId;
}
@Override
public void setTaskAttemptId(TaskAttemptId taskAttemptId) {
maybeInitBuilder();
if (taskAttemptId == null)
builder.clearTaskAttemptId();
this.taskAttemptId = taskAttemptId;
}
private TaskAttemptIdPBImpl convertFromProtoFormat(TaskAttemptIdProto p) {
return new TaskAttemptIdPBImpl(p);
}
private TaskAttemptIdProto convertToProtoFormat(TaskAttemptId t) {
return ((TaskAttemptIdPBImpl)t).getProto();
}
}
| 3,516 | 30.972727 | 124 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetJobReportResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobReportPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobReportProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportResponseProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetJobReportResponsePBImpl extends ProtoBase<GetJobReportResponseProto> implements GetJobReportResponse {
GetJobReportResponseProto proto = GetJobReportResponseProto.getDefaultInstance();
GetJobReportResponseProto.Builder builder = null;
boolean viaProto = false;
private JobReport jobReport = null;
public GetJobReportResponsePBImpl() {
builder = GetJobReportResponseProto.newBuilder();
}
public GetJobReportResponsePBImpl(GetJobReportResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public GetJobReportResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.jobReport != null) {
builder.setJobReport(convertToProtoFormat(this.jobReport));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetJobReportResponseProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public JobReport getJobReport() {
GetJobReportResponseProtoOrBuilder p = viaProto ? proto : builder;
if (this.jobReport != null) {
return this.jobReport;
}
if (!p.hasJobReport()) {
return null;
}
this.jobReport = convertFromProtoFormat(p.getJobReport());
return this.jobReport;
}
@Override
public void setJobReport(JobReport jobReport) {
maybeInitBuilder();
if (jobReport == null)
builder.clearJobReport();
this.jobReport = jobReport;
}
private JobReportPBImpl convertFromProtoFormat(JobReportProto p) {
return new JobReportPBImpl(p);
}
private JobReportProto convertToProtoFormat(JobReport t) {
return ((JobReportPBImpl)t).getProto();
}
}
| 3,368 | 29.627273 | 118 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetCountersResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.CountersPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.CountersProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetCountersResponsePBImpl extends ProtoBase<GetCountersResponseProto> implements GetCountersResponse {
GetCountersResponseProto proto = GetCountersResponseProto.getDefaultInstance();
GetCountersResponseProto.Builder builder = null;
boolean viaProto = false;
private Counters counters = null;
public GetCountersResponsePBImpl() {
builder = GetCountersResponseProto.newBuilder();
}
public GetCountersResponsePBImpl(GetCountersResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public GetCountersResponseProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.counters != null) {
builder.setCounters(convertToProtoFormat(this.counters));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetCountersResponseProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public Counters getCounters() {
GetCountersResponseProtoOrBuilder p = viaProto ? proto : builder;
if (this.counters != null) {
return this.counters;
}
if (!p.hasCounters()) {
return null;
}
this.counters = convertFromProtoFormat(p.getCounters());
return this.counters;
}
@Override
public void setCounters(Counters counters) {
maybeInitBuilder();
if (counters == null)
builder.clearCounters();
this.counters = counters;
}
private CountersPBImpl convertFromProtoFormat(CountersProto p) {
return new CountersPBImpl(p);
}
private CountersProto convertToProtoFormat(Counters t) {
return ((CountersPBImpl)t).getProto();
}
}
| 3,323 | 29.218182 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/FailTaskAttemptResponsePBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class FailTaskAttemptResponsePBImpl extends ProtoBase<FailTaskAttemptResponseProto> implements FailTaskAttemptResponse {
FailTaskAttemptResponseProto proto = FailTaskAttemptResponseProto.getDefaultInstance();
FailTaskAttemptResponseProto.Builder builder = null;
boolean viaProto = false;
public FailTaskAttemptResponsePBImpl() {
builder = FailTaskAttemptResponseProto.newBuilder();
}
public FailTaskAttemptResponsePBImpl(FailTaskAttemptResponseProto proto) {
this.proto = proto;
viaProto = true;
}
public FailTaskAttemptResponseProto getProto() {
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = FailTaskAttemptResponseProto.newBuilder(proto);
}
viaProto = false;
}
}
| 1,976 | 34.303571 | 127 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskReportsRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskTypeProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsRequestProtoOrBuilder;
import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetTaskReportsRequestPBImpl extends ProtoBase<GetTaskReportsRequestProto> implements GetTaskReportsRequest {
GetTaskReportsRequestProto proto = GetTaskReportsRequestProto.getDefaultInstance();
GetTaskReportsRequestProto.Builder builder = null;
boolean viaProto = false;
private JobId jobId = null;
public GetTaskReportsRequestPBImpl() {
builder = GetTaskReportsRequestProto.newBuilder();
}
public GetTaskReportsRequestPBImpl(GetTaskReportsRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public GetTaskReportsRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.jobId != null) {
builder.setJobId(convertToProtoFormat(this.jobId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetTaskReportsRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public JobId getJobId() {
GetTaskReportsRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.jobId != null) {
return this.jobId;
}
if (!p.hasJobId()) {
return null;
}
this.jobId = convertFromProtoFormat(p.getJobId());
return this.jobId;
}
@Override
public void setJobId(JobId jobId) {
maybeInitBuilder();
if (jobId == null)
builder.clearJobId();
this.jobId = jobId;
}
@Override
public TaskType getTaskType() {
GetTaskReportsRequestProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasTaskType()) {
return null;
}
return convertFromProtoFormat(p.getTaskType());
}
@Override
public void setTaskType(TaskType taskType) {
maybeInitBuilder();
if (taskType == null) {
builder.clearTaskType();
return;
}
builder.setTaskType(convertToProtoFormat(taskType));
}
private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
return new JobIdPBImpl(p);
}
private JobIdProto convertToProtoFormat(JobId t) {
return ((JobIdPBImpl)t).getProto();
}
private TaskTypeProto convertToProtoFormat(TaskType e) {
return MRProtoUtils.convertToProtoFormat(e);
}
private TaskType convertFromProtoFormat(TaskTypeProto e) {
return MRProtoUtils.convertFromProtoFormat(e);
}
}
| 4,134 | 28.748201 | 121 |
java
|
hadoop
|
hadoop-master/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetTaskAttemptCompletionEventsRequestPBImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProtoOrBuilder;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoBase;
public class GetTaskAttemptCompletionEventsRequestPBImpl extends ProtoBase<GetTaskAttemptCompletionEventsRequestProto> implements GetTaskAttemptCompletionEventsRequest {
GetTaskAttemptCompletionEventsRequestProto proto = GetTaskAttemptCompletionEventsRequestProto.getDefaultInstance();
GetTaskAttemptCompletionEventsRequestProto.Builder builder = null;
boolean viaProto = false;
private JobId jobId = null;
public GetTaskAttemptCompletionEventsRequestPBImpl() {
builder = GetTaskAttemptCompletionEventsRequestProto.newBuilder();
}
public GetTaskAttemptCompletionEventsRequestPBImpl(GetTaskAttemptCompletionEventsRequestProto proto) {
this.proto = proto;
viaProto = true;
}
public GetTaskAttemptCompletionEventsRequestProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
private void mergeLocalToBuilder() {
if (this.jobId != null) {
builder.setJobId(convertToProtoFormat(this.jobId));
}
}
private void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
proto = builder.build();
viaProto = true;
}
private void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = GetTaskAttemptCompletionEventsRequestProto.newBuilder(proto);
}
viaProto = false;
}
@Override
public JobId getJobId() {
GetTaskAttemptCompletionEventsRequestProtoOrBuilder p = viaProto ? proto : builder;
if (this.jobId != null) {
return this.jobId;
}
if (!p.hasJobId()) {
return null;
}
this.jobId = convertFromProtoFormat(p.getJobId());
return this.jobId;
}
@Override
public void setJobId(JobId jobId) {
maybeInitBuilder();
if (jobId == null)
builder.clearJobId();
this.jobId = jobId;
}
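// fromEventId and maxEvents are primitive ints, so they are read from and
// written to the proto/builder directly rather than being cached locally.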
@Override
public int getFromEventId() {
GetTaskAttemptCompletionEventsRequestProtoOrBuilder p = viaProto ? proto : builder;
return (p.getFromEventId());
}
@Override
public void setFromEventId(int fromEventId) {
maybeInitBuilder();
builder.setFromEventId((fromEventId));
}
@Override
public int getMaxEvents() {
GetTaskAttemptCompletionEventsRequestProtoOrBuilder p = viaProto ? proto : builder;
return (p.getMaxEvents());
}
@Override
public void setMaxEvents(int maxEvents) {
maybeInitBuilder();
builder.setMaxEvents((maxEvents));
}
private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
return new JobIdPBImpl(p);
}
private JobIdProto convertToProtoFormat(JobId t) {
return ((JobIdPBImpl)t).getProto();
}
}
| 4,114 | 30.174242 | 169 |
java
|