Dataset columns (name, type, observed range):
  repo             string   length 1 to 191
  file             string   length 23 to 351
  code             string   length 0 to 5.32M
  file_length      int64    0 to 5.32M
  avg_line_length  float64  0 to 2.9k
  max_line_length  int64    0 to 288k
  extension_type   string   1 distinct value
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.ftp; import java.io.IOException; import java.io.InputStream; import org.apache.commons.net.ftp.FTPClient; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileSystem; @InterfaceAudience.Private @InterfaceStability.Unstable public class FTPInputStream extends FSInputStream { InputStream wrappedStream; FTPClient client; FileSystem.Statistics stats; boolean closed; long pos; public FTPInputStream(InputStream stream, FTPClient client, FileSystem.Statistics stats) { if (stream == null) { throw new IllegalArgumentException("Null InputStream"); } if (client == null || !client.isConnected()) { throw new IllegalArgumentException("FTP client null or not connected"); } this.wrappedStream = stream; this.client = client; this.stats = stats; this.pos = 0; this.closed = false; } @Override public long getPos() throws IOException { return pos; } // We don't support seek. @Override public void seek(long pos) throws IOException { throw new IOException("Seek not supported"); } @Override public boolean seekToNewSource(long targetPos) throws IOException { throw new IOException("Seek not supported"); } @Override public synchronized int read() throws IOException { if (closed) { throw new IOException("Stream closed"); } int byteRead = wrappedStream.read(); if (byteRead >= 0) { pos++; } if (stats != null && byteRead >= 0) { stats.incrementBytesRead(1); } return byteRead; } @Override public synchronized int read(byte buf[], int off, int len) throws IOException { if (closed) { throw new IOException("Stream closed"); } int result = wrappedStream.read(buf, off, len); if (result > 0) { pos += result; } if (stats != null && result > 0) { stats.incrementBytesRead(result); } return result; } @Override public synchronized void close() throws IOException { if (closed) { return; } super.close(); closed = true; if (!client.isConnected()) { throw new FTPException("Client not connected"); } boolean cmdCompleted = client.completePendingCommand(); client.logout(); client.disconnect(); if (!cmdCompleted) { throw new FTPException("Could not complete transfer, Reply Code - " + client.getReplyCode()); } } // Not supported. @Override public boolean markSupported() { return false; } @Override public void mark(int readLimit) { // Do nothing } @Override public void reset() throws IOException { throw new IOException("Mark not supported"); } }
3,652
25.092857
81
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.ftp; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.net.ConnectException; import java.net.URI; import com.google.common.base.Preconditions; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.net.ftp.FTP; import org.apache.commons.net.ftp.FTPClient; import org.apache.commons.net.ftp.FTPFile; import org.apache.commons.net.ftp.FTPReply; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.util.Progressable; /** * <p> * A {@link FileSystem} backed by an FTP client provided by <a * href="http://commons.apache.org/net/">Apache Commons Net</a>. * </p> */ @InterfaceAudience.Public @InterfaceStability.Stable public class FTPFileSystem extends FileSystem { public static final Log LOG = LogFactory .getLog(FTPFileSystem.class); public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024; public static final int DEFAULT_BLOCK_SIZE = 4 * 1024; public static final String FS_FTP_USER_PREFIX = "fs.ftp.user."; public static final String FS_FTP_HOST = "fs.ftp.host"; public static final String FS_FTP_HOST_PORT = "fs.ftp.host.port"; public static final String FS_FTP_PASSWORD_PREFIX = "fs.ftp.password."; public static final String E_SAME_DIRECTORY_ONLY = "only same directory renames are supported"; private URI uri; /** * Return the protocol scheme for the FileSystem. * <p/> * * @return <code>ftp</code> */ @Override public String getScheme() { return "ftp"; } /** * Get the default port for this FTPFileSystem. * * @return the default port */ @Override protected int getDefaultPort() { return FTP.DEFAULT_PORT; } @Override public void initialize(URI uri, Configuration conf) throws IOException { // get super.initialize(uri, conf); // get host information from uri (overrides info in conf) String host = uri.getHost(); host = (host == null) ? conf.get(FS_FTP_HOST, null) : host; if (host == null) { throw new IOException("Invalid host specified"); } conf.set(FS_FTP_HOST, host); // get port information from uri, (overrides info in conf) int port = uri.getPort(); port = (port == -1) ? 
FTP.DEFAULT_PORT : port; conf.setInt("fs.ftp.host.port", port); // get user/password information from URI (overrides info in conf) String userAndPassword = uri.getUserInfo(); if (userAndPassword == null) { userAndPassword = (conf.get("fs.ftp.user." + host, null) + ":" + conf .get("fs.ftp.password." + host, null)); } String[] userPasswdInfo = userAndPassword.split(":"); Preconditions.checkState(userPasswdInfo.length > 1, "Invalid username / password"); conf.set(FS_FTP_USER_PREFIX + host, userPasswdInfo[0]); conf.set(FS_FTP_PASSWORD_PREFIX + host, userPasswdInfo[1]); setConf(conf); this.uri = uri; } /** * Connect to the FTP server using configuration parameters * * * @return An FTPClient instance * @throws IOException */ private FTPClient connect() throws IOException { FTPClient client = null; Configuration conf = getConf(); String host = conf.get(FS_FTP_HOST); int port = conf.getInt(FS_FTP_HOST_PORT, FTP.DEFAULT_PORT); String user = conf.get(FS_FTP_USER_PREFIX + host); String password = conf.get(FS_FTP_PASSWORD_PREFIX + host); client = new FTPClient(); client.connect(host, port); int reply = client.getReplyCode(); if (!FTPReply.isPositiveCompletion(reply)) { throw NetUtils.wrapException(host, port, NetUtils.UNKNOWN_HOST, 0, new ConnectException("Server response " + reply)); } else if (client.login(user, password)) { client.setFileTransferMode(FTP.BLOCK_TRANSFER_MODE); client.setFileType(FTP.BINARY_FILE_TYPE); client.setBufferSize(DEFAULT_BUFFER_SIZE); } else { throw new IOException("Login failed on server - " + host + ", port - " + port + " as user '" + user + "'"); } return client; } /** * Logout and disconnect the given FTPClient. * * * @param client * @throws IOException */ private void disconnect(FTPClient client) throws IOException { if (client != null) { if (!client.isConnected()) { throw new FTPException("Client not connected"); } boolean logoutSuccess = client.logout(); client.disconnect(); if (!logoutSuccess) { LOG.warn("Logout failed while disconnecting, error code - " + client.getReplyCode()); } } } /** * Resolve against given working directory. * * * @param workDir * @param path * @return */ private Path makeAbsolute(Path workDir, Path path) { if (path.isAbsolute()) { return path; } return new Path(workDir, path); } @Override public FSDataInputStream open(Path file, int bufferSize) throws IOException { FTPClient client = connect(); Path workDir = new Path(client.printWorkingDirectory()); Path absolute = makeAbsolute(workDir, file); FileStatus fileStat = getFileStatus(client, absolute); if (fileStat.isDirectory()) { disconnect(client); throw new FileNotFoundException("Path " + file + " is a directory."); } client.allocate(bufferSize); Path parent = absolute.getParent(); // Change to parent directory on the // server. Only then can we read the // file // on the server by opening up an InputStream. As a side effect the working // directory on the server is changed to the parent directory of the file. // The FTP client connection is closed when close() is called on the // FSDataInputStream. client.changeWorkingDirectory(parent.toUri().getPath()); InputStream is = client.retrieveFileStream(file.getName()); FSDataInputStream fis = new FSDataInputStream(new FTPInputStream(is, client, statistics)); if (!FTPReply.isPositivePreliminary(client.getReplyCode())) { // The ftpClient is an inconsistent state. 
Must close the stream // which in turn will logout and disconnect from FTP server fis.close(); throw new IOException("Unable to open file: " + file + ", Aborting"); } return fis; } /** * A stream obtained via this call must be closed before using other APIs of * this class or else the invocation will block. */ @Override public FSDataOutputStream create(Path file, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { final FTPClient client = connect(); Path workDir = new Path(client.printWorkingDirectory()); Path absolute = makeAbsolute(workDir, file); FileStatus status; try { status = getFileStatus(client, file); } catch (FileNotFoundException fnfe) { status = null; } if (status != null) { if (overwrite && !status.isDirectory()) { delete(client, file, false); } else { disconnect(client); throw new FileAlreadyExistsException("File already exists: " + file); } } Path parent = absolute.getParent(); if (parent == null || !mkdirs(client, parent, FsPermission.getDirDefault())) { parent = (parent == null) ? new Path("/") : parent; disconnect(client); throw new IOException("create(): Mkdirs failed to create: " + parent); } client.allocate(bufferSize); // Change to parent directory on the server. Only then can we write to the // file on the server by opening up an OutputStream. As a side effect the // working directory on the server is changed to the parent directory of the // file. The FTP client connection is closed when close() is called on the // FSDataOutputStream. client.changeWorkingDirectory(parent.toUri().getPath()); FSDataOutputStream fos = new FSDataOutputStream(client.storeFileStream(file .getName()), statistics) { @Override public void close() throws IOException { super.close(); if (!client.isConnected()) { throw new FTPException("Client not connected"); } boolean cmdCompleted = client.completePendingCommand(); disconnect(client); if (!cmdCompleted) { throw new FTPException("Could not complete transfer, Reply Code - " + client.getReplyCode()); } } }; if (!FTPReply.isPositivePreliminary(client.getReplyCode())) { // The ftpClient is an inconsistent state. Must close the stream // which in turn will logout and disconnect from FTP server fos.close(); throw new IOException("Unable to create file: " + file + ", Aborting"); } return fos; } /** This optional operation is not yet supported. */ @Override public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { throw new IOException("Not supported"); } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. * @throws IOException on IO problems other than FileNotFoundException */ private boolean exists(FTPClient client, Path file) throws IOException { try { getFileStatus(client, file); return true; } catch (FileNotFoundException fnfe) { return false; } } @Override public boolean delete(Path file, boolean recursive) throws IOException { FTPClient client = connect(); try { boolean success = delete(client, file, recursive); return success; } finally { disconnect(client); } } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. 
*/ private boolean delete(FTPClient client, Path file, boolean recursive) throws IOException { Path workDir = new Path(client.printWorkingDirectory()); Path absolute = makeAbsolute(workDir, file); String pathName = absolute.toUri().getPath(); try { FileStatus fileStat = getFileStatus(client, absolute); if (fileStat.isFile()) { return client.deleteFile(pathName); } } catch (FileNotFoundException e) { //the file is not there return false; } FileStatus[] dirEntries = listStatus(client, absolute); if (dirEntries != null && dirEntries.length > 0 && !(recursive)) { throw new IOException("Directory: " + file + " is not empty."); } for (FileStatus dirEntry : dirEntries) { delete(client, new Path(absolute, dirEntry.getPath()), recursive); } return client.removeDirectory(pathName); } private FsAction getFsAction(int accessGroup, FTPFile ftpFile) { FsAction action = FsAction.NONE; if (ftpFile.hasPermission(accessGroup, FTPFile.READ_PERMISSION)) { action.or(FsAction.READ); } if (ftpFile.hasPermission(accessGroup, FTPFile.WRITE_PERMISSION)) { action.or(FsAction.WRITE); } if (ftpFile.hasPermission(accessGroup, FTPFile.EXECUTE_PERMISSION)) { action.or(FsAction.EXECUTE); } return action; } private FsPermission getPermissions(FTPFile ftpFile) { FsAction user, group, others; user = getFsAction(FTPFile.USER_ACCESS, ftpFile); group = getFsAction(FTPFile.GROUP_ACCESS, ftpFile); others = getFsAction(FTPFile.WORLD_ACCESS, ftpFile); return new FsPermission(user, group, others); } @Override public URI getUri() { return uri; } @Override public FileStatus[] listStatus(Path file) throws IOException { FTPClient client = connect(); try { FileStatus[] stats = listStatus(client, file); return stats; } finally { disconnect(client); } } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. */ private FileStatus[] listStatus(FTPClient client, Path file) throws IOException { Path workDir = new Path(client.printWorkingDirectory()); Path absolute = makeAbsolute(workDir, file); FileStatus fileStat = getFileStatus(client, absolute); if (fileStat.isFile()) { return new FileStatus[] { fileStat }; } FTPFile[] ftpFiles = client.listFiles(absolute.toUri().getPath()); FileStatus[] fileStats = new FileStatus[ftpFiles.length]; for (int i = 0; i < ftpFiles.length; i++) { fileStats[i] = getFileStatus(ftpFiles[i], absolute); } return fileStats; } @Override public FileStatus getFileStatus(Path file) throws IOException { FTPClient client = connect(); try { FileStatus status = getFileStatus(client, file); return status; } finally { disconnect(client); } } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. */ private FileStatus getFileStatus(FTPClient client, Path file) throws IOException { FileStatus fileStat = null; Path workDir = new Path(client.printWorkingDirectory()); Path absolute = makeAbsolute(workDir, file); Path parentPath = absolute.getParent(); if (parentPath == null) { // root dir long length = -1; // Length of root dir on server not known boolean isDir = true; int blockReplication = 1; long blockSize = DEFAULT_BLOCK_SIZE; // Block Size not known. long modTime = -1; // Modification time of root dir not known. 
Path root = new Path("/"); return new FileStatus(length, isDir, blockReplication, blockSize, modTime, root.makeQualified(this)); } String pathName = parentPath.toUri().getPath(); FTPFile[] ftpFiles = client.listFiles(pathName); if (ftpFiles != null) { for (FTPFile ftpFile : ftpFiles) { if (ftpFile.getName().equals(file.getName())) { // file found in dir fileStat = getFileStatus(ftpFile, parentPath); break; } } if (fileStat == null) { throw new FileNotFoundException("File " + file + " does not exist."); } } else { throw new FileNotFoundException("File " + file + " does not exist."); } return fileStat; } /** * Convert the file information in FTPFile to a {@link FileStatus} object. * * * @param ftpFile * @param parentPath * @return FileStatus */ private FileStatus getFileStatus(FTPFile ftpFile, Path parentPath) { long length = ftpFile.getSize(); boolean isDir = ftpFile.isDirectory(); int blockReplication = 1; // Using default block size since there is no way in FTP client to know of // block sizes on server. The assumption could be less than ideal. long blockSize = DEFAULT_BLOCK_SIZE; long modTime = ftpFile.getTimestamp().getTimeInMillis(); long accessTime = 0; FsPermission permission = getPermissions(ftpFile); String user = ftpFile.getUser(); String group = ftpFile.getGroup(); Path filePath = new Path(parentPath, ftpFile.getName()); return new FileStatus(length, isDir, blockReplication, blockSize, modTime, accessTime, permission, user, group, filePath.makeQualified(this)); } @Override public boolean mkdirs(Path file, FsPermission permission) throws IOException { FTPClient client = connect(); try { boolean success = mkdirs(client, file, permission); return success; } finally { disconnect(client); } } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. */ private boolean mkdirs(FTPClient client, Path file, FsPermission permission) throws IOException { boolean created = true; Path workDir = new Path(client.printWorkingDirectory()); Path absolute = makeAbsolute(workDir, file); String pathName = absolute.getName(); if (!exists(client, absolute)) { Path parent = absolute.getParent(); created = (parent == null || mkdirs(client, parent, FsPermission .getDirDefault())); if (created) { String parentDir = parent.toUri().getPath(); client.changeWorkingDirectory(parentDir); created = created && client.makeDirectory(pathName); } } else if (isFile(client, absolute)) { throw new ParentNotDirectoryException(String.format( "Can't make directory for path %s since it is a file.", absolute)); } return created; } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. */ private boolean isFile(FTPClient client, Path file) { try { return getFileStatus(client, file).isFile(); } catch (FileNotFoundException e) { return false; // file does not exist } catch (IOException ioe) { throw new FTPException("File check failed", ioe); } } /* * Assuming that parent of both source and destination is the same. Is the * assumption correct or it is suppose to work like 'move' ? 
*/ @Override public boolean rename(Path src, Path dst) throws IOException { FTPClient client = connect(); try { boolean success = rename(client, src, dst); return success; } finally { disconnect(client); } } /** * Probe for a path being a parent of another * @param parent parent path * @param child possible child path * @return true if the parent's path matches the start of the child's */ private boolean isParentOf(Path parent, Path child) { URI parentURI = parent.toUri(); String parentPath = parentURI.getPath(); if (!parentPath.endsWith("/")) { parentPath += "/"; } URI childURI = child.toUri(); String childPath = childURI.getPath(); return childPath.startsWith(parentPath); } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. * * @param client * @param src * @param dst * @return * @throws IOException */ private boolean rename(FTPClient client, Path src, Path dst) throws IOException { Path workDir = new Path(client.printWorkingDirectory()); Path absoluteSrc = makeAbsolute(workDir, src); Path absoluteDst = makeAbsolute(workDir, dst); if (!exists(client, absoluteSrc)) { throw new FileNotFoundException("Source path " + src + " does not exist"); } if (isDirectory(absoluteDst)) { // destination is a directory: rename goes underneath it with the // source name absoluteDst = new Path(absoluteDst, absoluteSrc.getName()); } if (exists(client, absoluteDst)) { throw new FileAlreadyExistsException("Destination path " + dst + " already exists"); } String parentSrc = absoluteSrc.getParent().toUri().toString(); String parentDst = absoluteDst.getParent().toUri().toString(); if (isParentOf(absoluteSrc, absoluteDst)) { throw new IOException("Cannot rename " + absoluteSrc + " under itself" + " : "+ absoluteDst); } if (!parentSrc.equals(parentDst)) { throw new IOException("Cannot rename source: " + absoluteSrc + " to " + absoluteDst + " -"+ E_SAME_DIRECTORY_ONLY); } String from = absoluteSrc.getName(); String to = absoluteDst.getName(); client.changeWorkingDirectory(parentSrc); boolean renamed = client.rename(from, to); return renamed; } @Override public Path getWorkingDirectory() { // Return home directory always since we do not maintain state. return getHomeDirectory(); } @Override public Path getHomeDirectory() { FTPClient client = null; try { client = connect(); Path homeDir = new Path(client.printWorkingDirectory()); return homeDir; } catch (IOException ioe) { throw new FTPException("Failed to get home directory", ioe); } finally { try { disconnect(client); } catch (IOException ioe) { throw new FTPException("Failed to disconnect", ioe); } } } @Override public void setWorkingDirectory(Path newDir) { // we do not maintain the working directory state } }
22,370
33.791602
82
java
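The FTPFileSystem row above is the complete FileSystem implementation behind the ftp:// scheme. A minimal client-side sketch of driving it through the standard FileSystem API follows; the host, credentials and paths are placeholders, and the per-host fs.ftp.user. / fs.ftp.password. keys mirror FS_FTP_USER_PREFIX and FS_FTP_PASSWORD_PREFIX in the code (credentials may also be embedded in the URI as user:password@host).

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FtpFileSystemDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder host and credentials, set via the per-host keys used by initialize().
    // Storing a password in plain configuration is for illustration only.
    conf.set("fs.ftp.host", "ftp.example.com");
    conf.set("fs.ftp.user.ftp.example.com", "anonymous");
    conf.set("fs.ftp.password.ftp.example.com", "guest");

    FileSystem fs = FileSystem.get(URI.create("ftp://ftp.example.com/"), conf);
    try {
      // Each public call opens and closes its own FTP connection.
      for (FileStatus st : fs.listStatus(new Path("/pub"))) {
        System.out.println(st.getPath() + " " + st.getPermission());
      }
      // Close the input stream before issuing further calls, and note that
      // seek() on the wrapped FTPInputStream throws IOException.
      try (FSDataInputStream in = fs.open(new Path("/pub/README"))) {
        byte[] buf = new byte[4096];
        int n = in.read(buf, 0, buf.length);
        System.out.println("read " + n + " bytes");
      }
    } finally {
      fs.close();
    }
  }
}

As the source notes, rename() only supports renames within the same directory (E_SAME_DIRECTORY_ONLY), and append() is not implemented.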
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FtpFs.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.ftp; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import org.apache.commons.net.ftp.FTP; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.DelegateToFileSystem; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsServerDefaults; /** * The FtpFs implementation of AbstractFileSystem. * This impl delegates to the old FileSystem */ @InterfaceAudience.Private @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */ public class FtpFs extends DelegateToFileSystem { /** * This constructor has the signature needed by * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}. * * @param theUri which must be that of localFs * @param conf * @throws IOException * @throws URISyntaxException */ FtpFs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException { super(theUri, new FTPFileSystem(), conf, FsConstants.FTP_SCHEME, true); } @Override public int getUriDefaultPort() { return FTP.DEFAULT_PORT; } @Override public FsServerDefaults getServerDefaults() throws IOException { return FtpConfigKeys.getServerDefaults(); } }
2,237
33.96875
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FtpConfigKeys.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.ftp; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.util.DataChecksum; /** * This class contains constants for configuration keys used * in the ftp file system. * * Note that the settings for unimplemented features are ignored. * E.g. checksum related settings are just place holders. Even when * wrapped with {@link ChecksumFileSystem}, these settings are not * used. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class FtpConfigKeys extends CommonConfigurationKeys { public static final String BLOCK_SIZE_KEY = "ftp.blocksize"; public static final long BLOCK_SIZE_DEFAULT = 4*1024; public static final String REPLICATION_KEY = "ftp.replication"; public static final short REPLICATION_DEFAULT = 1; public static final String STREAM_BUFFER_SIZE_KEY = "ftp.stream-buffer-size"; public static final int STREAM_BUFFER_SIZE_DEFAULT = 1024*1024; public static final String BYTES_PER_CHECKSUM_KEY = "ftp.bytes-per-checksum"; public static final int BYTES_PER_CHECKSUM_DEFAULT = 512; public static final String CLIENT_WRITE_PACKET_SIZE_KEY = "ftp.client-write-packet-size"; public static final int CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024; public static final boolean ENCRYPT_DATA_TRANSFER_DEFAULT = false; public static final long FS_TRASH_INTERVAL_DEFAULT = 0; public static final DataChecksum.Type CHECKSUM_TYPE_DEFAULT = DataChecksum.Type.CRC32; protected static FsServerDefaults getServerDefaults() throws IOException { return new FsServerDefaults( BLOCK_SIZE_DEFAULT, BYTES_PER_CHECKSUM_DEFAULT, CLIENT_WRITE_PACKET_SIZE_DEFAULT, REPLICATION_DEFAULT, STREAM_BUFFER_SIZE_DEFAULT, ENCRYPT_DATA_TRANSFER_DEFAULT, FS_TRASH_INTERVAL_DEFAULT, CHECKSUM_TYPE_DEFAULT); } }
3,049
41.957746
79
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.ftp; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A class to wrap a {@link Throwable} into a Runtime Exception. */ @InterfaceAudience.Public @InterfaceStability.Stable public class FTPException extends RuntimeException { private static final long serialVersionUID = 1L; public FTPException(String message) { super(message); } public FTPException(Throwable t) { super(t); } public FTPException(String message, Throwable t) { super(message, t); } }
1,397
30.772727
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.*; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; /** * Store permission related information. */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Unstable public class PermissionStatus implements Writable { static final WritableFactory FACTORY = new WritableFactory() { @Override public Writable newInstance() { return new PermissionStatus(); } }; static { // register a ctor WritableFactories.setFactory(PermissionStatus.class, FACTORY); } /** Create an immutable {@link PermissionStatus} object. */ public static PermissionStatus createImmutable( String user, String group, FsPermission permission) { return new PermissionStatus(user, group, permission) { @Override public PermissionStatus applyUMask(FsPermission umask) { throw new UnsupportedOperationException(); } @Override public void readFields(DataInput in) throws IOException { throw new UnsupportedOperationException(); } }; } private String username; private String groupname; private FsPermission permission; private PermissionStatus() {} /** Constructor */ public PermissionStatus(String user, String group, FsPermission permission) { username = user; groupname = group; this.permission = permission; } /** Return user name */ public String getUserName() {return username;} /** Return group name */ public String getGroupName() {return groupname;} /** Return permission */ public FsPermission getPermission() {return permission;} /** * Apply umask. * @see FsPermission#applyUMask(FsPermission) */ public PermissionStatus applyUMask(FsPermission umask) { permission = permission.applyUMask(umask); return this; } @Override public void readFields(DataInput in) throws IOException { username = Text.readString(in, Text.DEFAULT_MAX_LEN); groupname = Text.readString(in, Text.DEFAULT_MAX_LEN); permission = FsPermission.read(in); } @Override public void write(DataOutput out) throws IOException { write(out, username, groupname, permission); } /** * Create and initialize a {@link PermissionStatus} from {@link DataInput}. */ public static PermissionStatus read(DataInput in) throws IOException { PermissionStatus p = new PermissionStatus(); p.readFields(in); return p; } /** * Serialize a {@link PermissionStatus} from its base components. 
*/ public static void write(DataOutput out, String username, String groupname, FsPermission permission) throws IOException { Text.writeString(out, username, Text.DEFAULT_MAX_LEN); Text.writeString(out, groupname, Text.DEFAULT_MAX_LEN); permission.write(out); } @Override public String toString() { return username + ":" + groupname + ":" + permission; } }
3,989
30.666667
79
java
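PermissionStatus above is a simple Writable triple of user, group and mode. A small round-trip sketch (the user and group names are made up):

import java.io.*;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;

public class PermissionStatusDemo {
  public static void main(String[] args) throws IOException {
    PermissionStatus ps =
        new PermissionStatus("alice", "staff", new FsPermission((short) 0644));

    // Writable round trip: write(out) then read(in) restores user, group and mode.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    ps.write(new DataOutputStream(bytes));
    PermissionStatus copy = PermissionStatus.read(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy);  // alice:staff:rw-r--r--
  }
}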
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionParser.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Base class for parsing either chmod permissions or umask permissions. * Includes common code needed by either operation as implemented in * UmaskParser and ChmodParser classes. */ @InterfaceAudience.Private @InterfaceStability.Unstable class PermissionParser { protected boolean symbolic = false; protected short userMode; protected short groupMode; protected short othersMode; protected short stickyMode; protected char userType = '+'; protected char groupType = '+'; protected char othersType = '+'; protected char stickyBitType = '+'; /** * Begin parsing permission stored in modeStr * * @param modeStr Permission mode, either octal or symbolic * @param symbolic Use-case specific symbolic pattern to match against * @throws IllegalArgumentException if unable to parse modeStr */ public PermissionParser(String modeStr, Pattern symbolic, Pattern octal) throws IllegalArgumentException { Matcher matcher = null; if ((matcher = symbolic.matcher(modeStr)).find()) { applyNormalPattern(modeStr, matcher); } else if ((matcher = octal.matcher(modeStr)).matches()) { applyOctalPattern(modeStr, matcher); } else { throw new IllegalArgumentException(modeStr); } } private void applyNormalPattern(String modeStr, Matcher matcher) { // Are there multiple permissions stored in one chmod? boolean commaSeperated = false; for (int i = 0; i < 1 || matcher.end() < modeStr.length(); i++) { if (i > 0 && (!commaSeperated || !matcher.find())) { throw new IllegalArgumentException(modeStr); } /* * groups : 1 : [ugoa]* 2 : [+-=] 3 : [rwxXt]+ 4 : [,\s]* */ String str = matcher.group(2); char type = str.charAt(str.length() - 1); boolean user, group, others, stickyBit; user = group = others = stickyBit = false; for (char c : matcher.group(1).toCharArray()) { switch (c) { case 'u': user = true; break; case 'g': group = true; break; case 'o': others = true; break; case 'a': break; default: throw new RuntimeException("Unexpected"); } } if (!(user || group || others)) { // same as specifying 'a' user = group = others = true; } short mode = 0; for (char c : matcher.group(3).toCharArray()) { switch (c) { case 'r': mode |= 4; break; case 'w': mode |= 2; break; case 'x': mode |= 1; break; case 'X': mode |= 8; break; case 't': stickyBit = true; break; default: throw new RuntimeException("Unexpected"); } } if (user) { userMode = mode; userType = type; } if (group) { groupMode = mode; groupType = type; } if (others) { othersMode = mode; othersType = type; stickyMode = (short) (stickyBit ? 
1 : 0); stickyBitType = type; } commaSeperated = matcher.group(4).contains(","); } symbolic = true; } private void applyOctalPattern(String modeStr, Matcher matcher) { userType = groupType = othersType = '='; // Check if sticky bit is specified String sb = matcher.group(1); if (!sb.isEmpty()) { stickyMode = Short.valueOf(sb.substring(0, 1)); stickyBitType = '='; } String str = matcher.group(2); userMode = Short.valueOf(str.substring(0, 1)); groupMode = Short.valueOf(str.substring(1, 2)); othersMode = Short.valueOf(str.substring(2, 3)); } protected int combineModes(int existing, boolean exeOk) { return combineModeSegments(stickyBitType, stickyMode, (existing>>>9), false) << 9 | combineModeSegments(userType, userMode, (existing>>>6)&7, exeOk) << 6 | combineModeSegments(groupType, groupMode, (existing>>>3)&7, exeOk) << 3 | combineModeSegments(othersType, othersMode, existing&7, exeOk); } protected int combineModeSegments(char type, int mode, int existing, boolean exeOk) { boolean capX = false; if ((mode&8) != 0) { // convert X to x; capX = true; mode &= ~8; mode |= 1; } switch (type) { case '+' : mode = mode | existing; break; case '-' : mode = (~mode) & existing; break; case '=' : break; default : throw new RuntimeException("Unexpected"); } // if X is specified add 'x' only if exeOk or x was already set. if (capX && !exeOk && (mode&1) != 0 && (existing&1) == 0) { mode &= ~1; // remove x } return mode; } }
5,896
28.049261
76
java
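PermissionParser's core is combineModeSegments, which merges a requested 3-bit segment into an existing one according to the '+', '-' and '=' operators. The standalone sketch below re-derives that logic for one worked case; it omits the special 'X' handling and does not call the package-private class itself.

public class CombineModeDemo {
  // Simplified re-derivation of PermissionParser.combineModeSegments, for illustration.
  static int combine(char type, int mode, int existing) {
    switch (type) {
      case '+': return existing | mode;    // add the requested bits
      case '-': return existing & ~mode;   // clear the requested bits
      case '=': return mode;               // replace the segment outright
      default:  throw new IllegalArgumentException("type " + type);
    }
  }

  public static void main(String[] args) {
    // "g-w" applied to an existing 0775: only the group segment changes.
    int existing = 0775;
    int group = combine('-', 02, (existing >>> 3) & 7);   // 7 -> 5
    int result = (existing & ~(7 << 3)) | (group << 3);
    System.out.printf("%04o%n", result);                   // 0755
  }
}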
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AccessControlException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * An exception class for access control related issues. * @deprecated Use {@link org.apache.hadoop.security.AccessControlException} * instead. */ @Deprecated @InterfaceAudience.Public @InterfaceStability.Stable public class AccessControlException extends IOException { //Required by {@link java.io.Serializable}. private static final long serialVersionUID = 1L; /** * Default constructor is needed for unwrapping from * {@link org.apache.hadoop.ipc.RemoteException}. */ public AccessControlException() { super("Permission denied."); } /** * Constructs an {@link AccessControlException} * with the specified detail message. * @param s the detail message. */ public AccessControlException(String s) { super(s); } /** * Constructs a new exception with the specified cause and a detail * message of <tt>(cause==null ? null : cause.toString())</tt> (which * typically contains the class and detail message of <tt>cause</tt>). * @param cause the cause (which is saved for later retrieval by the * {@link #getCause()} method). (A <tt>null</tt> value is * permitted, and indicates that the cause is nonexistent or * unknown.) */ public AccessControlException(Throwable cause) { super(cause); } }
2,318
33.61194
77
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableFactories; import org.apache.hadoop.io.WritableFactory; /** * A class for file/directory permissions. */ @InterfaceAudience.Public @InterfaceStability.Stable public class FsPermission implements Writable { private static final Log LOG = LogFactory.getLog(FsPermission.class); static final WritableFactory FACTORY = new WritableFactory() { @Override public Writable newInstance() { return new FsPermission(); } }; static { // register a ctor WritableFactories.setFactory(FsPermission.class, FACTORY); WritableFactories.setFactory(ImmutableFsPermission.class, FACTORY); } /** Maximum acceptable length of a permission string to parse */ public static final int MAX_PERMISSION_LENGTH = 10; /** Create an immutable {@link FsPermission} object. */ public static FsPermission createImmutable(short permission) { return new ImmutableFsPermission(permission); } //POSIX permission style private FsAction useraction = null; private FsAction groupaction = null; private FsAction otheraction = null; private boolean stickyBit = false; private FsPermission() {} /** * Construct by the given {@link FsAction}. * @param u user action * @param g group action * @param o other action */ public FsPermission(FsAction u, FsAction g, FsAction o) { this(u, g, o, false); } public FsPermission(FsAction u, FsAction g, FsAction o, boolean sb) { set(u, g, o, sb); } /** * Construct by the given mode. * @param mode * @see #toShort() */ public FsPermission(short mode) { fromShort(mode); } /** * Copy constructor * * @param other other permission */ public FsPermission(FsPermission other) { this.useraction = other.useraction; this.groupaction = other.groupaction; this.otheraction = other.otheraction; this.stickyBit = other.stickyBit; } /** * Construct by given mode, either in octal or symbolic format. * @param mode mode as a string, either in octal or symbolic format * @throws IllegalArgumentException if <code>mode</code> is invalid */ public FsPermission(String mode) { this(new UmaskParser(mode).getUMask()); } /** Return user {@link FsAction}. */ public FsAction getUserAction() {return useraction;} /** Return group {@link FsAction}. */ public FsAction getGroupAction() {return groupaction;} /** Return other {@link FsAction}. 
*/ public FsAction getOtherAction() {return otheraction;} private void set(FsAction u, FsAction g, FsAction o, boolean sb) { useraction = u; groupaction = g; otheraction = o; stickyBit = sb; } public void fromShort(short n) { FsAction[] v = FSACTION_VALUES; set(v[(n >>> 6) & 7], v[(n >>> 3) & 7], v[n & 7], (((n >>> 9) & 1) == 1) ); } @Override public void write(DataOutput out) throws IOException { out.writeShort(toShort()); } @Override public void readFields(DataInput in) throws IOException { fromShort(in.readShort()); } /** * Create and initialize a {@link FsPermission} from {@link DataInput}. */ public static FsPermission read(DataInput in) throws IOException { FsPermission p = new FsPermission(); p.readFields(in); return p; } /** * Encode the object to a short. */ public short toShort() { int s = (stickyBit ? 1 << 9 : 0) | (useraction.ordinal() << 6) | (groupaction.ordinal() << 3) | otheraction.ordinal(); return (short)s; } /** * Encodes the object to a short. Unlike {@link #toShort()}, this method may * return values outside the fixed range 00000 - 01777 if extended features * are encoded into this permission, such as the ACL bit. * * @return short extended short representation of this permission */ public short toExtendedShort() { return toShort(); } @Override public boolean equals(Object obj) { if (obj instanceof FsPermission) { FsPermission that = (FsPermission)obj; return this.useraction == that.useraction && this.groupaction == that.groupaction && this.otheraction == that.otheraction && this.stickyBit == that.stickyBit; } return false; } @Override public int hashCode() {return toShort();} @Override public String toString() { String str = useraction.SYMBOL + groupaction.SYMBOL + otheraction.SYMBOL; if(stickyBit) { StringBuilder str2 = new StringBuilder(str); str2.replace(str2.length() - 1, str2.length(), otheraction.implies(FsAction.EXECUTE) ? "t" : "T"); str = str2.toString(); } return str; } /** * Apply a umask to this permission and return a new one. * * The umask is used by create, mkdir, and other Hadoop filesystem operations. * The mode argument for these operations is modified by removing the bits * which are set in the umask. Thus, the umask limits the permissions which * newly created files and directories get. * * @param umask The umask to use * * @return The effective permission */ public FsPermission applyUMask(FsPermission umask) { return new FsPermission(useraction.and(umask.useraction.not()), groupaction.and(umask.groupaction.not()), otheraction.and(umask.otheraction.not())); } /** umask property label deprecated key and code in getUMask method * to accommodate it may be removed in version .23 */ public static final String DEPRECATED_UMASK_LABEL = "dfs.umask"; public static final String UMASK_LABEL = CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY; public static final int DEFAULT_UMASK = CommonConfigurationKeys.FS_PERMISSIONS_UMASK_DEFAULT; private static final FsAction[] FSACTION_VALUES = FsAction.values(); /** * Get the user file creation mask (umask) * * {@code UMASK_LABEL} config param has umask value that is either symbolic * or octal. * * Symbolic umask is applied relative to file mode creation mask; * the permission op characters '+' clears the corresponding bit in the mask, * '-' sets bits in the mask. * * Octal umask, the specified bits are set in the file mode creation mask. * * {@code DEPRECATED_UMASK_LABEL} config param has umask value set to decimal. 
*/ public static FsPermission getUMask(Configuration conf) { int umask = DEFAULT_UMASK; // To ensure backward compatibility first use the deprecated key. // If the deprecated key is not present then check for the new key if(conf != null) { String confUmask = conf.get(UMASK_LABEL); int oldUmask = conf.getInt(DEPRECATED_UMASK_LABEL, Integer.MIN_VALUE); try { if(confUmask != null) { umask = new UmaskParser(confUmask).getUMask(); } } catch(IllegalArgumentException iae) { // Provide more explanation for user-facing message String type = iae instanceof NumberFormatException ? "decimal" : "octal or symbolic"; String error = "Unable to parse configuration " + UMASK_LABEL + " with value " + confUmask + " as " + type + " umask."; LOG.warn(error); // If oldUmask is not set, then throw the exception if (oldUmask == Integer.MIN_VALUE) { throw new IllegalArgumentException(error); } } if(oldUmask != Integer.MIN_VALUE) { // Property was set with old key if (umask != oldUmask) { LOG.warn(DEPRECATED_UMASK_LABEL + " configuration key is deprecated. " + "Convert to " + UMASK_LABEL + ", using octal or symbolic umask " + "specifications."); // Old and new umask values do not match - Use old umask umask = oldUmask; } } } return new FsPermission((short)umask); } public boolean getStickyBit() { return stickyBit; } /** * Returns true if there is also an ACL (access control list). * * @return boolean true if there is also an ACL (access control list). */ public boolean getAclBit() { // File system subclasses that support the ACL bit would override this. return false; } /** * Returns true if the file is encrypted or directory is in an encryption zone */ public boolean getEncryptedBit() { return false; } /** Set the user file creation mask (umask) */ public static void setUMask(Configuration conf, FsPermission umask) { conf.set(UMASK_LABEL, String.format("%1$03o", umask.toShort())); conf.setInt(DEPRECATED_UMASK_LABEL, umask.toShort()); } /** * Get the default permission for directory and symlink. * In previous versions, this default permission was also used to * create files, so files created end up with ugo+x permission. * See HADOOP-9155 for detail. * Two new methods are added to solve this, please use * {@link FsPermission#getDirDefault()} for directory, and use * {@link FsPermission#getFileDefault()} for file. * This method is kept for compatibility. */ public static FsPermission getDefault() { return new FsPermission((short)00777); } /** * Get the default permission for directory. */ public static FsPermission getDirDefault() { return new FsPermission((short)00777); } /** * Get the default permission for file. */ public static FsPermission getFileDefault() { return new FsPermission((short)00666); } /** * Get the default permission for cache pools. */ public static FsPermission getCachePoolDefault() { return new FsPermission((short)00755); } /** * Create a FsPermission from a Unix symbolic permission string * @param unixSymbolicPermission e.g. "-rw-rw-rw-" */ public static FsPermission valueOf(String unixSymbolicPermission) { if (unixSymbolicPermission == null) { return null; } else if (unixSymbolicPermission.length() != MAX_PERMISSION_LENGTH) { throw new IllegalArgumentException(String.format( "length != %d(unixSymbolicPermission=%s)", MAX_PERMISSION_LENGTH, unixSymbolicPermission)); } int n = 0; for(int i = 1; i < unixSymbolicPermission.length(); i++) { n = n << 1; char c = unixSymbolicPermission.charAt(i); n += (c == '-' || c == 'T' || c == 'S') ? 
0: 1; } // Add sticky bit value if set if(unixSymbolicPermission.charAt(9) == 't' || unixSymbolicPermission.charAt(9) == 'T') n += 01000; return new FsPermission((short)n); } private static class ImmutableFsPermission extends FsPermission { public ImmutableFsPermission(short permission) { super(permission); } @Override public void readFields(DataInput in) throws IOException { throw new UnsupportedOperationException(); } } }
12,230
30.768831
80
java
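FsPermission packs the user, group and other actions plus the sticky bit into a short. A brief sketch of two common entry points, valueOf on an ls-style string and applyUMask as used by create() and mkdirs():

import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionDemo {
  public static void main(String[] args) {
    // Parse a Unix "ls -l" style string: exactly 10 characters including the type char.
    FsPermission p = FsPermission.valueOf("-rwxr-xr--");
    System.out.printf("%s = %04o%n", p, p.toShort());              // rwxr-xr-- = 0754

    // applyUMask is what file/directory creation does: bits set in the umask are cleared.
    FsPermission umask = new FsPermission((short) 022);
    System.out.println(FsPermission.getFileDefault().applyUMask(umask));  // rw-r--r--
  }
}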
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import com.google.common.base.Objects; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; /** * An AclStatus contains the ACL information of a specific file. AclStatus * instances are immutable. Use a {@link Builder} to create a new instance. */ @InterfaceAudience.Public @InterfaceStability.Evolving public class AclStatus { private final String owner; private final String group; private final boolean stickyBit; private final List<AclEntry> entries; private final FsPermission permission; /** * Returns the file owner. * * @return String file owner */ public String getOwner() { return owner; } /** * Returns the file group. * * @return String file group */ public String getGroup() { return group; } /** * Returns the sticky bit. * * @return boolean sticky bit */ public boolean isStickyBit() { return stickyBit; } /** * Returns the list of all ACL entries, ordered by their natural ordering. * * @return List<AclEntry> unmodifiable ordered list of all ACL entries */ public List<AclEntry> getEntries() { return entries; } /** * Returns the permission set for the path * @return {@link FsPermission} for the path */ public FsPermission getPermission() { return permission; } @Override public boolean equals(Object o) { if (o == null) { return false; } if (getClass() != o.getClass()) { return false; } AclStatus other = (AclStatus)o; return Objects.equal(owner, other.owner) && Objects.equal(group, other.group) && stickyBit == other.stickyBit && Objects.equal(entries, other.entries); } @Override public int hashCode() { return Objects.hashCode(owner, group, stickyBit, entries); } @Override public String toString() { return new StringBuilder() .append("owner: ").append(owner) .append(", group: ").append(group) .append(", acl: {") .append("entries: ").append(entries) .append(", stickyBit: ").append(stickyBit) .append('}') .toString(); } /** * Builder for creating new Acl instances. */ public static class Builder { private String owner; private String group; private boolean stickyBit; private List<AclEntry> entries = Lists.newArrayList(); private FsPermission permission = null; /** * Sets the file owner. * * @param owner String file owner * @return Builder this builder, for call chaining */ public Builder owner(String owner) { this.owner = owner; return this; } /** * Sets the file group. * * @param group String file group * @return Builder this builder, for call chaining */ public Builder group(String group) { this.group = group; return this; } /** * Adds an ACL entry. 
* * @param e AclEntry entry to add * @return Builder this builder, for call chaining */ public Builder addEntry(AclEntry e) { this.entries.add(e); return this; } /** * Adds a list of ACL entries. * * @param entries AclEntry entries to add * @return Builder this builder, for call chaining */ public Builder addEntries(Iterable<AclEntry> entries) { for (AclEntry e : entries) this.entries.add(e); return this; } /** * Sets sticky bit. If this method is not called, then the builder assumes * false. * * @param stickyBit * boolean sticky bit * @return Builder this builder, for call chaining */ public Builder stickyBit(boolean stickyBit) { this.stickyBit = stickyBit; return this; } /** * Sets the permission for the file. * @param permission */ public Builder setPermission(FsPermission permission) { this.permission = permission; return this; } /** * Builds a new AclStatus populated with the set properties. * * @return AclStatus new AclStatus */ public AclStatus build() { return new AclStatus(owner, group, stickyBit, entries, permission); } } /** * Private constructor. * * @param file Path file associated to this ACL * @param owner String file owner * @param group String file group * @param stickyBit the sticky bit * @param entries the ACL entries * @param permission permission of the path */ private AclStatus(String owner, String group, boolean stickyBit, Iterable<AclEntry> entries, FsPermission permission) { this.owner = owner; this.group = group; this.stickyBit = stickyBit; this.entries = Lists.newArrayList(entries); this.permission = permission; } /** * Get the effective permission for the AclEntry * @param entry AclEntry to get the effective action */ public FsAction getEffectivePermission(AclEntry entry) { return getEffectivePermission(entry, permission); } /** * Get the effective permission for the AclEntry. <br> * Recommended to use this API ONLY if client communicates with the old * NameNode, needs to pass the Permission for the path to get effective * permission, else use {@link AclStatus#getEffectivePermission(AclEntry)}. * @param entry AclEntry to get the effective action * @param permArg Permission for the path. However if the client is NOT * communicating with old namenode, then this argument will not have * any preference. * @return Returns the effective permission for the entry. * @throws IllegalArgumentException If the client communicating with old * namenode and permission is not passed as an argument. */ public FsAction getEffectivePermission(AclEntry entry, FsPermission permArg) throws IllegalArgumentException { // At least one permission bits should be available. Preconditions.checkArgument(this.permission != null || permArg != null, "Permission bits are not available to calculate effective permission"); if (this.permission != null) { // permission bits from server response will have the priority for // accuracy. permArg = this.permission; } if ((entry.getName() != null || entry.getType() == AclEntryType.GROUP)) { if (entry.getScope() == AclEntryScope.ACCESS) { FsAction entryPerm = entry.getPermission(); return entryPerm.and(permArg.getGroupAction()); } else { Preconditions.checkArgument(this.entries.contains(entry) && this.entries.size() >= 3, "Passed default ACL entry not found in the list of ACLs"); // default mask entry for effective permission calculation will be the // penultimate entry. This can be mask entry in case of extended ACLs. // In case of minimal ACL, this is the owner group entry, and we end up // intersecting group FsAction with itself, which is a no-op. 
FsAction defaultMask = this.entries.get(this.entries.size() - 2) .getPermission(); FsAction entryPerm = entry.getPermission(); return entryPerm.and(defaultMask); } } else { return entry.getPermission(); } } }
8,273
28.870036
79
java
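A minimal usage sketch for the AclStatus builder and effective-permission lookup above; the owner, group, and user names are made up, and the group bits of the FsPermission act as the ACL mask when computing the effective permission of a named entry.

import org.apache.hadoop.fs.permission.*;

public class AclStatusExample {
  public static void main(String[] args) {
    // Named-user access entry granting rwx (the name "bruce" is illustrative).
    AclEntry namedUser = new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.USER)
        .setName("bruce")
        .setPermission(FsAction.ALL)
        .build();

    AclStatus status = new AclStatus.Builder()
        .owner("hdfs")
        .group("supergroup")
        .addEntry(namedUser)
        // rwxr-x---: the r-x group bits double as the mask for named entries.
        .setPermission(new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE))
        .build();

    // rwx masked by r-x -> r-x
    System.out.println(status.getEffectivePermission(namedUser).SYMBOL);
  }
}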
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/UmaskParser.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import java.util.regex.Pattern; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Parse umask value provided as a string, either in octal or symbolic * format and return it as a short value. Umask values are slightly * different from standard modes as they cannot specify sticky bit * or X. * */ @InterfaceAudience.Private @InterfaceStability.Unstable class UmaskParser extends PermissionParser { private static Pattern chmodOctalPattern = Pattern.compile("^\\s*[+]?(0*)([0-7]{3})\\s*$"); // no leading 1 for sticky bit private static Pattern umaskSymbolicPattern = /* not allow X or t */ Pattern.compile("\\G\\s*([ugoa]*)([+=-]+)([rwx]*)([,\\s]*)\\s*"); final short umaskMode; public UmaskParser(String modeStr) throws IllegalArgumentException { super(modeStr, umaskSymbolicPattern, chmodOctalPattern); umaskMode = (short)combineModes(0, false); } /** * To be used for file/directory creation only. Symbolic umask is applied * relative to file mode creation mask; the permission op characters '+' * results in clearing the corresponding bit in the mask, '-' results in bits * for indicated permission to be set in the mask. * * For octal umask, the specified bits are set in the file mode creation mask. * * @return umask */ public short getUMask() { if (symbolic) { // Return the complement of octal equivalent of umask that was computed return (short) (~umaskMode & 0777); } return umaskMode; } }
2,431
36.415385
83
java
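A small sketch of the two umask syntaxes UmaskParser accepts, assuming it is placed in the org.apache.hadoop.fs.permission package (the class is package-private) and that PermissionParser.combineModes behaves as its callers above expect; both forms below should describe the same 027 mask.

package org.apache.hadoop.fs.permission;

public class UmaskParserExample {
  public static void main(String[] args) {
    // Octal form: the digits are the mask itself.
    short octal = new UmaskParser("027").getUMask();

    // Symbolic form lists the permissions to be granted; getUMask() returns
    // the complement, so "u=rwx,g=rx,o=" should also yield 027.
    short symbolic = new UmaskParser("u=rwx,g=rx,o=").getUMask();

    System.out.println(Integer.toOctalString(octal));     // 27
    System.out.println(Integer.toOctalString(symbolic));  // 27
  }
}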
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import java.util.Collections; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; /** * Groups a list of ACL entries into separate lists for access entries vs. * default entries. */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Unstable public final class ScopedAclEntries { private static final int PIVOT_NOT_FOUND = -1; private final List<AclEntry> accessEntries; private final List<AclEntry> defaultEntries; /** * Creates a new ScopedAclEntries from the given list. It is assumed that the * list is already sorted such that all access entries precede all default * entries. * * @param aclEntries List<AclEntry> to separate */ public ScopedAclEntries(List<AclEntry> aclEntries) { int pivot = calculatePivotOnDefaultEntries(aclEntries); if (pivot != PIVOT_NOT_FOUND) { accessEntries = pivot != 0 ? aclEntries.subList(0, pivot) : Collections.<AclEntry>emptyList(); defaultEntries = aclEntries.subList(pivot, aclEntries.size()); } else { accessEntries = aclEntries; defaultEntries = Collections.emptyList(); } } /** * Returns access entries. * * @return List<AclEntry> containing just access entries, or an empty list if * there are no access entries */ public List<AclEntry> getAccessEntries() { return accessEntries; } /** * Returns default entries. * * @return List<AclEntry> containing just default entries, or an empty list if * there are no default entries */ public List<AclEntry> getDefaultEntries() { return defaultEntries; } /** * Returns the pivot point in the list between the access entries and the * default entries. This is the index of the first element in the list that is * a default entry. * * @param aclBuilder ArrayList<AclEntry> containing entries to build * @return int pivot point, or -1 if list contains no default entries */ private static int calculatePivotOnDefaultEntries(List<AclEntry> aclBuilder) { for (int i = 0; i < aclBuilder.size(); ++i) { if (aclBuilder.get(i).getScope() == AclEntryScope.DEFAULT) { return i; } } return PIVOT_NOT_FOUND; } }
3,254
32.90625
81
java
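A brief sketch of splitting one sorted ACL list into its access and default scopes with ScopedAclEntries; the spec string is illustrative and is parsed with AclEntry.parseAclSpec from the same package.

import java.util.List;
import org.apache.hadoop.fs.permission.*;

public class ScopedAclEntriesExample {
  public static void main(String[] args) {
    // Access entries must precede default entries, as the constructor assumes.
    List<AclEntry> acl = AclEntry.parseAclSpec(
        "user::rwx,group::r-x,other::---,default:user::rwx,default:group::r-x",
        true);

    ScopedAclEntries scoped = new ScopedAclEntries(acl);
    System.out.println(scoped.getAccessEntries());   // [user::rwx, group::r-x, other::---]
    System.out.println(scoped.getDefaultEntries());  // [default:user::rwx, default:group::r-x]
  }
}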
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryType.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Specifies the type of an ACL entry. */ @InterfaceAudience.Public @InterfaceStability.Evolving public enum AclEntryType { /** * An ACL entry applied to a specific user. These ACL entries can be unnamed, * which applies to the file owner, or named, which applies to the specific * named user. */ USER, /** * An ACL entry applied to a specific group. These ACL entries can be * unnamed, which applies to the file's group, or named, which applies to the * specific named group. */ GROUP, /** * An ACL mask entry. Mask entries are unnamed. During permission checks, * the mask entry interacts with all ACL entries that are members of the group * class. This consists of all named user entries, the unnamed group entry, * and all named group entries. For each such entry, any permissions that are * absent from the mask entry are removed from the effective permissions used * during the permission check. */ MASK, /** * An ACL entry that applies to all other users that were not covered by one * of the more specific ACL entry types. */ OTHER; }
2,096
34.542373
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsAction.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * File system actions, e.g. read, write, etc. */ @InterfaceAudience.Public @InterfaceStability.Stable public enum FsAction { // POSIX style NONE("---"), EXECUTE("--x"), WRITE("-w-"), WRITE_EXECUTE("-wx"), READ("r--"), READ_EXECUTE("r-x"), READ_WRITE("rw-"), ALL("rwx"); /** Retain reference to value array. */ private final static FsAction[] vals = values(); /** Symbolic representation */ public final String SYMBOL; private FsAction(String s) { SYMBOL = s; } /** * Return true if this action implies that action. * @param that */ public boolean implies(FsAction that) { if (that != null) { return (ordinal() & that.ordinal()) == that.ordinal(); } return false; } /** AND operation. */ public FsAction and(FsAction that) { return vals[ordinal() & that.ordinal()]; } /** OR operation. */ public FsAction or(FsAction that) { return vals[ordinal() | that.ordinal()]; } /** NOT operation. */ public FsAction not() { return vals[7 - ordinal()]; } /** * Get the FsAction enum for String representation of permissions * * @param permission * 3-character string representation of permission. ex: rwx * @return Returns FsAction enum if the corresponding FsAction exists for permission. * Otherwise returns null */ public static FsAction getFsAction(String permission) { for (FsAction fsAction : vals) { if (fsAction.SYMBOL.equals(permission)) { return fsAction; } } return null; } }
2,530
27.122222
87
java
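A short sketch exercising the set-style helpers on FsAction; the printed values follow directly from the octal encoding of the enum ordinals.

import org.apache.hadoop.fs.permission.FsAction;

public class FsActionExample {
  public static void main(String[] args) {
    System.out.println(FsAction.READ_WRITE.implies(FsAction.READ));   // true
    System.out.println(FsAction.READ_EXECUTE.and(FsAction.WRITE));    // NONE
    System.out.println(FsAction.READ.or(FsAction.EXECUTE));           // READ_EXECUTE
    System.out.println(FsAction.READ_WRITE.not());                    // EXECUTE
    System.out.println(FsAction.getFsAction("r-x"));                  // READ_EXECUTE
  }
}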
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import java.util.Iterator; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import com.google.common.collect.Lists; /** * AclUtil contains utility methods for manipulating ACLs. */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Unstable public final class AclUtil { /** * Given permissions and extended ACL entries, returns the full logical ACL. * * @param perm FsPermission containing permissions * @param entries List<AclEntry> containing extended ACL entries * @return List<AclEntry> containing full logical ACL */ public static List<AclEntry> getAclFromPermAndEntries(FsPermission perm, List<AclEntry> entries) { List<AclEntry> acl = Lists.newArrayListWithCapacity(entries.size() + 3); // Owner entry implied by owner permission bits. acl.add(new AclEntry.Builder() .setScope(AclEntryScope.ACCESS) .setType(AclEntryType.USER) .setPermission(perm.getUserAction()) .build()); // All extended access ACL entries. boolean hasAccessAcl = false; Iterator<AclEntry> entryIter = entries.iterator(); AclEntry curEntry = null; while (entryIter.hasNext()) { curEntry = entryIter.next(); if (curEntry.getScope() == AclEntryScope.DEFAULT) { break; } hasAccessAcl = true; acl.add(curEntry); } // Mask entry implied by group permission bits, or group entry if there is // no access ACL (only default ACL). acl.add(new AclEntry.Builder() .setScope(AclEntryScope.ACCESS) .setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP) .setPermission(perm.getGroupAction()) .build()); // Other entry implied by other bits. acl.add(new AclEntry.Builder() .setScope(AclEntryScope.ACCESS) .setType(AclEntryType.OTHER) .setPermission(perm.getOtherAction()) .build()); // Default ACL entries. if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) { acl.add(curEntry); while (entryIter.hasNext()) { acl.add(entryIter.next()); } } return acl; } /** * Translates the given permission bits to the equivalent minimal ACL. 
* * @param perm FsPermission to translate * @return List<AclEntry> containing exactly 3 entries representing the owner, * group and other permissions */ public static List<AclEntry> getMinimalAcl(FsPermission perm) { return Lists.newArrayList( new AclEntry.Builder() .setScope(AclEntryScope.ACCESS) .setType(AclEntryType.USER) .setPermission(perm.getUserAction()) .build(), new AclEntry.Builder() .setScope(AclEntryScope.ACCESS) .setType(AclEntryType.GROUP) .setPermission(perm.getGroupAction()) .build(), new AclEntry.Builder() .setScope(AclEntryScope.ACCESS) .setType(AclEntryType.OTHER) .setPermission(perm.getOtherAction()) .build()); } /** * Checks if the given entries represent a minimal ACL (contains exactly 3 * entries). * * @param entries List<AclEntry> entries to check * @return boolean true if the entries represent a minimal ACL */ public static boolean isMinimalAcl(List<AclEntry> entries) { return entries.size() == 3; } /** * There is no reason to instantiate this class. */ private AclUtil() { } }
4,340
31.155556
80
java
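A brief sketch (the permission bits and the user name "diana" are illustrative) of expanding permission bits plus extended entries into the full logical ACL with AclUtil; note how the group bits become the mask entry once a named entry is present.

import java.util.List;
import org.apache.hadoop.fs.permission.*;

public class AclUtilExample {
  public static void main(String[] args) {
    FsPermission perm =
        new FsPermission(FsAction.ALL, FsAction.READ_EXECUTE, FsAction.NONE);  // rwxr-x---

    // Minimal ACL: exactly the owner, group and other entries.
    List<AclEntry> minimal = AclUtil.getMinimalAcl(perm);
    System.out.println(AclUtil.isMinimalAcl(minimal));   // true

    // One named-user entry turns the group bits into the mask entry.
    List<AclEntry> full = AclUtil.getAclFromPermAndEntries(
        perm, AclEntry.parseAclSpec("user:diana:rwx", true));
    System.out.println(AclEntry.aclSpecToString(full));
    // user::rwx,user:diana:rwx,mask::r-x,other::---
  }
}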
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import java.util.ArrayList; import java.util.Collection; import java.util.List; import com.google.common.base.Objects; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.util.StringUtils; /** * Defines a single entry in an ACL. An ACL entry has a type (user, group, * mask, or other), an optional name (referring to a specific user or group), a * set of permissions (any combination of read, write and execute), and a scope * (access or default). AclEntry instances are immutable. Use a {@link Builder} * to create a new instance. */ @InterfaceAudience.Public @InterfaceStability.Evolving public class AclEntry { private final AclEntryType type; private final String name; private final FsAction permission; private final AclEntryScope scope; /** * Returns the ACL entry type. * * @return AclEntryType ACL entry type */ public AclEntryType getType() { return type; } /** * Returns the optional ACL entry name. * * @return String ACL entry name, or null if undefined */ public String getName() { return name; } /** * Returns the set of permissions in the ACL entry. * * @return FsAction set of permissions in the ACL entry */ public FsAction getPermission() { return permission; } /** * Returns the scope of the ACL entry. * * @return AclEntryScope scope of the ACL entry */ public AclEntryScope getScope() { return scope; } @Override public boolean equals(Object o) { if (o == null) { return false; } if (getClass() != o.getClass()) { return false; } AclEntry other = (AclEntry)o; return Objects.equal(type, other.type) && Objects.equal(name, other.name) && Objects.equal(permission, other.permission) && Objects.equal(scope, other.scope); } @Override public int hashCode() { return Objects.hashCode(type, name, permission, scope); } @Override public String toString() { StringBuilder sb = new StringBuilder(); if (scope == AclEntryScope.DEFAULT) { sb.append("default:"); } if (type != null) { sb.append(StringUtils.toLowerCase(type.toString())); } sb.append(':'); if (name != null) { sb.append(name); } sb.append(':'); if (permission != null) { sb.append(permission.SYMBOL); } return sb.toString(); } /** * Builder for creating new AclEntry instances. */ public static class Builder { private AclEntryType type; private String name; private FsAction permission; private AclEntryScope scope = AclEntryScope.ACCESS; /** * Sets the ACL entry type. * * @param type AclEntryType ACL entry type * @return Builder this builder, for call chaining */ public Builder setType(AclEntryType type) { this.type = type; return this; } /** * Sets the optional ACL entry name. 
* * @param name String optional ACL entry name * @return Builder this builder, for call chaining */ public Builder setName(String name) { if (name != null && !name.isEmpty()) { this.name = name; } return this; } /** * Sets the set of permissions in the ACL entry. * * @param permission FsAction set of permissions in the ACL entry * @return Builder this builder, for call chaining */ public Builder setPermission(FsAction permission) { this.permission = permission; return this; } /** * Sets the scope of the ACL entry. If this method is not called, then the * builder assumes {@link AclEntryScope#ACCESS}. * * @param scope AclEntryScope scope of the ACL entry * @return Builder this builder, for call chaining */ public Builder setScope(AclEntryScope scope) { this.scope = scope; return this; } /** * Builds a new AclEntry populated with the set properties. * * @return AclEntry new AclEntry */ public AclEntry build() { return new AclEntry(type, name, permission, scope); } } /** * Private constructor. * * @param type AclEntryType ACL entry type * @param name String optional ACL entry name * @param permission FsAction set of permissions in the ACL entry * @param scope AclEntryScope scope of the ACL entry */ private AclEntry(AclEntryType type, String name, FsAction permission, AclEntryScope scope) { this.type = type; this.name = name; this.permission = permission; this.scope = scope; } /** * Parses a string representation of an ACL spec into a list of AclEntry * objects. Example: "user::rwx,user:foo:rw-,group::r--,other::---" * * @param aclSpec * String representation of an ACL spec. * @param includePermission * for setAcl operations this will be true. i.e. AclSpec should * include permissions.<br> * But for removeAcl operation it will be false. i.e. AclSpec should * not contain permissions.<br> * Example: "user:foo,group:bar" * @return Returns list of {@link AclEntry} parsed */ public static List<AclEntry> parseAclSpec(String aclSpec, boolean includePermission) { List<AclEntry> aclEntries = new ArrayList<AclEntry>(); Collection<String> aclStrings = StringUtils.getStringCollection(aclSpec, ","); for (String aclStr : aclStrings) { AclEntry aclEntry = parseAclEntry(aclStr, includePermission); aclEntries.add(aclEntry); } return aclEntries; } /** * Parses a string representation of an ACL into a AclEntry object.<br> * * @param aclStr * String representation of an ACL.<br> * Example: "user:foo:rw-" * @param includePermission * for setAcl operations this will be true. i.e. Acl should include * permissions.<br> * But for removeAcl operation it will be false. i.e. Acl should not * contain permissions.<br> * Example: "user:foo,group:bar,mask::" * @return Returns an {@link AclEntry} object */ public static AclEntry parseAclEntry(String aclStr, boolean includePermission) { AclEntry.Builder builder = new AclEntry.Builder(); // Here "::" represent one empty string. // StringUtils.getStringCollection() will ignore this. 
String[] split = aclStr.split(":"); if (split.length == 0) { throw new HadoopIllegalArgumentException("Invalid <aclSpec> : " + aclStr); } int index = 0; if ("default".equals(split[0])) { // default entry index++; builder.setScope(AclEntryScope.DEFAULT); } if (split.length <= index) { throw new HadoopIllegalArgumentException("Invalid <aclSpec> : " + aclStr); } AclEntryType aclType = null; try { aclType = Enum.valueOf( AclEntryType.class, StringUtils.toUpperCase(split[index])); builder.setType(aclType); index++; } catch (IllegalArgumentException iae) { throw new HadoopIllegalArgumentException( "Invalid type of acl in <aclSpec> :" + aclStr); } if (split.length > index) { String name = split[index]; if (!name.isEmpty()) { builder.setName(name); } index++; } if (includePermission) { if (split.length <= index) { throw new HadoopIllegalArgumentException("Invalid <aclSpec> : " + aclStr); } String permission = split[index]; FsAction fsAction = FsAction.getFsAction(permission); if (null == fsAction) { throw new HadoopIllegalArgumentException( "Invalid permission in <aclSpec> : " + aclStr); } builder.setPermission(fsAction); index++; } if (split.length > index) { throw new HadoopIllegalArgumentException("Invalid <aclSpec> : " + aclStr); } AclEntry aclEntry = builder.build(); return aclEntry; } /** * Convert a List of AclEntries into a string - the reverse of parseAclSpec. * @param aclSpec List of AclEntries to convert * @return String representation of aclSpec */ public static String aclSpecToString(List<AclEntry> aclSpec) { StringBuilder buf = new StringBuilder(); for ( AclEntry e : aclSpec ) { buf.append(e.toString()); buf.append(","); } return buf.substring(0, buf.length()-1); // remove last , } }
9,428
28.557994
94
java
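A short sketch of the ACL-spec string syntax that AclEntry parses and prints; the entry names are placeholders.

import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;

public class AclSpecExample {
  public static void main(String[] args) {
    // setfacl-style spec with permissions (includePermission = true).
    List<AclEntry> entries = AclEntry.parseAclSpec(
        "user::rwx,user:foo:rw-,group::r--,default:other::---", true);
    System.out.println(entries);
    // [user::rwx, user:foo:rw-, group::r--, default:other::---]

    // Removal specs carry no permission field (includePermission = false).
    List<AclEntry> toRemove = AclEntry.parseAclSpec("user:foo,group:bar", false);
    System.out.println(AclEntry.aclSpecToString(toRemove));   // user:foo:,group:bar:
  }
}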
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ChmodParser.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import java.util.regex.Pattern; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileStatus; /** * Parse a permission mode passed in from a chmod command and apply that * mode against an existing file. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class ChmodParser extends PermissionParser { private static Pattern chmodOctalPattern = Pattern.compile("^\\s*[+]?([01]?)([0-7]{3})\\s*$"); private static Pattern chmodNormalPattern = Pattern.compile("\\G\\s*([ugoa]*)([+=-]+)([rwxXt]+)([,\\s]*)\\s*"); public ChmodParser(String modeStr) throws IllegalArgumentException { super(modeStr, chmodNormalPattern, chmodOctalPattern); } /** * Apply permission against specified file and determine what the * new mode would be * @param file File against which to apply mode * @return File's new mode if applied. */ public short applyNewPermission(FileStatus file) { FsPermission perms = file.getPermission(); int existing = perms.toShort(); boolean exeOk = file.isDirectory() || (existing & 0111) != 0; return (short)combineModes(existing, exeOk); } }
2,078
36.125
75
java
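A small sketch applying a symbolic chmod mode to an existing file mode with ChmodParser; the FileStatus fields and path are made up, and the result assumes the usual chmod semantics implemented by PermissionParser.

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.ChmodParser;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class ChmodParserExample {
  public static void main(String[] args) {
    // A plain file currently at 644 (rw-r--r--).
    FsPermission current =
        new FsPermission(FsAction.READ_WRITE, FsAction.READ, FsAction.READ);
    FileStatus file = new FileStatus(0L, false, 1, 0L, 0L, 0L, current,
        "alice", "users", new Path("/tmp/example.txt"));

    // g+w,o-r: add group write, drop other read -> 660.
    short newMode = new ChmodParser("g+w,o-r").applyNewPermission(file);
    System.out.println(Integer.toOctalString(newMode));   // 660
  }
}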
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntryScope.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.permission; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Specifies the scope or intended usage of an ACL entry. */ @InterfaceAudience.Public @InterfaceStability.Evolving public enum AclEntryScope { /** * An ACL entry that is inspected during permission checks to enforce * permissions. */ ACCESS, /** * An ACL entry to be applied to a directory's children that do not otherwise * have their own ACL defined. Unlike an access ACL entry, a default ACL * entry is not inspected as part of permission enforcement on the directory * that owns it. */ DEFAULT; }
1,510
34.139535
79
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Unstable package org.apache.hadoop.fs.local; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
1,047
44.565217
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/LocalConfigKeys.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.local; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.util.DataChecksum; /** * This class contains constants for configuration keys used * in the local file system, raw local fs and checksum fs. * * Note that the settings for unimplemented features are ignored. * E.g. checksum related settings are just place holders. Even when * wrapped with {@link ChecksumFileSystem}, these settings are not * used. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class LocalConfigKeys extends CommonConfigurationKeys { public static final String BLOCK_SIZE_KEY = "file.blocksize"; public static final long BLOCK_SIZE_DEFAULT = 64*1024*1024; public static final String REPLICATION_KEY = "file.replication"; public static final short REPLICATION_DEFAULT = 1; public static final String STREAM_BUFFER_SIZE_KEY = "file.stream-buffer-size"; public static final int STREAM_BUFFER_SIZE_DEFAULT = 4096; public static final String BYTES_PER_CHECKSUM_KEY = "file.bytes-per-checksum"; public static final int BYTES_PER_CHECKSUM_DEFAULT = 512; public static final String CLIENT_WRITE_PACKET_SIZE_KEY = "file.client-write-packet-size"; public static final int CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024; public static final boolean ENCRYPT_DATA_TRANSFER_DEFAULT = false; public static final long FS_TRASH_INTERVAL_DEFAULT = 0; public static final DataChecksum.Type CHECKSUM_TYPE_DEFAULT = DataChecksum.Type.CRC32; public static FsServerDefaults getServerDefaults() throws IOException { return new FsServerDefaults( BLOCK_SIZE_DEFAULT, BYTES_PER_CHECKSUM_DEFAULT, CLIENT_WRITE_PACKET_SIZE_DEFAULT, REPLICATION_DEFAULT, STREAM_BUFFER_SIZE_DEFAULT, ENCRYPT_DATA_TRANSFER_DEFAULT, FS_TRASH_INTERVAL_DEFAULT, CHECKSUM_TYPE_DEFAULT); } }
2,956
41.242857
80
java
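A tiny sketch reading the placeholder server defaults that LocalConfigKeys exposes; the printed values mirror the constants above.

import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.local.LocalConfigKeys;

public class LocalDefaultsExample {
  public static void main(String[] args) throws Exception {
    FsServerDefaults d = LocalConfigKeys.getServerDefaults();
    System.out.println(d.getBlockSize());          // 67108864 (64 MB)
    System.out.println(d.getBytesPerChecksum());   // 512
    System.out.println(d.getWritePacketSize());    // 65536
  }
}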
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.local; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.DelegateToFileSystem; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.RawLocalFileSystem; /** * The RawLocalFs implementation of AbstractFileSystem. * This impl delegates to the old FileSystem */ @InterfaceAudience.Private @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */ public class RawLocalFs extends DelegateToFileSystem { RawLocalFs(final Configuration conf) throws IOException, URISyntaxException { this(FsConstants.LOCAL_FS_URI, conf); } /** * This constructor has the signature needed by * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}. * * @param theUri which must be that of localFs * @param conf * @throws IOException * @throws URISyntaxException */ RawLocalFs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException { super(theUri, new RawLocalFileSystem(), conf, FsConstants.LOCAL_FS_URI.getScheme(), false); } @Override public int getUriDefaultPort() { return -1; // No default port for file:/// } @Override public FsServerDefaults getServerDefaults() throws IOException { return LocalConfigKeys.getServerDefaults(); } @Override public boolean isValidName(String src) { // Different local file systems have different validation rules. Skip // validation here and just let the OS handle it. This is consistent with // RawLocalFileSystem. return true; } }
2,699
33.615385
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/LocalFs.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.local; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.ChecksumFs; /** * The LocalFs implementation of ChecksumFs. */ @InterfaceAudience.Private @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */ public class LocalFs extends ChecksumFs { LocalFs(final Configuration conf) throws IOException, URISyntaxException { super(new RawLocalFs(conf)); } /** * This constructor has the signature needed by * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}. * * @param theUri which must be that of localFs * @param conf * @throws IOException * @throws URISyntaxException */ LocalFs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException { this(conf); } }
1,888
33.981481
80
java
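Both LocalFs and RawLocalFs have package-private constructors and are normally reached through the FileContext/AbstractFileSystem layer rather than instantiated directly; a hedged sketch of that client-side route (the path is illustrative):

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class LocalFsClientExample {
  public static void main(String[] args) throws Exception {
    // Resolves the file:/// scheme to LocalFs via AbstractFileSystem.createFileSystem.
    FileContext fc = FileContext.getLocalFSFileContext();
    Path dir = new Path("file:///tmp/localfs-demo");
    fc.mkdir(dir, FsPermission.getDirDefault(), true);
    System.out.println(fc.util().exists(dir));   // true
  }
}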
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** SFTP FileSystem package. */ package org.apache.hadoop.fs.sftp;
872
44.947368
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPInputStream.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.sftp; import java.io.IOException; import java.io.InputStream; import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.util.StringUtils; import com.jcraft.jsch.ChannelSftp; import com.jcraft.jsch.JSchException; import com.jcraft.jsch.Session; /** SFTP FileSystem input stream. */ class SFTPInputStream extends FSInputStream { public static final String E_SEEK_NOTSUPPORTED = "Seek not supported"; public static final String E_CLIENT_NULL = "SFTP client null or not connected"; public static final String E_NULL_INPUTSTREAM = "Null InputStream"; public static final String E_STREAM_CLOSED = "Stream closed"; public static final String E_CLIENT_NOTCONNECTED = "Client not connected"; private InputStream wrappedStream; private ChannelSftp channel; private FileSystem.Statistics stats; private boolean closed; private long pos; SFTPInputStream(InputStream stream, ChannelSftp channel, FileSystem.Statistics stats) { if (stream == null) { throw new IllegalArgumentException(E_NULL_INPUTSTREAM); } if (channel == null || !channel.isConnected()) { throw new IllegalArgumentException(E_CLIENT_NULL); } this.wrappedStream = stream; this.channel = channel; this.stats = stats; this.pos = 0; this.closed = false; } @Override public void seek(long position) throws IOException { throw new IOException(E_SEEK_NOTSUPPORTED); } @Override public boolean seekToNewSource(long targetPos) throws IOException { throw new IOException(E_SEEK_NOTSUPPORTED); } @Override public long getPos() throws IOException { return pos; } @Override public synchronized int read() throws IOException { if (closed) { throw new IOException(E_STREAM_CLOSED); } int byteRead = wrappedStream.read(); if (byteRead >= 0) { pos++; } if (stats != null & byteRead >= 0) { stats.incrementBytesRead(1); } return byteRead; } public synchronized int read(byte[] buf, int off, int len) throws IOException { if (closed) { throw new IOException(E_STREAM_CLOSED); } int result = wrappedStream.read(buf, off, len); if (result > 0) { pos += result; } if (stats != null & result > 0) { stats.incrementBytesRead(result); } return result; } public synchronized void close() throws IOException { if (closed) { return; } super.close(); closed = true; if (!channel.isConnected()) { throw new IOException(E_CLIENT_NOTCONNECTED); } try { Session session = channel.getSession(); channel.disconnect(); session.disconnect(); } catch (JSchException e) { throw new IOException(StringUtils.stringifyException(e)); } } }
3,662
26.961832
76
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.sftp; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.net.URI; import java.net.URLDecoder; import java.util.ArrayList; import java.util.Vector; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Progressable; import com.jcraft.jsch.ChannelSftp; import com.jcraft.jsch.ChannelSftp.LsEntry; import com.jcraft.jsch.SftpATTRS; import com.jcraft.jsch.SftpException; /** SFTP FileSystem. */ public class SFTPFileSystem extends FileSystem { public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class); private SFTPConnectionPool connectionPool; private URI uri; private static final int DEFAULT_SFTP_PORT = 22; private static final int DEFAULT_MAX_CONNECTION = 5; public static final int DEFAULT_BUFFER_SIZE = 1024 * 1024; public static final int DEFAULT_BLOCK_SIZE = 4 * 1024; public static final String FS_SFTP_USER_PREFIX = "fs.sftp.user."; public static final String FS_SFTP_PASSWORD_PREFIX = "fs.sftp.password."; public static final String FS_SFTP_HOST = "fs.sftp.host"; public static final String FS_SFTP_HOST_PORT = "fs.sftp.host.port"; public static final String FS_SFTP_KEYFILE = "fs.sftp.keyfile"; public static final String FS_SFTP_CONNECTION_MAX = "fs.sftp.connection.max"; public static final String E_SAME_DIRECTORY_ONLY = "only same directory renames are supported"; public static final String E_HOST_NULL = "Invalid host specified"; public static final String E_USER_NULL = "No user specified for sftp connection. 
Expand URI or credential file."; public static final String E_PATH_DIR = "Path %s is a directory."; public static final String E_FILE_STATUS = "Failed to get file status"; public static final String E_FILE_NOTFOUND = "File %s does not exist."; public static final String E_FILE_EXIST = "File already exists: %s"; public static final String E_CREATE_DIR = "create(): Mkdirs failed to create: %s"; public static final String E_DIR_CREATE_FROMFILE = "Can't make directory for path %s since it is a file."; public static final String E_MAKE_DIR_FORPATH = "Can't make directory for path \"%s\" under \"%s\"."; public static final String E_DIR_NOTEMPTY = "Directory: %s is not empty."; public static final String E_FILE_CHECK_FAILED = "File check failed"; public static final String E_NOT_SUPPORTED = "Not supported"; public static final String E_SPATH_NOTEXIST = "Source path %s does not exist"; public static final String E_DPATH_EXIST = "Destination path %s already exist, cannot rename!"; public static final String E_FAILED_GETHOME = "Failed to get home directory"; public static final String E_FAILED_DISCONNECT = "Failed to disconnect"; /** * Set configuration from UI. * * @param uri * @param conf * @throws IOException */ private void setConfigurationFromURI(URI uriInfo, Configuration conf) throws IOException { // get host information from URI String host = uriInfo.getHost(); host = (host == null) ? conf.get(FS_SFTP_HOST, null) : host; if (host == null) { throw new IOException(E_HOST_NULL); } conf.set(FS_SFTP_HOST, host); int port = uriInfo.getPort(); port = (port == -1) ? conf.getInt(FS_SFTP_HOST_PORT, DEFAULT_SFTP_PORT) : port; conf.setInt(FS_SFTP_HOST_PORT, port); // get user/password information from URI String userAndPwdFromUri = uriInfo.getUserInfo(); if (userAndPwdFromUri != null) { String[] userPasswdInfo = userAndPwdFromUri.split(":"); String user = userPasswdInfo[0]; user = URLDecoder.decode(user, "UTF-8"); conf.set(FS_SFTP_USER_PREFIX + host, user); if (userPasswdInfo.length > 1) { conf.set(FS_SFTP_PASSWORD_PREFIX + host + "." + user, userPasswdInfo[1]); } } String user = conf.get(FS_SFTP_USER_PREFIX + host); if (user == null || user.equals("")) { throw new IllegalStateException(E_USER_NULL); } int connectionMax = conf.getInt(FS_SFTP_CONNECTION_MAX, DEFAULT_MAX_CONNECTION); connectionPool = new SFTPConnectionPool(connectionMax); } /** * Connecting by using configuration parameters. * * @return An FTPClient instance * @throws IOException */ private ChannelSftp connect() throws IOException { Configuration conf = getConf(); String host = conf.get(FS_SFTP_HOST, null); int port = conf.getInt(FS_SFTP_HOST_PORT, DEFAULT_SFTP_PORT); String user = conf.get(FS_SFTP_USER_PREFIX + host, null); String pwd = conf.get(FS_SFTP_PASSWORD_PREFIX + host + "." + user, null); String keyFile = conf.get(FS_SFTP_KEYFILE, null); ChannelSftp channel = connectionPool.connect(host, port, user, pwd, keyFile); return channel; } /** * Logout and disconnect the given channel. * * @param client * @throws IOException */ private void disconnect(ChannelSftp channel) throws IOException { connectionPool.disconnect(channel); } /** * Resolve against given working directory. * * @param workDir * @param path * @return absolute path */ private Path makeAbsolute(Path workDir, Path path) { if (path.isAbsolute()) { return path; } return new Path(workDir, path); } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. 
Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. * @throws IOException */ private boolean exists(ChannelSftp channel, Path file) throws IOException { try { getFileStatus(channel, file); return true; } catch (FileNotFoundException fnfe) { return false; } catch (IOException ioe) { throw new IOException(E_FILE_STATUS, ioe); } } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. */ @SuppressWarnings("unchecked") private FileStatus getFileStatus(ChannelSftp client, Path file) throws IOException { FileStatus fileStat = null; Path workDir; try { workDir = new Path(client.pwd()); } catch (SftpException e) { throw new IOException(e); } Path absolute = makeAbsolute(workDir, file); Path parentPath = absolute.getParent(); if (parentPath == null) { // root directory long length = -1; // Length of root directory on server not known boolean isDir = true; int blockReplication = 1; long blockSize = DEFAULT_BLOCK_SIZE; // Block Size not known. long modTime = -1; // Modification time of root directory not known. Path root = new Path("/"); return new FileStatus(length, isDir, blockReplication, blockSize, modTime, root.makeQualified(this.getUri(), this.getWorkingDirectory())); } String pathName = parentPath.toUri().getPath(); Vector<LsEntry> sftpFiles; try { sftpFiles = (Vector<LsEntry>) client.ls(pathName); } catch (SftpException e) { throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file)); } if (sftpFiles != null) { for (LsEntry sftpFile : sftpFiles) { if (sftpFile.getFilename().equals(file.getName())) { // file found in directory fileStat = getFileStatus(client, sftpFile, parentPath); break; } } if (fileStat == null) { throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file)); } } else { throw new FileNotFoundException(String.format(E_FILE_NOTFOUND, file)); } return fileStat; } /** * Convert the file information in LsEntry to a {@link FileStatus} object. * * * @param sftpFile * @param parentPath * @return file status * @throws IOException */ private FileStatus getFileStatus(ChannelSftp channel, LsEntry sftpFile, Path parentPath) throws IOException { SftpATTRS attr = sftpFile.getAttrs(); long length = attr.getSize(); boolean isDir = attr.isDir(); boolean isLink = attr.isLink(); if (isLink) { String link = parentPath.toUri().getPath() + "/" + sftpFile.getFilename(); try { link = channel.realpath(link); Path linkParent = new Path("/", link); FileStatus fstat = getFileStatus(channel, linkParent); isDir = fstat.isDirectory(); length = fstat.getLen(); } catch (Exception e) { throw new IOException(e); } } int blockReplication = 1; // Using default block size since there is no way in SFTP channel to know of // block sizes on server. The assumption could be less than ideal. long blockSize = DEFAULT_BLOCK_SIZE; long modTime = attr.getMTime() * 1000; // convert to milliseconds long accessTime = 0; FsPermission permission = getPermissions(sftpFile); // not be able to get the real user group name, just use the user and group // id String user = Integer.toString(attr.getUId()); String group = Integer.toString(attr.getGId()); Path filePath = new Path(parentPath, sftpFile.getFilename()); return new FileStatus(length, isDir, blockReplication, blockSize, modTime, accessTime, permission, user, group, filePath.makeQualified( this.getUri(), this.getWorkingDirectory())); } /** * Return file permission. 
* * @param sftpFile * @return file permission */ private FsPermission getPermissions(LsEntry sftpFile) { return new FsPermission((short) sftpFile.getAttrs().getPermissions()); } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. */ private boolean mkdirs(ChannelSftp client, Path file, FsPermission permission) throws IOException { boolean created = true; Path workDir; try { workDir = new Path(client.pwd()); } catch (SftpException e) { throw new IOException(e); } Path absolute = makeAbsolute(workDir, file); String pathName = absolute.getName(); if (!exists(client, absolute)) { Path parent = absolute.getParent(); created = (parent == null || mkdirs(client, parent, FsPermission.getDefault())); if (created) { String parentDir = parent.toUri().getPath(); boolean succeeded = true; try { client.cd(parentDir); client.mkdir(pathName); } catch (SftpException e) { throw new IOException(String.format(E_MAKE_DIR_FORPATH, pathName, parentDir)); } created = created & succeeded; } } else if (isFile(client, absolute)) { throw new IOException(String.format(E_DIR_CREATE_FROMFILE, absolute)); } return created; } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. * @throws IOException */ private boolean isFile(ChannelSftp channel, Path file) throws IOException { try { return !getFileStatus(channel, file).isDirectory(); } catch (FileNotFoundException e) { return false; // file does not exist } catch (IOException ioe) { throw new IOException(E_FILE_CHECK_FAILED, ioe); } } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. */ private boolean delete(ChannelSftp channel, Path file, boolean recursive) throws IOException { Path workDir; try { workDir = new Path(channel.pwd()); } catch (SftpException e) { throw new IOException(e); } Path absolute = makeAbsolute(workDir, file); String pathName = absolute.toUri().getPath(); FileStatus fileStat = null; try { fileStat = getFileStatus(channel, absolute); } catch (FileNotFoundException e) { // file not found, no need to delete, return true return false; } if (!fileStat.isDirectory()) { boolean status = true; try { channel.rm(pathName); } catch (SftpException e) { status = false; } return status; } else { boolean status = true; FileStatus[] dirEntries = listStatus(channel, absolute); if (dirEntries != null && dirEntries.length > 0) { if (!recursive) { throw new IOException(String.format(E_DIR_NOTEMPTY, file)); } for (int i = 0; i < dirEntries.length; ++i) { delete(channel, new Path(absolute, dirEntries[i].getPath()), recursive); } } try { channel.rmdir(pathName); } catch (SftpException e) { status = false; } return status; } } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. 
*/ @SuppressWarnings("unchecked") private FileStatus[] listStatus(ChannelSftp client, Path file) throws IOException { Path workDir; try { workDir = new Path(client.pwd()); } catch (SftpException e) { throw new IOException(e); } Path absolute = makeAbsolute(workDir, file); FileStatus fileStat = getFileStatus(client, absolute); if (!fileStat.isDirectory()) { return new FileStatus[] {fileStat}; } Vector<LsEntry> sftpFiles; try { sftpFiles = (Vector<LsEntry>) client.ls(absolute.toUri().getPath()); } catch (SftpException e) { throw new IOException(e); } ArrayList<FileStatus> fileStats = new ArrayList<FileStatus>(); for (int i = 0; i < sftpFiles.size(); i++) { LsEntry entry = sftpFiles.get(i); String fname = entry.getFilename(); // skip current and parent directory, ie. "." and ".." if (!".".equalsIgnoreCase(fname) && !"..".equalsIgnoreCase(fname)) { fileStats.add(getFileStatus(client, entry, absolute)); } } return fileStats.toArray(new FileStatus[fileStats.size()]); } /** * Convenience method, so that we don't open a new connection when using this * method from within another method. Otherwise every API invocation incurs * the overhead of opening/closing a TCP connection. * * @param channel * @param src * @param dst * @return rename successful? * @throws IOException */ private boolean rename(ChannelSftp channel, Path src, Path dst) throws IOException { Path workDir; try { workDir = new Path(channel.pwd()); } catch (SftpException e) { throw new IOException(e); } Path absoluteSrc = makeAbsolute(workDir, src); Path absoluteDst = makeAbsolute(workDir, dst); if (!exists(channel, absoluteSrc)) { throw new IOException(String.format(E_SPATH_NOTEXIST, src)); } if (exists(channel, absoluteDst)) { throw new IOException(String.format(E_DPATH_EXIST, dst)); } boolean renamed = true; try { channel.cd("/"); channel.rename(src.toUri().getPath(), dst.toUri().getPath()); } catch (SftpException e) { renamed = false; } return renamed; } @Override public void initialize(URI uriInfo, Configuration conf) throws IOException { super.initialize(uriInfo, conf); setConfigurationFromURI(uriInfo, conf); setConf(conf); this.uri = uriInfo; } @Override public URI getUri() { return uri; } @Override public FSDataInputStream open(Path f, int bufferSize) throws IOException { ChannelSftp channel = connect(); Path workDir; try { workDir = new Path(channel.pwd()); } catch (SftpException e) { throw new IOException(e); } Path absolute = makeAbsolute(workDir, f); FileStatus fileStat = getFileStatus(channel, absolute); if (fileStat.isDirectory()) { disconnect(channel); throw new IOException(String.format(E_PATH_DIR, f)); } InputStream is; try { // the path could be a symbolic link, so get the real path absolute = new Path("/", channel.realpath(absolute.toUri().getPath())); is = channel.get(absolute.toUri().getPath()); } catch (SftpException e) { throw new IOException(e); } FSDataInputStream fis = new FSDataInputStream(new SFTPInputStream(is, channel, statistics)); return fis; } /** * A stream obtained via this call must be closed before using other APIs of * this class or else the invocation will block. 
*/ @Override public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { final ChannelSftp client = connect(); Path workDir; try { workDir = new Path(client.pwd()); } catch (SftpException e) { throw new IOException(e); } Path absolute = makeAbsolute(workDir, f); if (exists(client, f)) { if (overwrite) { delete(client, f, false); } else { disconnect(client); throw new IOException(String.format(E_FILE_EXIST, f)); } } Path parent = absolute.getParent(); if (parent == null || !mkdirs(client, parent, FsPermission.getDefault())) { parent = (parent == null) ? new Path("/") : parent; disconnect(client); throw new IOException(String.format(E_CREATE_DIR, parent)); } OutputStream os; try { client.cd(parent.toUri().getPath()); os = client.put(f.getName()); } catch (SftpException e) { throw new IOException(e); } FSDataOutputStream fos = new FSDataOutputStream(os, statistics) { @Override public void close() throws IOException { super.close(); disconnect(client); } }; return fos; } @Override public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { throw new IOException(E_NOT_SUPPORTED); } /* * The parent of source and destination can be different. It is suppose to * work like 'move' */ @Override public boolean rename(Path src, Path dst) throws IOException { ChannelSftp channel = connect(); try { boolean success = rename(channel, src, dst); return success; } finally { disconnect(channel); } } @Override public boolean delete(Path f, boolean recursive) throws IOException { ChannelSftp channel = connect(); try { boolean success = delete(channel, f, recursive); return success; } finally { disconnect(channel); } } @Override public FileStatus[] listStatus(Path f) throws IOException { ChannelSftp client = connect(); try { FileStatus[] stats = listStatus(client, f); return stats; } finally { disconnect(client); } } @Override public void setWorkingDirectory(Path newDir) { // we do not maintain the working directory state } @Override public Path getWorkingDirectory() { // Return home directory always since we do not maintain state. return getHomeDirectory(); } @Override public Path getHomeDirectory() { ChannelSftp channel = null; try { channel = connect(); Path homeDir = new Path(channel.pwd()); return homeDir; } catch (Exception ioe) { return null; } finally { try { disconnect(channel); } catch (IOException ioe) { return null; } } } @Override public boolean mkdirs(Path f, FsPermission permission) throws IOException { ChannelSftp client = connect(); try { boolean success = mkdirs(client, f, permission); return success; } finally { disconnect(client); } } @Override public FileStatus getFileStatus(Path f) throws IOException { ChannelSftp channel = connect(); try { FileStatus status = getFileStatus(channel, f); return status; } finally { disconnect(channel); } } }
21,683
31.267857
80
java
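The SFTPFileSystem above is normally driven through the generic Hadoop FileSystem API rather than through ChannelSftp directly. Below is a minimal usage sketch, assuming the sftp:// scheme is mapped to this implementation and that the password or key file it needs has been configured separately; the host, user and remote path are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SftpFsUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder URI; host, user and port are hypothetical, and the
    // credential settings expected by SFTPFileSystem are omitted here.
    URI uri = URI.create("sftp://testuser@sftp.example.com:22/");
    FileSystem fs = FileSystem.get(uri, conf);
    Path file = new Path("/tmp/demo.txt");

    // Each call below opens its own ChannelSftp via connect()/disconnect().
    // The create()/open() streams must be closed before issuing further
    // calls, as noted in the javadoc above.
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.writeUTF("hello over sftp");
    }
    try (FSDataInputStream in = fs.open(file)) {
      System.out.println(in.readUTF());
    }
    fs.delete(file, false);
    fs.close();
  }
}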
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPConnectionPool.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.sftp; import java.io.IOException; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.util.StringUtils; import com.jcraft.jsch.ChannelSftp; import com.jcraft.jsch.JSch; import com.jcraft.jsch.JSchException; import com.jcraft.jsch.Session; /** Concurrent/Multiple Connections. */ class SFTPConnectionPool { public static final Log LOG = LogFactory.getLog(SFTPFileSystem.class); // Maximum number of allowed live connections. This doesn't mean we cannot // have more live connections. It means that when we have more // live connections than this threshold, any unused connection will be // closed. private int maxConnection; private int liveConnectionCount = 0; private HashMap<ConnectionInfo, HashSet<ChannelSftp>> idleConnections = new HashMap<ConnectionInfo, HashSet<ChannelSftp>>(); private HashMap<ChannelSftp, ConnectionInfo> con2infoMap = new HashMap<ChannelSftp, ConnectionInfo>(); SFTPConnectionPool(int maxConnection) { this.maxConnection = maxConnection; } synchronized ChannelSftp getFromPool(ConnectionInfo info) throws IOException { Set<ChannelSftp> cons = idleConnections.get(info); ChannelSftp channel; if (cons != null && cons.size() > 0) { Iterator<ChannelSftp> it = cons.iterator(); if (it.hasNext()) { channel = it.next(); idleConnections.remove(info); return channel; } else { throw new IOException("Connection pool error."); } } return null; } /** Add the channel into pool. * @param channel */ synchronized void returnToPool(ChannelSftp channel) { ConnectionInfo info = con2infoMap.get(channel); HashSet<ChannelSftp> cons = idleConnections.get(info); if (cons == null) { cons = new HashSet<ChannelSftp>(); idleConnections.put(info, cons); } cons.add(channel); } /** Shutdown the connection pool and close all open connections. */ synchronized void shutdown() { if (this.con2infoMap == null){ return; // already shutdown in case it is called } LOG.info("Inside shutdown, con2infoMap size=" + con2infoMap.size()); this.maxConnection = 0; Set<ChannelSftp> cons = con2infoMap.keySet(); if (cons != null && cons.size() > 0) { // make a copy since we need to modify the underlying Map Set<ChannelSftp> copy = new HashSet<ChannelSftp>(cons); // Initiate disconnect from all outstanding connections for (ChannelSftp con : copy) { try { disconnect(con); } catch (IOException ioe) { ConnectionInfo info = con2infoMap.get(con); LOG.error( "Error encountered while closing connection to " + info.getHost(), ioe); } } } // make sure no further connections can be returned. 
this.idleConnections = null; this.con2infoMap = null; } public synchronized int getMaxConnection() { return maxConnection; } public synchronized void setMaxConnection(int maxConn) { this.maxConnection = maxConn; } public ChannelSftp connect(String host, int port, String user, String password, String keyFile) throws IOException { // get connection from pool ConnectionInfo info = new ConnectionInfo(host, port, user); ChannelSftp channel = getFromPool(info); if (channel != null) { if (channel.isConnected()) { return channel; } else { channel = null; synchronized (this) { --liveConnectionCount; con2infoMap.remove(channel); } } } // create a new connection and add to pool JSch jsch = new JSch(); Session session = null; try { if (user == null || user.length() == 0) { user = System.getProperty("user.name"); } if (password == null) { password = ""; } if (keyFile != null && keyFile.length() > 0) { jsch.addIdentity(keyFile); } if (port <= 0) { session = jsch.getSession(user, host); } else { session = jsch.getSession(user, host, port); } session.setPassword(password); java.util.Properties config = new java.util.Properties(); config.put("StrictHostKeyChecking", "no"); session.setConfig(config); session.connect(); channel = (ChannelSftp) session.openChannel("sftp"); channel.connect(); synchronized (this) { con2infoMap.put(channel, info); liveConnectionCount++; } return channel; } catch (JSchException e) { throw new IOException(StringUtils.stringifyException(e)); } } void disconnect(ChannelSftp channel) throws IOException { if (channel != null) { // close connection if too many active connections boolean closeConnection = false; synchronized (this) { if (liveConnectionCount > maxConnection) { --liveConnectionCount; con2infoMap.remove(channel); closeConnection = true; } } if (closeConnection) { if (channel.isConnected()) { try { Session session = channel.getSession(); channel.disconnect(); session.disconnect(); } catch (JSchException e) { throw new IOException(StringUtils.stringifyException(e)); } } } else { returnToPool(channel); } } } public int getIdleCount() { return this.idleConnections.size(); } public int getLiveConnCount() { return this.liveConnectionCount; } public int getConnPoolSize() { return this.con2infoMap.size(); } /** * Class to capture the minimal set of information that distinguish * between different connections. */ static class ConnectionInfo { private String host = ""; private int port; private String user = ""; ConnectionInfo(String hst, int prt, String usr) { this.host = hst; this.port = prt; this.user = usr; } public String getHost() { return host; } public void setHost(String hst) { this.host = hst; } public int getPort() { return port; } public void setPort(int prt) { this.port = prt; } public String getUser() { return user; } public void setUser(String usr) { this.user = usr; } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj instanceof ConnectionInfo) { ConnectionInfo con = (ConnectionInfo) obj; boolean ret = true; if (this.host == null || !this.host.equalsIgnoreCase(con.host)) { ret = false; } if (this.port >= 0 && this.port != con.port) { ret = false; } if (this.user == null || !this.user.equalsIgnoreCase(con.user)) { ret = false; } return ret; } else { return false; } } @Override public int hashCode() { int hashCode = 0; if (host != null) { hashCode += host.hashCode(); } hashCode += port; if (user != null) { hashCode += user.hashCode(); } return hashCode; } } }
8,237
26.098684
80
java
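SFTPConnectionPool parks idle channels per (host, port, user) and only truly closes a channel when the live count is above maxConnection. The toy KeyedPool below restates just that policy with a generic AutoCloseable standing in for ChannelSftp; it is an illustrative sketch, not the class above.

import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.Map;

/**
 * Toy restatement of the pooling policy in SFTPConnectionPool: idle
 * resources are parked per key, and a released resource is only closed
 * once the live count exceeds the ceiling. C is a stand-in for ChannelSftp.
 */
class KeyedPool<K, C extends AutoCloseable> {
  private final int maxLive;
  private int live;
  private final Map<K, ArrayDeque<C>> idle = new HashMap<>();

  KeyedPool(int maxLive) {
    this.maxLive = maxLive;
  }

  /** Take an idle resource for this key, or null if the caller must create one. */
  synchronized C acquire(K key) {
    ArrayDeque<C> q = idle.get(key);
    return (q == null) ? null : q.poll();
  }

  /** Record that the caller created a brand new resource. */
  synchronized void created() {
    live++;
  }

  /** Return a resource: close it when over the ceiling, otherwise park it. */
  synchronized void release(K key, C c) throws Exception {
    if (live > maxLive) {
      live--;
      c.close();
    } else {
      idle.computeIfAbsent(key, k -> new ArrayDeque<>()).offer(c);
    }
  }
}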
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.viewfs; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.Collection; import java.util.EnumSet; import java.util.List; import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.BlockStoragePolicySpi; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.FsStatus; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; /** * <code>ChrootedFs</code> is a file system with its root some path * below the root of its base file system. * Example: For a base file system hdfs://nn1/ with chRoot at /usr/foo, the * members will be setup as shown below. * <ul> * <li>myFs is the base file system and points to hdfs at nn1</li> * <li>myURI is hdfs://nn1/user/foo</li> * <li>chRootPathPart is /user/foo</li> * <li>workingDir is a directory related to chRoot</li> * </ul> * * The paths are resolved as follows by ChRootedFileSystem: * <ul> * <li> Absolute path /a/b/c is resolved to /user/foo/a/b/c at myFs</li> * <li> Relative path x/y is resolved to /user/foo/<workingDir>/x/y</li> * </ul> * */ @InterfaceAudience.Private @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */ class ChRootedFs extends AbstractFileSystem { private final AbstractFileSystem myFs; // the base file system whose root is changed private final URI myUri; // the base URI + the chroot private final Path chRootPathPart; // the root below the root of the base private final String chRootPathPartString; protected AbstractFileSystem getMyFs() { return myFs; } /** * * @param path * @return return full path including the chroot */ protected Path fullPath(final Path path) { super.checkPath(path); return new Path((chRootPathPart.isRoot() ? 
"" : chRootPathPartString) + path.toUri().getPath()); } @Override public boolean isValidName(String src) { return myFs.isValidName(fullPath(new Path(src)).toUri().toString()); } public ChRootedFs(final AbstractFileSystem fs, final Path theRoot) throws URISyntaxException { super(fs.getUri(), fs.getUri().getScheme(), fs.getUri().getAuthority() != null, fs.getUriDefaultPort()); myFs = fs; myFs.checkPath(theRoot); chRootPathPart = new Path(myFs.getUriPath(theRoot)); chRootPathPartString = chRootPathPart.toUri().getPath(); /* * We are making URI include the chrootedPath: e.g. file:///chrootedPath. * This is questionable since Path#makeQualified(uri, path) ignores * the pathPart of a uri. Since this class is internal we can ignore * this issue but if we were to make it external then this needs * to be resolved. */ // Handle the two cases: // scheme:/// and scheme://authority/ myUri = new URI(myFs.getUri().toString() + (myFs.getUri().getAuthority() == null ? "" : Path.SEPARATOR) + chRootPathPart.toUri().getPath().substring(1)); super.checkPath(theRoot); } @Override public URI getUri() { return myUri; } /** * * Strip out the root from the path. * * @param p - fully qualified path p * @return - the remaining path without the begining / */ public String stripOutRoot(final Path p) { try { checkPath(p); } catch (IllegalArgumentException e) { throw new RuntimeException("Internal Error - path " + p + " should have been with URI" + myUri); } String pathPart = p.toUri().getPath(); return (pathPart.length() == chRootPathPartString.length()) ? "" : pathPart.substring(chRootPathPartString.length() + (chRootPathPart.isRoot() ? 0 : 1)); } @Override public Path getHomeDirectory() { return myFs.getHomeDirectory(); } @Override public Path getInitialWorkingDirectory() { /* * 3 choices here: return null or / or strip out the root out of myFs's * inital wd. 
* Only reasonable choice for initialWd for chrooted fds is null */ return null; } public Path getResolvedQualifiedPath(final Path f) throws FileNotFoundException { return myFs.makeQualified( new Path(chRootPathPartString + f.toUri().toString())); } @Override public FSDataOutputStream createInternal(final Path f, final EnumSet<CreateFlag> flag, final FsPermission absolutePermission, final int bufferSize, final short replication, final long blockSize, final Progressable progress, final ChecksumOpt checksumOpt, final boolean createParent) throws IOException, UnresolvedLinkException { return myFs.createInternal(fullPath(f), flag, absolutePermission, bufferSize, replication, blockSize, progress, checksumOpt, createParent); } @Override public boolean delete(final Path f, final boolean recursive) throws IOException, UnresolvedLinkException { return myFs.delete(fullPath(f), recursive); } @Override public BlockLocation[] getFileBlockLocations(final Path f, final long start, final long len) throws IOException, UnresolvedLinkException { return myFs.getFileBlockLocations(fullPath(f), start, len); } @Override public FileChecksum getFileChecksum(final Path f) throws IOException, UnresolvedLinkException { return myFs.getFileChecksum(fullPath(f)); } @Override public FileStatus getFileStatus(final Path f) throws IOException, UnresolvedLinkException { return myFs.getFileStatus(fullPath(f)); } public void access(Path path, FsAction mode) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { myFs.access(fullPath(path), mode); } @Override public FileStatus getFileLinkStatus(final Path f) throws IOException, UnresolvedLinkException { return myFs.getFileLinkStatus(fullPath(f)); } @Override public FsStatus getFsStatus() throws IOException { return myFs.getFsStatus(); } @Override public FsServerDefaults getServerDefaults() throws IOException { return myFs.getServerDefaults(); } @Override public int getUriDefaultPort() { return myFs.getUriDefaultPort(); } @Override public FileStatus[] listStatus(final Path f) throws IOException, UnresolvedLinkException { return myFs.listStatus(fullPath(f)); } @Override public void mkdir(final Path dir, final FsPermission permission, final boolean createParent) throws IOException, UnresolvedLinkException { myFs.mkdir(fullPath(dir), permission, createParent); } @Override public FSDataInputStream open(final Path f, final int bufferSize) throws IOException, UnresolvedLinkException { return myFs.open(fullPath(f), bufferSize); } @Override public boolean truncate(final Path f, final long newLength) throws IOException, UnresolvedLinkException { return myFs.truncate(fullPath(f), newLength); } @Override public void renameInternal(final Path src, final Path dst) throws IOException, UnresolvedLinkException { // note fullPath will check that paths are relative to this FileSystem. // Hence both are in same file system and a rename is valid myFs.renameInternal(fullPath(src), fullPath(dst)); } @Override public void renameInternal(final Path src, final Path dst, final boolean overwrite) throws IOException, UnresolvedLinkException { // note fullPath will check that paths are relative to this FileSystem. 
// Hence both are in same file system and a rename is valid myFs.renameInternal(fullPath(src), fullPath(dst), overwrite); } @Override public void setOwner(final Path f, final String username, final String groupname) throws IOException, UnresolvedLinkException { myFs.setOwner(fullPath(f), username, groupname); } @Override public void setPermission(final Path f, final FsPermission permission) throws IOException, UnresolvedLinkException { myFs.setPermission(fullPath(f), permission); } @Override public boolean setReplication(final Path f, final short replication) throws IOException, UnresolvedLinkException { return myFs.setReplication(fullPath(f), replication); } @Override public void setTimes(final Path f, final long mtime, final long atime) throws IOException, UnresolvedLinkException { myFs.setTimes(fullPath(f), mtime, atime); } @Override public void modifyAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { myFs.modifyAclEntries(fullPath(path), aclSpec); } @Override public void removeAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { myFs.removeAclEntries(fullPath(path), aclSpec); } @Override public void removeDefaultAcl(Path path) throws IOException { myFs.removeDefaultAcl(fullPath(path)); } @Override public void removeAcl(Path path) throws IOException { myFs.removeAcl(fullPath(path)); } @Override public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException { myFs.setAcl(fullPath(path), aclSpec); } @Override public AclStatus getAclStatus(Path path) throws IOException { return myFs.getAclStatus(fullPath(path)); } @Override public void setXAttr(Path path, String name, byte[] value, EnumSet<XAttrSetFlag> flag) throws IOException { myFs.setXAttr(fullPath(path), name, value, flag); } @Override public byte[] getXAttr(Path path, String name) throws IOException { return myFs.getXAttr(fullPath(path), name); } @Override public Map<String, byte[]> getXAttrs(Path path) throws IOException { return myFs.getXAttrs(fullPath(path)); } @Override public Map<String, byte[]> getXAttrs(Path path, List<String> names) throws IOException { return myFs.getXAttrs(fullPath(path), names); } @Override public List<String> listXAttrs(Path path) throws IOException { return myFs.listXAttrs(fullPath(path)); } @Override public void removeXAttr(Path path, String name) throws IOException { myFs.removeXAttr(fullPath(path), name); } @Override public Path createSnapshot(Path path, String name) throws IOException { return myFs.createSnapshot(fullPath(path), name); } @Override public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException { myFs.renameSnapshot(fullPath(path), snapshotOldName, snapshotNewName); } @Override public void deleteSnapshot(Path snapshotDir, String snapshotName) throws IOException { myFs.deleteSnapshot(fullPath(snapshotDir), snapshotName); } @Override public void setStoragePolicy(Path path, String policyName) throws IOException { myFs.setStoragePolicy(fullPath(path), policyName); } @Override public BlockStoragePolicySpi getStoragePolicy(final Path src) throws IOException { return myFs.getStoragePolicy(src); } @Override public Collection<? 
extends BlockStoragePolicySpi> getAllStoragePolicies() throws IOException { return myFs.getAllStoragePolicies(); } @Override public void setVerifyChecksum(final boolean verifyChecksum) throws IOException, UnresolvedLinkException { myFs.setVerifyChecksum(verifyChecksum); } @Override public boolean supportsSymlinks() { return myFs.supportsSymlinks(); } @Override public void createSymlink(final Path target, final Path link, final boolean createParent) throws IOException, UnresolvedLinkException { /* * We leave the link alone: * If qualified or link relative then of course it is okay. * If absolute (ie / relative) then the link has to be resolved * relative to the changed root. */ myFs.createSymlink(fullPath(target), link, createParent); } @Override public Path getLinkTarget(final Path f) throws IOException { return myFs.getLinkTarget(fullPath(f)); } @Override public List<Token<?>> getDelegationTokens(String renewer) throws IOException { return myFs.getDelegationTokens(renewer); } }
13,841
30.894009
87
java
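The chroot arithmetic in ChRootedFs reduces to prefixing the chroot onto absolute paths (fullPath) and stripping it off again (stripOutRoot). The standalone sketch below reproduces both calculations for the /user/foo chroot used in the class javadoc; the paths are placeholders.

import org.apache.hadoop.fs.Path;

public class ChrootPathDemo {
  public static void main(String[] args) {
    // Chroot of /user/foo, matching the example in the class javadoc.
    Path chRootPathPart = new Path("/user/foo");
    String chRootPathPartString = chRootPathPart.toUri().getPath();

    // fullPath(): prefix the chroot onto an absolute path.
    Path p = new Path("/a/b/c");
    Path full = new Path(
        (chRootPathPart.isRoot() ? "" : chRootPathPartString)
            + p.toUri().getPath());
    System.out.println(full); // /user/foo/a/b/c

    // stripOutRoot(): drop the chroot prefix and the leading slash again.
    String pathPart = full.toUri().getPath();
    String stripped = (pathPart.length() == chRootPathPartString.length())
        ? ""
        : pathPart.substring(chRootPathPartString.length()
            + (chRootPathPart.isRoot() ? 0 : 1));
    System.out.println(stripped); // a/b/c
  }
}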
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/Constants.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.viewfs;

import org.apache.hadoop.fs.permission.FsPermission;

/**
 * Config variable prefixes for ViewFs -
 * see {@link org.apache.hadoop.fs.viewfs.ViewFs} for examples.
 * The mount table is specified in the config using these prefixes.
 * See {@link org.apache.hadoop.fs.viewfs.ConfigUtil} for convenience lib.
 */
public interface Constants {
  /**
   * Prefix for the config variable prefix for the ViewFs mount-table
   */
  public static final String CONFIG_VIEWFS_PREFIX = "fs.viewfs.mounttable";

  /**
   * Prefix for the home dir for the mount table - if not specified
   * then the hadoop default value (/user) is used.
   */
  public static final String CONFIG_VIEWFS_HOMEDIR = "homedir";

  /**
   * Config variable name for the default mount table.
   */
  public static final String CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE = "default";

  /**
   * Config variable full prefix for the default mount table.
   */
  public static final String CONFIG_VIEWFS_PREFIX_DEFAULT_MOUNT_TABLE =
      CONFIG_VIEWFS_PREFIX + "." + CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE;

  /**
   * Config variable for specifying a simple link
   */
  public static final String CONFIG_VIEWFS_LINK = "link";

  /**
   * Config variable for specifying a merge link
   */
  public static final String CONFIG_VIEWFS_LINK_MERGE = "linkMerge";

  /**
   * Config variable for specifying a merge of the root of the mount-table
   * with the root of another file system.
   */
  public static final String CONFIG_VIEWFS_LINK_MERGE_SLASH = "linkMergeSlash";

  static public final FsPermission PERMISSION_555 =
      new FsPermission((short) 0555);
}
2,478
34.414286
79
java
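Taken together, these constants produce config keys of the form fs.viewfs.mounttable.<table>.link.<src>. The sketch below writes the keys for the default mount table by hand; the hdfs://nn1 target and the /data and /user paths are made-up examples.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.viewfs.Constants;

public class MountTableKeysDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // fs.viewfs.mounttable.default
    String prefix = Constants.CONFIG_VIEWFS_PREFIX_DEFAULT_MOUNT_TABLE;

    // A simple link: /data in the view points at a directory on nn1.
    conf.set(prefix + "." + Constants.CONFIG_VIEWFS_LINK + "." + "/data",
        URI.create("hdfs://nn1/projects/data").toString());

    // Home directory prefix for this mount table.
    conf.set(prefix + "." + Constants.CONFIG_VIEWFS_HOMEDIR, "/user");

    // Keys written above:
    //   fs.viewfs.mounttable.default.link./data -> hdfs://nn1/projects/data
    //   fs.viewfs.mounttable.default.homedir    -> /user
  }
}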
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.viewfs; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; /** * InodeTree implements a mount-table as a tree of inodes. * It is used to implement ViewFs and ViewFileSystem. * In order to use it the caller must subclass it and implement * the abstract methods {@link #getTargetFileSystem(INodeDir)}, etc. * * The mountable is initialized from the config variables as * specified in {@link ViewFs} * * @param <T> is AbstractFileSystem or FileSystem * * The three main methods are * {@link #InodeTreel(Configuration)} // constructor * {@link #InodeTree(Configuration, String)} // constructor * {@link #resolve(String, boolean)} */ @InterfaceAudience.Private @InterfaceStability.Unstable abstract class InodeTree<T> { static enum ResultKind {isInternalDir, isExternalDir;}; static final Path SlashPath = new Path("/"); final INodeDir<T> root; // the root of the mount table final String homedirPrefix; // the homedir config value for this mount table List<MountPoint<T>> mountPoints = new ArrayList<MountPoint<T>>(); static class MountPoint<T> { String src; INodeLink<T> target; MountPoint(String srcPath, INodeLink<T> mountLink) { src = srcPath; target = mountLink; } } /** * Breaks file path into component names. * @param path * @return array of names component names */ static String[] breakIntoPathComponents(final String path) { return path == null ? 
null : path.split(Path.SEPARATOR); } /** * Internal class for inode tree * @param <T> */ abstract static class INode<T> { final String fullPath; // the full path to the root public INode(String pathToNode, UserGroupInformation aUgi) { fullPath = pathToNode; } }; /** * Internal class to represent an internal dir of the mount table * @param <T> */ static class INodeDir<T> extends INode<T> { final Map<String,INode<T>> children = new HashMap<String,INode<T>>(); T InodeDirFs = null; // file system of this internal directory of mountT boolean isRoot = false; INodeDir(final String pathToNode, final UserGroupInformation aUgi) { super(pathToNode, aUgi); } INode<T> resolve(final String pathComponent) throws FileNotFoundException { final INode<T> result = resolveInternal(pathComponent); if (result == null) { throw new FileNotFoundException(); } return result; } INode<T> resolveInternal(final String pathComponent) throws FileNotFoundException { return children.get(pathComponent); } INodeDir<T> addDir(final String pathComponent, final UserGroupInformation aUgi) throws FileAlreadyExistsException { if (children.containsKey(pathComponent)) { throw new FileAlreadyExistsException(); } final INodeDir<T> newDir = new INodeDir<T>(fullPath+ (isRoot ? "" : "/") + pathComponent, aUgi); children.put(pathComponent, newDir); return newDir; } void addLink(final String pathComponent, final INodeLink<T> link) throws FileAlreadyExistsException { if (children.containsKey(pathComponent)) { throw new FileAlreadyExistsException(); } children.put(pathComponent, link); } } /** * In internal class to represent a mount link * A mount link can be single dir link or a merge dir link. * A merge dir link is a merge (junction) of links to dirs: * example : <merge of 2 dirs * /users -> hdfs:nn1//users * /users -> hdfs:nn2//users * * For a merge, each target is checked to be dir when created but if target * is changed later it is then ignored (a dir with null entries) */ static class INodeLink<T> extends INode<T> { final boolean isMergeLink; // true if MergeLink final URI[] targetDirLinkList; final T targetFileSystem; // file system object created from the link. /** * Construct a mergeLink */ INodeLink(final String pathToNode, final UserGroupInformation aUgi, final T targetMergeFs, final URI[] aTargetDirLinkList) { super(pathToNode, aUgi); targetFileSystem = targetMergeFs; targetDirLinkList = aTargetDirLinkList; isMergeLink = true; } /** * Construct a simple link (i.e. not a mergeLink) */ INodeLink(final String pathToNode, final UserGroupInformation aUgi, final T targetFs, final URI aTargetDirLink) { super(pathToNode, aUgi); targetFileSystem = targetFs; targetDirLinkList = new URI[1]; targetDirLinkList[0] = aTargetDirLink; isMergeLink = false; } /** * Get the target of the link * If a merge link then it returned as "," separated URI list. 
*/ Path getTargetLink() { // is merge link - use "," as separator between the merged URIs //String result = targetDirLinkList[0].toString(); StringBuilder result = new StringBuilder(targetDirLinkList[0].toString()); for (int i=1; i < targetDirLinkList.length; ++i) { result.append(',').append(targetDirLinkList[i].toString()); } return new Path(result.toString()); } } private void createLink(final String src, final String target, final boolean isLinkMerge, final UserGroupInformation aUgi) throws URISyntaxException, IOException, FileAlreadyExistsException, UnsupportedFileSystemException { // Validate that src is valid absolute path final Path srcPath = new Path(src); if (!srcPath.isAbsoluteAndSchemeAuthorityNull()) { throw new IOException("ViewFs:Non absolute mount name in config:" + src); } final String[] srcPaths = breakIntoPathComponents(src); INodeDir<T> curInode = root; int i; // Ignore first initial slash, process all except last component for (i = 1; i < srcPaths.length-1; i++) { final String iPath = srcPaths[i]; INode<T> nextInode = curInode.resolveInternal(iPath); if (nextInode == null) { INodeDir<T> newDir = curInode.addDir(iPath, aUgi); newDir.InodeDirFs = getTargetFileSystem(newDir); nextInode = newDir; } if (nextInode instanceof INodeLink) { // Error - expected a dir but got a link throw new FileAlreadyExistsException("Path " + nextInode.fullPath + " already exists as link"); } else { assert(nextInode instanceof INodeDir); curInode = (INodeDir<T>) nextInode; } } // Now process the last component // Add the link in 2 cases: does not exist or a link exists String iPath = srcPaths[i];// last component if (curInode.resolveInternal(iPath) != null) { // directory/link already exists StringBuilder strB = new StringBuilder(srcPaths[0]); for (int j = 1; j <= i; ++j) { strB.append('/').append(srcPaths[j]); } throw new FileAlreadyExistsException("Path " + strB + " already exists as dir; cannot create link here"); } final INodeLink<T> newLink; final String fullPath = curInode.fullPath + (curInode == root ? "" : "/") + iPath; if (isLinkMerge) { // Target is list of URIs String[] targetsList = StringUtils.getStrings(target); URI[] targetsListURI = new URI[targetsList.length]; int k = 0; for (String itarget : targetsList) { targetsListURI[k++] = new URI(itarget); } newLink = new INodeLink<T>(fullPath, aUgi, getTargetFileSystem(targetsListURI), targetsListURI); } else { newLink = new INodeLink<T>(fullPath, aUgi, getTargetFileSystem(new URI(target)), new URI(target)); } curInode.addLink(iPath, newLink); mountPoints.add(new MountPoint<T>(src, newLink)); } /** * Below the "public" methods of InodeTree */ /** * The user of this class must subclass and implement the following * 3 abstract methods. 
* @throws IOException */ protected abstract T getTargetFileSystem(final URI uri) throws UnsupportedFileSystemException, URISyntaxException, IOException; protected abstract T getTargetFileSystem(final INodeDir<T> dir) throws URISyntaxException; protected abstract T getTargetFileSystem(final URI[] mergeFsURIList) throws UnsupportedFileSystemException, URISyntaxException; /** * Create Inode Tree from the specified mount-table specified in Config * @param config - the mount table keys are prefixed with * FsConstants.CONFIG_VIEWFS_PREFIX * @param viewName - the name of the mount table - if null use defaultMT name * @throws UnsupportedFileSystemException * @throws URISyntaxException * @throws FileAlreadyExistsException * @throws IOException */ protected InodeTree(final Configuration config, final String viewName) throws UnsupportedFileSystemException, URISyntaxException, FileAlreadyExistsException, IOException { String vName = viewName; if (vName == null) { vName = Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE; } homedirPrefix = ConfigUtil.getHomeDirValue(config, vName); root = new INodeDir<T>("/", UserGroupInformation.getCurrentUser()); root.InodeDirFs = getTargetFileSystem(root); root.isRoot = true; final String mtPrefix = Constants.CONFIG_VIEWFS_PREFIX + "." + vName + "."; final String linkPrefix = Constants.CONFIG_VIEWFS_LINK + "."; final String linkMergePrefix = Constants.CONFIG_VIEWFS_LINK_MERGE + "."; boolean gotMountTableEntry = false; final UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); for (Entry<String, String> si : config) { final String key = si.getKey(); if (key.startsWith(mtPrefix)) { gotMountTableEntry = true; boolean isMergeLink = false; String src = key.substring(mtPrefix.length()); if (src.startsWith(linkPrefix)) { src = src.substring(linkPrefix.length()); } else if (src.startsWith(linkMergePrefix)) { // A merge link isMergeLink = true; src = src.substring(linkMergePrefix.length()); } else if (src.startsWith(Constants.CONFIG_VIEWFS_HOMEDIR)) { // ignore - we set home dir from config continue; } else { throw new IOException( "ViewFs: Cannot initialize: Invalid entry in Mount table in config: "+ src); } final String target = si.getValue(); // link or merge link createLink(src, target, isMergeLink, ugi); } } if (!gotMountTableEntry) { throw new IOException( "ViewFs: Cannot initialize: Empty Mount table in config for " + vName == null ? "viewfs:///" : ("viewfs://" + vName + "/")); } } /** * Resolve returns ResolveResult. * The caller can continue the resolution of the remainingPath * in the targetFileSystem. * * If the input pathname leads to link to another file system then * the targetFileSystem is the one denoted by the link (except it is * file system chrooted to link target. * If the input pathname leads to an internal mount-table entry then * the target file system is one that represents the internal inode. 
*/ static class ResolveResult<T> { final ResultKind kind; final T targetFileSystem; final String resolvedPath; final Path remainingPath; // to resolve in the target FileSystem ResolveResult(final ResultKind k, final T targetFs, final String resolveP, final Path remainingP) { kind = k; targetFileSystem = targetFs; resolvedPath = resolveP; remainingPath = remainingP; } // isInternalDir of path resolution completed within the mount table boolean isInternalDir() { return (kind == ResultKind.isInternalDir); } } /** * Resolve the pathname p relative to root InodeDir * @param p - inout path * @param resolveLastComponent * @return ResolveResult which allows further resolution of the remaining path * @throws FileNotFoundException */ ResolveResult<T> resolve(final String p, final boolean resolveLastComponent) throws FileNotFoundException { // TO DO: - more efficient to not split the path, but simply compare String[] path = breakIntoPathComponents(p); if (path.length <= 1) { // special case for when path is "/" ResolveResult<T> res = new ResolveResult<T>(ResultKind.isInternalDir, root.InodeDirFs, root.fullPath, SlashPath); return res; } INodeDir<T> curInode = root; int i; // ignore first slash for (i = 1; i < path.length - (resolveLastComponent ? 0 : 1); i++) { INode<T> nextInode = curInode.resolveInternal(path[i]); if (nextInode == null) { StringBuilder failedAt = new StringBuilder(path[0]); for ( int j = 1; j <=i; ++j) { failedAt.append('/').append(path[j]); } throw (new FileNotFoundException(failedAt.toString())); } if (nextInode instanceof INodeLink) { final INodeLink<T> link = (INodeLink<T>) nextInode; final Path remainingPath; if (i >= path.length-1) { remainingPath = SlashPath; } else { StringBuilder remainingPathStr = new StringBuilder("/" + path[i+1]); for (int j = i+2; j< path.length; ++j) { remainingPathStr.append('/').append(path[j]); } remainingPath = new Path(remainingPathStr.toString()); } final ResolveResult<T> res = new ResolveResult<T>(ResultKind.isExternalDir, link.targetFileSystem, nextInode.fullPath, remainingPath); return res; } else if (nextInode instanceof INodeDir) { curInode = (INodeDir<T>) nextInode; } } // We have resolved to an internal dir in mount table. Path remainingPath; if (resolveLastComponent) { remainingPath = SlashPath; } else { // note we have taken care of when path is "/" above // for internal dirs rem-path does not start with / since the lookup // that follows will do a children.get(remaningPath) and will have to // strip-out the initial / StringBuilder remainingPathStr = new StringBuilder("/" + path[i]); for (int j = i+1; j< path.length; ++j) { remainingPathStr.append('/').append(path[j]); } remainingPath = new Path(remainingPathStr.toString()); } final ResolveResult<T> res = new ResolveResult<T>(ResultKind.isInternalDir, curInode.InodeDirFs, curInode.fullPath, remainingPath); return res; } List<MountPoint<T>> getMountPoints() { return mountPoints; } /** * * @return home dir value from mount table; null if no config value * was found. */ String getHomeDirPrefixValue() { return homedirPrefix; } }
16,410
34.598698
81
java
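InodeTree.resolve() walks the path components until it reaches a link, then returns the matched target together with the remaining path (or "/" when nothing is left over). The toy resolver below mimics that contract for a one-level table; the link targets are placeholders and none of the real inode classes are involved.

import java.util.HashMap;
import java.util.Map;

public class TinyResolveDemo {
  public static void main(String[] args) {
    // One-level stand-in for the mount table; the targets are made up.
    Map<String, String> links = new HashMap<>();
    links.put("data", "hdfs://nn1/projects/data");
    links.put("user", "hdfs://nn2/user");

    String p = "/data/2024/part-0000";
    String[] components = p.split("/"); // ["", "data", "2024", "part-0000"]

    String target = links.get(components[1]); // the matched link
    StringBuilder remaining = new StringBuilder();
    for (int i = 2; i < components.length; i++) {
      remaining.append('/').append(components[i]);
    }
    // Prints: hdfs://nn1/projects/data remaining=/2024/part-0000
    System.out.println(target + " remaining="
        + (remaining.length() == 0 ? "/" : remaining.toString()));
  }
}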
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.viewfs; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.util.EnumSet; import java.util.List; import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.FsStatus; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.Progressable; /** * <code>ChRootedFileSystem</code> is a file system with its root some path * below the root of its base file system. * * Example: For a base file system hdfs://nn1/ with chRoot at /usr/foo, the * members will be setup as shown below. * <ul> * <li>myFs is the base file system and points to hdfs at nn1</li> * <li>myURI is hdfs://nn1/user/foo</li> * <li>chRootPathPart is /user/foo</li> * <li>workingDir is a directory related to chRoot</li> * </ul> * * The paths are resolved as follows by ChRootedFileSystem: * <ul> * <li> Absolute path /a/b/c is resolved to /user/foo/a/b/c at myFs</li> * <li> Relative path x/y is resolved to /user/foo/<workingDir>/x/y</li> * </ul> */ @InterfaceAudience.Private @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */ class ChRootedFileSystem extends FilterFileSystem { private final URI myUri; // the base URI + the chRoot private final Path chRootPathPart; // the root below the root of the base private final String chRootPathPartString; private Path workingDir; protected FileSystem getMyFs() { return getRawFileSystem(); } /** * @param path * @return full path including the chroot */ protected Path fullPath(final Path path) { super.checkPath(path); return path.isAbsolute() ? new Path((chRootPathPart.isRoot() ? 
"" : chRootPathPartString) + path.toUri().getPath()) : new Path(chRootPathPartString + workingDir.toUri().getPath(), path); } /** * Constructor * @param uri base file system * @param conf configuration * @throws IOException */ public ChRootedFileSystem(final URI uri, Configuration conf) throws IOException { super(FileSystem.get(uri, conf)); String pathString = uri.getPath(); if (pathString.isEmpty()) { pathString = "/"; } chRootPathPart = new Path(pathString); chRootPathPartString = chRootPathPart.toUri().getPath(); myUri = uri; workingDir = getHomeDirectory(); // We don't use the wd of the myFs } /** * Called after a new FileSystem instance is constructed. * @param name a uri whose authority section names the host, port, etc. * for this FileSystem * @param conf the configuration */ @Override public void initialize(final URI name, final Configuration conf) throws IOException { super.initialize(name, conf); setConf(conf); } @Override public URI getUri() { return myUri; } /** * Strip out the root from the path. * @param p - fully qualified path p * @return - the remaining path without the begining / * @throws IOException if the p is not prefixed with root */ String stripOutRoot(final Path p) throws IOException { try { checkPath(p); } catch (IllegalArgumentException e) { throw new IOException("Internal Error - path " + p + " should have been with URI: " + myUri); } String pathPart = p.toUri().getPath(); return (pathPart.length() == chRootPathPartString.length()) ? "" : pathPart .substring(chRootPathPartString.length() + (chRootPathPart.isRoot() ? 0 : 1)); } @Override protected Path getInitialWorkingDirectory() { /* * 3 choices here: * null or / or /user/<uname> or strip out the root out of myFs's * inital wd. * Only reasonable choice for initialWd for chrooted fds is null * so that the default rule for wd is applied */ return null; } public Path getResolvedQualifiedPath(final Path f) throws FileNotFoundException { return makeQualified( new Path(chRootPathPartString + f.toUri().toString())); } @Override public Path getWorkingDirectory() { return workingDir; } @Override public void setWorkingDirectory(final Path new_dir) { workingDir = new_dir.isAbsolute() ? 
new_dir : new Path(workingDir, new_dir); } @Override public FSDataOutputStream create(final Path f, final FsPermission permission, final boolean overwrite, final int bufferSize, final short replication, final long blockSize, final Progressable progress) throws IOException { return super.create(fullPath(f), permission, overwrite, bufferSize, replication, blockSize, progress); } @Override @Deprecated public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { return super.createNonRecursive(fullPath(f), permission, flags, bufferSize, replication, blockSize, progress); } @Override public boolean delete(final Path f, final boolean recursive) throws IOException { return super.delete(fullPath(f), recursive); } @Override @SuppressWarnings("deprecation") public boolean delete(Path f) throws IOException { return delete(f, true); } @Override public BlockLocation[] getFileBlockLocations(final FileStatus fs, final long start, final long len) throws IOException { return super.getFileBlockLocations( new ViewFsFileStatus(fs, fullPath(fs.getPath())), start, len); } @Override public FileChecksum getFileChecksum(final Path f) throws IOException { return super.getFileChecksum(fullPath(f)); } @Override public FileStatus getFileStatus(final Path f) throws IOException { return super.getFileStatus(fullPath(f)); } @Override public void access(Path path, FsAction mode) throws AccessControlException, FileNotFoundException, IOException { super.access(fullPath(path), mode); } @Override public FsStatus getStatus(Path p) throws IOException { return super.getStatus(fullPath(p)); } @Override public FileStatus[] listStatus(final Path f) throws IOException { return super.listStatus(fullPath(f)); } @Override public RemoteIterator<LocatedFileStatus> listLocatedStatus(Path f) throws IOException { return super.listLocatedStatus(fullPath(f)); } @Override public boolean mkdirs(final Path f, final FsPermission permission) throws IOException { return super.mkdirs(fullPath(f), permission); } @Override public FSDataInputStream open(final Path f, final int bufferSize) throws IOException { return super.open(fullPath(f), bufferSize); } @Override public FSDataOutputStream append(final Path f, final int bufferSize, final Progressable progress) throws IOException { return super.append(fullPath(f), bufferSize, progress); } @Override public boolean rename(final Path src, final Path dst) throws IOException { // note fullPath will check that paths are relative to this FileSystem. 
// Hence both are in same file system and a rename is valid return super.rename(fullPath(src), fullPath(dst)); } @Override public void setOwner(final Path f, final String username, final String groupname) throws IOException { super.setOwner(fullPath(f), username, groupname); } @Override public void setPermission(final Path f, final FsPermission permission) throws IOException { super.setPermission(fullPath(f), permission); } @Override public boolean setReplication(final Path f, final short replication) throws IOException { return super.setReplication(fullPath(f), replication); } @Override public void setTimes(final Path f, final long mtime, final long atime) throws IOException { super.setTimes(fullPath(f), mtime, atime); } @Override public void modifyAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { super.modifyAclEntries(fullPath(path), aclSpec); } @Override public void removeAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { super.removeAclEntries(fullPath(path), aclSpec); } @Override public void removeDefaultAcl(Path path) throws IOException { super.removeDefaultAcl(fullPath(path)); } @Override public void removeAcl(Path path) throws IOException { super.removeAcl(fullPath(path)); } @Override public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException { super.setAcl(fullPath(path), aclSpec); } @Override public AclStatus getAclStatus(Path path) throws IOException { return super.getAclStatus(fullPath(path)); } @Override public void setXAttr(Path path, String name, byte[] value, EnumSet<XAttrSetFlag> flag) throws IOException { super.setXAttr(fullPath(path), name, value, flag); } @Override public byte[] getXAttr(Path path, String name) throws IOException { return super.getXAttr(fullPath(path), name); } @Override public Map<String, byte[]> getXAttrs(Path path) throws IOException { return super.getXAttrs(fullPath(path)); } @Override public Map<String, byte[]> getXAttrs(Path path, List<String> names) throws IOException { return super.getXAttrs(fullPath(path), names); } @Override public List<String> listXAttrs(Path path) throws IOException { return super.listXAttrs(fullPath(path)); } @Override public void removeXAttr(Path path, String name) throws IOException { super.removeXAttr(fullPath(path), name); } @Override public Path createSnapshot(Path path, String name) throws IOException { return super.createSnapshot(fullPath(path), name); } @Override public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException { super.renameSnapshot(fullPath(path), snapshotOldName, snapshotNewName); } @Override public void deleteSnapshot(Path snapshotDir, String snapshotName) throws IOException { super.deleteSnapshot(fullPath(snapshotDir), snapshotName); } @Override public Path resolvePath(final Path p) throws IOException { return super.resolvePath(fullPath(p)); } @Override public ContentSummary getContentSummary(Path f) throws IOException { return fs.getContentSummary(fullPath(f)); } private static Path rootPath = new Path(Path.SEPARATOR); @Override public long getDefaultBlockSize() { return getDefaultBlockSize(fullPath(rootPath)); } @Override public long getDefaultBlockSize(Path f) { return super.getDefaultBlockSize(fullPath(f)); } @Override public short getDefaultReplication() { return getDefaultReplication(fullPath(rootPath)); } @Override public short getDefaultReplication(Path f) { return super.getDefaultReplication(fullPath(f)); } @Override public FsServerDefaults getServerDefaults() throws IOException { return 
getServerDefaults(fullPath(rootPath)); } @Override public FsServerDefaults getServerDefaults(Path f) throws IOException { return super.getServerDefaults(fullPath(f)); } }
12,993
29.502347
103
java
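ChRootedFileSystem.fullPath() has two branches: absolute paths simply get the chroot prefix, while relative paths are resolved against the chroot plus the working directory. The sketch below walks both branches, using the /user/foo chroot from the javadoc and an assumed working directory of /wd.

import org.apache.hadoop.fs.Path;

public class ChrootRelativePathDemo {
  public static void main(String[] args) {
    // Assumed values: chroot /user/foo (as in the javadoc) and a working
    // directory of /wd inside the chrooted view.
    String chRootPathPartString = "/user/foo";
    Path workingDir = new Path("/wd");

    Path absolute = new Path("/a/b/c");
    Path relative = new Path("x/y");

    // Absolute branch: just prefix the chroot.
    Path fullAbs = new Path(chRootPathPartString + absolute.toUri().getPath());
    // Relative branch: resolve against chroot + working directory.
    Path fullRel = new Path(
        chRootPathPartString + workingDir.toUri().getPath(), relative);

    System.out.println(fullAbs); // /user/foo/a/b/c
    System.out.println(fullRel); // /user/foo/wd/x/y
  }
}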
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ConfigUtil.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.viewfs;

import java.net.URI;

import org.apache.hadoop.conf.Configuration;

/**
 * Utilities for config variables of the viewFs. See {@link ViewFs}.
 */
public class ConfigUtil {
  /**
   * Get the config variable prefix for the specified mount table
   * @param mountTableName - the name of the mount table
   * @return the config variable prefix for the specified mount table
   */
  public static String getConfigViewFsPrefix(final String mountTableName) {
    return Constants.CONFIG_VIEWFS_PREFIX + "." + mountTableName;
  }

  /**
   * Get the config variable prefix for the default mount table
   * @return the config variable prefix for the default mount table
   */
  public static String getConfigViewFsPrefix() {
    return getConfigViewFsPrefix(Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE);
  }

  /**
   * Add a link to the config for the specified mount table
   * @param conf - add the link to this conf
   * @param mountTableName - the name of the mount table
   * @param src - the src path name
   * @param target - the target URI link
   */
  public static void addLink(Configuration conf, final String mountTableName,
      final String src, final URI target) {
    conf.set(getConfigViewFsPrefix(mountTableName) + "." +
        Constants.CONFIG_VIEWFS_LINK + "." + src, target.toString());
  }

  /**
   * Add a link to the config for the default mount table
   * @param conf - add the link to this conf
   * @param src - the src path name
   * @param target - the target URI link
   */
  public static void addLink(final Configuration conf, final String src,
      final URI target) {
    addLink(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, src, target);
  }

  /**
   * Add config variable for homedir for the default mount table
   * @param conf - add to this conf
   * @param homedir - the home dir path starting with slash
   */
  public static void setHomeDirConf(final Configuration conf,
      final String homedir) {
    setHomeDirConf(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, homedir);
  }

  /**
   * Add config variable for homedir for the specified mount table
   * @param conf - add to this conf
   * @param mountTableName - the name of the mount table
   * @param homedir - the home dir path starting with slash
   */
  public static void setHomeDirConf(final Configuration conf,
      final String mountTableName, final String homedir) {
    if (!homedir.startsWith("/")) {
      throw new IllegalArgumentException("Home dir should start with /:"
          + homedir);
    }
    conf.set(getConfigViewFsPrefix(mountTableName) + "."
        + Constants.CONFIG_VIEWFS_HOMEDIR, homedir);
  }

  /**
   * Get the value of the home dir conf value for the default mount table
   * @param conf - from this conf
   * @return home dir value, null if variable is not in conf
   */
  public static String getHomeDirValue(final Configuration conf) {
    return getHomeDirValue(conf, Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE);
  }

  /**
   * Get the value of the home dir conf value for the specified mount table
   * @param conf - from this conf
   * @param mountTableName - the mount table
   * @return home dir value, null if variable is not in conf
   */
  public static String getHomeDirValue(final Configuration conf,
      final String mountTableName) {
    return conf.get(getConfigViewFsPrefix(mountTableName) + "."
        + Constants.CONFIG_VIEWFS_HOMEDIR);
  }
}
4,215
34.728814
80
java
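ConfigUtil is the convenience layer for building the client-side mount table programmatically. Below is a hedged end-to-end sketch that wires two links and a home directory into a Configuration and then obtains the corresponding ViewFileSystem; it assumes the viewfs scheme is mapped to ViewFileSystem, as in a stock Hadoop setup, and the namenode URIs are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class ViewFsSetupDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Two links in the default mount table; the namenodes are placeholders.
    ConfigUtil.addLink(conf, "/data", URI.create("hdfs://nn1/projects/data"));
    ConfigUtil.addLink(conf, "/user", URI.create("hdfs://nn2/user"));
    ConfigUtil.setHomeDirConf(conf, "/user");

    // ViewFileSystem builds its mount table from the keys written above.
    FileSystem fs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    System.out.println(fs.getUri());
    System.out.println(fs.getHomeDirectory()); // home dir under /user for the current user
    fs.close();
  }
}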
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.viewfs; import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.Arrays; import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.Map.Entry; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.AclUtil; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.viewfs.InodeTree.INode; import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Time; /** * ViewFileSystem (extends the FileSystem interface) implements a client-side * mount table. Its spec and implementation is identical to {@link ViewFs}. 
*/ @InterfaceAudience.Public @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */ public class ViewFileSystem extends FileSystem { private static final Path ROOT_PATH = new Path(Path.SEPARATOR); static AccessControlException readOnlyMountTable(final String operation, final String p) { return new AccessControlException( "InternalDir of ViewFileSystem is readonly; operation=" + operation + "Path=" + p); } static AccessControlException readOnlyMountTable(final String operation, final Path p) { return readOnlyMountTable(operation, p.toString()); } static public class MountPoint { private Path src; // the src of the mount private URI[] targets; // target of the mount; Multiple targets imply mergeMount MountPoint(Path srcPath, URI[] targetURIs) { src = srcPath; targets = targetURIs; } Path getSrc() { return src; } URI[] getTargets() { return targets; } } final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable URI myUri; private Path workingDir; Configuration config; InodeTree<FileSystem> fsState; // the fs state; ie the mount table Path homeDir = null; /** * Make the path Absolute and get the path-part of a pathname. * Checks that URI matches this file system * and that the path-part is a valid name. * * @param p path * @return path-part of the Path p */ private String getUriPath(final Path p) { checkPath(p); return makeAbsolute(p).toUri().getPath(); } private Path makeAbsolute(final Path f) { return f.isAbsolute() ? f : new Path(workingDir, f); } /** * This is the constructor with the signature needed by * {@link FileSystem#createFileSystem(URI, Configuration)} * * After this constructor is called initialize() is called. * @throws IOException */ public ViewFileSystem() throws IOException { ugi = UserGroupInformation.getCurrentUser(); creationTime = Time.now(); } /** * Return the protocol scheme for the FileSystem. * <p/> * * @return <code>viewfs</code> */ @Override public String getScheme() { return "viewfs"; } /** * Called after a new FileSystem instance is constructed. * @param theUri a uri whose authority section names the host, port, etc. for * this FileSystem * @param conf the configuration */ @Override public void initialize(final URI theUri, final Configuration conf) throws IOException { super.initialize(theUri, conf); setConf(conf); config = conf; // Now build client side view (i.e. client side mount table) from config. 
final String authority = theUri.getAuthority(); try { myUri = new URI(FsConstants.VIEWFS_SCHEME, authority, "/", null, null); fsState = new InodeTree<FileSystem>(conf, authority) { @Override protected FileSystem getTargetFileSystem(final URI uri) throws URISyntaxException, IOException { return new ChRootedFileSystem(uri, config); } @Override protected FileSystem getTargetFileSystem(final INodeDir<FileSystem> dir) throws URISyntaxException { return new InternalDirOfViewFs(dir, creationTime, ugi, myUri); } @Override protected FileSystem getTargetFileSystem(URI[] mergeFsURIList) throws URISyntaxException, UnsupportedFileSystemException { throw new UnsupportedFileSystemException("mergefs not implemented"); // return MergeFs.createMergeFs(mergeFsURIList, config); } }; workingDir = this.getHomeDirectory(); } catch (URISyntaxException e) { throw new IOException("URISyntax exception: " + theUri); } } /** * Convenience Constructor for apps to call directly * @param theUri which must be that of ViewFileSystem * @param conf * @throws IOException */ ViewFileSystem(final URI theUri, final Configuration conf) throws IOException { this(); initialize(theUri, conf); } /** * Convenience Constructor for apps to call directly * @param conf * @throws IOException */ public ViewFileSystem(final Configuration conf) throws IOException { this(FsConstants.VIEWFS_URI, conf); } public Path getTrashCanLocation(final Path f) throws FileNotFoundException { final InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); return res.isInternalDir() ? null : res.targetFileSystem.getHomeDirectory(); } @Override public URI getUri() { return myUri; } @Override public Path resolvePath(final Path f) throws IOException { final InodeTree.ResolveResult<FileSystem> res; res = fsState.resolve(getUriPath(f), true); if (res.isInternalDir()) { return f; } return res.targetFileSystem.resolvePath(res.remainingPath); } @Override public Path getHomeDirectory() { if (homeDir == null) { String base = fsState.getHomeDirPrefixValue(); if (base == null) { base = "/user"; } homeDir = (base.equals("/") ? 
this.makeQualified(new Path(base + ugi.getShortUserName())): this.makeQualified(new Path(base + "/" + ugi.getShortUserName()))); } return homeDir; } @Override public Path getWorkingDirectory() { return workingDir; } @Override public void setWorkingDirectory(final Path new_dir) { getUriPath(new_dir); // this validates the path workingDir = makeAbsolute(new_dir); } @Override public FSDataOutputStream append(final Path f, final int bufferSize, final Progressable progress) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.append(res.remainingPath, bufferSize, progress); } @Override public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException { InodeTree.ResolveResult<FileSystem> res; try { res = fsState.resolve(getUriPath(f), false); } catch (FileNotFoundException e) { throw readOnlyMountTable("create", f); } assert(res.remainingPath != null); return res.targetFileSystem.createNonRecursive(res.remainingPath, permission, flags, bufferSize, replication, blockSize, progress); } @Override public FSDataOutputStream create(final Path f, final FsPermission permission, final boolean overwrite, final int bufferSize, final short replication, final long blockSize, final Progressable progress) throws IOException { InodeTree.ResolveResult<FileSystem> res; try { res = fsState.resolve(getUriPath(f), false); } catch (FileNotFoundException e) { throw readOnlyMountTable("create", f); } assert(res.remainingPath != null); return res.targetFileSystem.create(res.remainingPath, permission, overwrite, bufferSize, replication, blockSize, progress); } @Override public boolean delete(final Path f, final boolean recursive) throws AccessControlException, FileNotFoundException, IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); // If internal dir or target is a mount link (ie remainingPath is Slash) if (res.isInternalDir() || res.remainingPath == InodeTree.SlashPath) { throw readOnlyMountTable("delete", f); } return res.targetFileSystem.delete(res.remainingPath, recursive); } @Override @SuppressWarnings("deprecation") public boolean delete(final Path f) throws AccessControlException, FileNotFoundException, IOException { return delete(f, true); } @Override public BlockLocation[] getFileBlockLocations(FileStatus fs, long start, long len) throws IOException { final InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(fs.getPath()), true); return res.targetFileSystem.getFileBlockLocations( new ViewFsFileStatus(fs, res.remainingPath), start, len); } @Override public FileChecksum getFileChecksum(final Path f) throws AccessControlException, FileNotFoundException, IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.getFileChecksum(res.remainingPath); } private static FileStatus fixFileStatus(FileStatus orig, Path qualified) throws IOException { // FileStatus#getPath is a fully qualified path relative to the root of // target file system. // We need to change it to viewfs URI - relative to root of mount table. // The implementors of RawLocalFileSystem were trying to be very smart. // They implement FileStatus#getOwner lazily -- the object // returned is really a RawLocalFileSystem that expect the // FileStatus#getPath to be unchanged so that it can get owner when needed. 
// Hence we need to interpose a new ViewFileSystemFileStatus that // works around. if ("file".equals(orig.getPath().toUri().getScheme())) { orig = wrapLocalFileStatus(orig, qualified); } orig.setPath(qualified); return orig; } private static FileStatus wrapLocalFileStatus(FileStatus orig, Path qualified) { return orig instanceof LocatedFileStatus ? new ViewFsLocatedFileStatus((LocatedFileStatus)orig, qualified) : new ViewFsFileStatus(orig, qualified); } @Override public FileStatus getFileStatus(final Path f) throws AccessControlException, FileNotFoundException, IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); FileStatus status = res.targetFileSystem.getFileStatus(res.remainingPath); return fixFileStatus(status, this.makeQualified(f)); } @Override public void access(Path path, FsAction mode) throws AccessControlException, FileNotFoundException, IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.access(res.remainingPath, mode); } @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, FileNotFoundException, IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); FileStatus[] statusLst = res.targetFileSystem.listStatus(res.remainingPath); if (!res.isInternalDir()) { // We need to change the name in the FileStatus as described in // {@link #getFileStatus } int i = 0; for (FileStatus status : statusLst) { statusLst[i++] = fixFileStatus(status, getChrootedPath(res, status, f)); } } return statusLst; } @Override public RemoteIterator<LocatedFileStatus>listLocatedStatus(final Path f, final PathFilter filter) throws FileNotFoundException, IOException { final InodeTree.ResolveResult<FileSystem> res = fsState .resolve(getUriPath(f), true); final RemoteIterator<LocatedFileStatus> statusIter = res.targetFileSystem .listLocatedStatus(res.remainingPath); if (res.isInternalDir()) { return statusIter; } return new RemoteIterator<LocatedFileStatus>() { @Override public boolean hasNext() throws IOException { return statusIter.hasNext(); } @Override public LocatedFileStatus next() throws IOException { final LocatedFileStatus status = statusIter.next(); return (LocatedFileStatus)fixFileStatus(status, getChrootedPath(res, status, f)); } }; } private Path getChrootedPath(InodeTree.ResolveResult<FileSystem> res, FileStatus status, Path f) throws IOException { final String suffix = ((ChRootedFileSystem)res.targetFileSystem) .stripOutRoot(status.getPath()); return this.makeQualified( suffix.length() == 0 ? f : new Path(res.resolvedPath, suffix)); } @Override public boolean mkdirs(final Path dir, final FsPermission permission) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(dir), false); return res.targetFileSystem.mkdirs(res.remainingPath, permission); } @Override public FSDataInputStream open(final Path f, final int bufferSize) throws AccessControlException, FileNotFoundException, IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.open(res.remainingPath, bufferSize); } @Override public boolean rename(final Path src, final Path dst) throws IOException { // passing resolveLastComponet as false to catch renaming a mount point to // itself. We need to catch this as an internal operation and fail. 
InodeTree.ResolveResult<FileSystem> resSrc = fsState.resolve(getUriPath(src), false); if (resSrc.isInternalDir()) { throw readOnlyMountTable("rename", src); } InodeTree.ResolveResult<FileSystem> resDst = fsState.resolve(getUriPath(dst), false); if (resDst.isInternalDir()) { throw readOnlyMountTable("rename", dst); } /** // Alternate 1: renames within same file system - valid but we disallow // Alternate 2: (as described in next para - valid but we have disallowed it // // Note we compare the URIs. the URIs include the link targets. // hence we allow renames across mount links as long as the mount links // point to the same target. if (!resSrc.targetFileSystem.getUri().equals( resDst.targetFileSystem.getUri())) { throw new IOException("Renames across Mount points not supported"); } */ // // Alternate 3 : renames ONLY within the the same mount links. // if (resSrc.targetFileSystem !=resDst.targetFileSystem) { throw new IOException("Renames across Mount points not supported"); } return resSrc.targetFileSystem.rename(resSrc.remainingPath, resDst.remainingPath); } @Override public boolean truncate(final Path f, final long newLength) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.truncate(f, newLength); } @Override public void setOwner(final Path f, final String username, final String groupname) throws AccessControlException, FileNotFoundException, IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); res.targetFileSystem.setOwner(res.remainingPath, username, groupname); } @Override public void setPermission(final Path f, final FsPermission permission) throws AccessControlException, FileNotFoundException, IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); res.targetFileSystem.setPermission(res.remainingPath, permission); } @Override public boolean setReplication(final Path f, final short replication) throws AccessControlException, FileNotFoundException, IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.setReplication(res.remainingPath, replication); } @Override public void setTimes(final Path f, final long mtime, final long atime) throws AccessControlException, FileNotFoundException, IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); res.targetFileSystem.setTimes(res.remainingPath, mtime, atime); } @Override public void modifyAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.modifyAclEntries(res.remainingPath, aclSpec); } @Override public void removeAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.removeAclEntries(res.remainingPath, aclSpec); } @Override public void removeDefaultAcl(Path path) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.removeDefaultAcl(res.remainingPath); } @Override public void removeAcl(Path path) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.removeAcl(res.remainingPath); } @Override public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException { InodeTree.ResolveResult<FileSystem> res = 
fsState.resolve(getUriPath(path), true); res.targetFileSystem.setAcl(res.remainingPath, aclSpec); } @Override public AclStatus getAclStatus(Path path) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); return res.targetFileSystem.getAclStatus(res.remainingPath); } @Override public void setXAttr(Path path, String name, byte[] value, EnumSet<XAttrSetFlag> flag) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.setXAttr(res.remainingPath, name, value, flag); } @Override public byte[] getXAttr(Path path, String name) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); return res.targetFileSystem.getXAttr(res.remainingPath, name); } @Override public Map<String, byte[]> getXAttrs(Path path) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); return res.targetFileSystem.getXAttrs(res.remainingPath); } @Override public Map<String, byte[]> getXAttrs(Path path, List<String> names) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); return res.targetFileSystem.getXAttrs(res.remainingPath, names); } @Override public List<String> listXAttrs(Path path) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); return res.targetFileSystem.listXAttrs(res.remainingPath); } @Override public void removeXAttr(Path path, String name) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.removeXAttr(res.remainingPath, name); } @Override public void setVerifyChecksum(final boolean verifyChecksum) { List<InodeTree.MountPoint<FileSystem>> mountPoints = fsState.getMountPoints(); for (InodeTree.MountPoint<FileSystem> mount : mountPoints) { mount.target.targetFileSystem.setVerifyChecksum(verifyChecksum); } } @Override public long getDefaultBlockSize() { throw new NotInMountpointException("getDefaultBlockSize"); } @Override public short getDefaultReplication() { throw new NotInMountpointException("getDefaultReplication"); } @Override public FsServerDefaults getServerDefaults() throws IOException { throw new NotInMountpointException("getServerDefaults"); } @Override public long getDefaultBlockSize(Path f) { try { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.getDefaultBlockSize(res.remainingPath); } catch (FileNotFoundException e) { throw new NotInMountpointException(f, "getDefaultBlockSize"); } } @Override public short getDefaultReplication(Path f) { try { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.getDefaultReplication(res.remainingPath); } catch (FileNotFoundException e) { throw new NotInMountpointException(f, "getDefaultReplication"); } } @Override public FsServerDefaults getServerDefaults(Path f) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.getServerDefaults(res.remainingPath); } @Override public ContentSummary getContentSummary(Path f) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.getContentSummary(res.remainingPath); } @Override public void setWriteChecksum(final boolean writeChecksum) { List<InodeTree.MountPoint<FileSystem>> 
mountPoints = fsState.getMountPoints(); for (InodeTree.MountPoint<FileSystem> mount : mountPoints) { mount.target.targetFileSystem.setWriteChecksum(writeChecksum); } } @Override public FileSystem[] getChildFileSystems() { List<InodeTree.MountPoint<FileSystem>> mountPoints = fsState.getMountPoints(); Set<FileSystem> children = new HashSet<FileSystem>(); for (InodeTree.MountPoint<FileSystem> mountPoint : mountPoints) { FileSystem targetFs = mountPoint.target.targetFileSystem; children.addAll(Arrays.asList(targetFs.getChildFileSystems())); } return children.toArray(new FileSystem[]{}); } public MountPoint[] getMountPoints() { List<InodeTree.MountPoint<FileSystem>> mountPoints = fsState.getMountPoints(); MountPoint[] result = new MountPoint[mountPoints.size()]; for ( int i = 0; i < mountPoints.size(); ++i ) { result[i] = new MountPoint(new Path(mountPoints.get(i).src), mountPoints.get(i).target.targetDirLinkList); } return result; } @Override public Path createSnapshot(Path path, String snapshotName) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); return res.targetFileSystem.createSnapshot(res.remainingPath, snapshotName); } @Override public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.renameSnapshot(res.remainingPath, snapshotOldName, snapshotNewName); } @Override public void deleteSnapshot(Path path, String snapshotName) throws IOException { InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.deleteSnapshot(res.remainingPath, snapshotName); } /* * An instance of this class represents an internal dir of the viewFs * that is internal dir of the mount table. * It is a read only mount tables and create, mkdir or delete operations * are not allowed. * If called on create or mkdir then this target is the parent of the * directory in which one is trying to create or mkdir; hence * in this case the path name passed in is the last component. * Otherwise this target is the end point of the path and hence * the path name passed in is null. 
*/ static class InternalDirOfViewFs extends FileSystem { final InodeTree.INodeDir<FileSystem> theInternalDir; final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable final URI myUri; public InternalDirOfViewFs(final InodeTree.INodeDir<FileSystem> dir, final long cTime, final UserGroupInformation ugi, URI uri) throws URISyntaxException { myUri = uri; try { initialize(myUri, new Configuration()); } catch (IOException e) { throw new RuntimeException("Cannot occur"); } theInternalDir = dir; creationTime = cTime; this.ugi = ugi; } static private void checkPathIsSlash(final Path f) throws IOException { if (f != InodeTree.SlashPath) { throw new IOException ( "Internal implementation error: expected file name to be /" ); } } @Override public URI getUri() { return myUri; } @Override public Path getWorkingDirectory() { throw new RuntimeException ( "Internal impl error: getWorkingDir should not have been called" ); } @Override public void setWorkingDirectory(final Path new_dir) { throw new RuntimeException ( "Internal impl error: getWorkingDir should not have been called" ); } @Override public FSDataOutputStream append(final Path f, final int bufferSize, final Progressable progress) throws IOException { throw readOnlyMountTable("append", f); } @Override public FSDataOutputStream create(final Path f, final FsPermission permission, final boolean overwrite, final int bufferSize, final short replication, final long blockSize, final Progressable progress) throws AccessControlException { throw readOnlyMountTable("create", f); } @Override public boolean delete(final Path f, final boolean recursive) throws AccessControlException, IOException { checkPathIsSlash(f); throw readOnlyMountTable("delete", f); } @Override @SuppressWarnings("deprecation") public boolean delete(final Path f) throws AccessControlException, IOException { return delete(f, true); } @Override public BlockLocation[] getFileBlockLocations(final FileStatus fs, final long start, final long len) throws FileNotFoundException, IOException { checkPathIsSlash(fs.getPath()); throw new FileNotFoundException("Path points to dir not a file"); } @Override public FileChecksum getFileChecksum(final Path f) throws FileNotFoundException, IOException { checkPathIsSlash(f); throw new FileNotFoundException("Path points to dir not a file"); } @Override public FileStatus getFileStatus(Path f) throws IOException { checkPathIsSlash(f); return new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0], new Path(theInternalDir.fullPath).makeQualified( myUri, ROOT_PATH)); } @Override public FileStatus[] listStatus(Path f) throws AccessControlException, FileNotFoundException, IOException { checkPathIsSlash(f); FileStatus[] result = new FileStatus[theInternalDir.children.size()]; int i = 0; for (Entry<String, INode<FileSystem>> iEntry : theInternalDir.children.entrySet()) { INode<FileSystem> inode = iEntry.getValue(); if (inode instanceof INodeLink ) { INodeLink<FileSystem> link = (INodeLink<FileSystem>) inode; result[i++] = new FileStatus(0, false, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0], link.getTargetLink(), new Path(inode.fullPath).makeQualified( myUri, null)); } else { result[i++] = new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0], new Path(inode.fullPath).makeQualified( myUri, null)); } } return result; } @Override 
public boolean mkdirs(Path dir, FsPermission permission) throws AccessControlException, FileAlreadyExistsException { if (theInternalDir.isRoot && dir == null) { throw new FileAlreadyExistsException("/ already exits"); } // Note dir starts with / if (theInternalDir.children.containsKey(dir.toString().substring(1))) { return true; // this is the stupid semantics of FileSystem } throw readOnlyMountTable("mkdirs", dir); } @Override public FSDataInputStream open(Path f, int bufferSize) throws AccessControlException, FileNotFoundException, IOException { checkPathIsSlash(f); throw new FileNotFoundException("Path points to dir not a file"); } @Override public boolean rename(Path src, Path dst) throws AccessControlException, IOException { checkPathIsSlash(src); checkPathIsSlash(dst); throw readOnlyMountTable("rename", src); } @Override public boolean truncate(Path f, long newLength) throws IOException { throw readOnlyMountTable("truncate", f); } @Override public void setOwner(Path f, String username, String groupname) throws AccessControlException, IOException { checkPathIsSlash(f); throw readOnlyMountTable("setOwner", f); } @Override public void setPermission(Path f, FsPermission permission) throws AccessControlException, IOException { checkPathIsSlash(f); throw readOnlyMountTable("setPermission", f); } @Override public boolean setReplication(Path f, short replication) throws AccessControlException, IOException { checkPathIsSlash(f); throw readOnlyMountTable("setReplication", f); } @Override public void setTimes(Path f, long mtime, long atime) throws AccessControlException, IOException { checkPathIsSlash(f); throw readOnlyMountTable("setTimes", f); } @Override public void setVerifyChecksum(boolean verifyChecksum) { // Noop for viewfs } @Override public FsServerDefaults getServerDefaults(Path f) throws IOException { throw new NotInMountpointException(f, "getServerDefaults"); } @Override public long getDefaultBlockSize(Path f) { throw new NotInMountpointException(f, "getDefaultBlockSize"); } @Override public short getDefaultReplication(Path f) { throw new NotInMountpointException(f, "getDefaultReplication"); } @Override public void modifyAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("modifyAclEntries", path); } @Override public void removeAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("removeAclEntries", path); } @Override public void removeDefaultAcl(Path path) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("removeDefaultAcl", path); } @Override public void removeAcl(Path path) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("removeAcl", path); } @Override public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("setAcl", path); } @Override public AclStatus getAclStatus(Path path) throws IOException { checkPathIsSlash(path); return new AclStatus.Builder().owner(ugi.getUserName()) .group(ugi.getGroupNames()[0]) .addEntries(AclUtil.getMinimalAcl(PERMISSION_555)) .stickyBit(false).build(); } @Override public void setXAttr(Path path, String name, byte[] value, EnumSet<XAttrSetFlag> flag) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("setXAttr", path); } @Override public byte[] getXAttr(Path path, String name) throws IOException { throw new NotInMountpointException(path, "getXAttr"); } @Override public Map<String, byte[]> 
getXAttrs(Path path) throws IOException { throw new NotInMountpointException(path, "getXAttrs"); } @Override public Map<String, byte[]> getXAttrs(Path path, List<String> names) throws IOException { throw new NotInMountpointException(path, "getXAttrs"); } @Override public List<String> listXAttrs(Path path) throws IOException { throw new NotInMountpointException(path, "listXAttrs"); } @Override public void removeXAttr(Path path, String name) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("removeXAttr", path); } @Override public Path createSnapshot(Path path, String snapshotName) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("createSnapshot", path); } @Override public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("renameSnapshot", path); } @Override public void deleteSnapshot(Path path, String snapshotName) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("deleteSnapshot", path); } } }
36,229
32.828198
85
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.viewfs; import java.io.IOException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; /** * This class is needed to address the problem described in * {@link ViewFileSystem#getFileStatus(org.apache.hadoop.fs.Path)} and * {@link ViewFs#getFileStatus(org.apache.hadoop.fs.Path)} */ class ViewFsFileStatus extends FileStatus { final FileStatus myFs; Path modifiedPath; ViewFsFileStatus(FileStatus fs, Path newPath) { myFs = fs; modifiedPath = newPath; } @Override public boolean equals(Object o) { return super.equals(o); } @Override public int hashCode() { return super.hashCode(); } @Override public long getLen() { return myFs.getLen(); } @Override public boolean isFile() { return myFs.isFile(); } @Override public boolean isDirectory() { return myFs.isDirectory(); } @Override @SuppressWarnings("deprecation") public boolean isDir() { return myFs.isDirectory(); } @Override public boolean isSymlink() { return myFs.isSymlink(); } @Override public long getBlockSize() { return myFs.getBlockSize(); } @Override public short getReplication() { return myFs.getReplication(); } @Override public long getModificationTime() { return myFs.getModificationTime(); } @Override public long getAccessTime() { return myFs.getAccessTime(); } @Override public FsPermission getPermission() { return myFs.getPermission(); } @Override public String getOwner() { return myFs.getOwner(); } @Override public String getGroup() { return myFs.getGroup(); } @Override public Path getPath() { return modifiedPath; } @Override public void setPath(final Path p) { modifiedPath = p; } @Override public Path getSymlink() throws IOException { return myFs.getSymlink(); } }
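A short, hypothetical sketch of what this wrapper accomplishes: every attribute delegates to the original FileStatus except the path, which is replaced with the mount-table-relative one. ViewFsFileStatus is package-private, so the sketch assumes it is compiled in org.apache.hadoop.fs.viewfs; the hdfs:// and viewfs:// paths are placeholders.

package org.apache.hadoop.fs.viewfs;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

class ViewFsFileStatusDemo {
  public static void main(String[] args) {
    // Status as the target file system reports it (path qualified there).
    FileStatus fromTarget = new FileStatus(1024L, false, 3, 134217728L, 0L,
        new Path("hdfs://nnProject1/projects/foo/part-00000"));

    // Re-expose it under the viewfs mount point; only getPath() changes.
    FileStatus viewStatus = new ViewFsFileStatus(fromTarget,
        new Path("viewfs:///project/foo/part-00000"));

    System.out.println(viewStatus.getPath()); // viewfs:///project/foo/part-00000
    System.out.println(viewStatus.getLen());  // 1024, delegated to the original
  }
}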
2,873
21.629921
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.viewfs; import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.BlockStoragePolicySpi; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.FsStatus; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.local.LocalConfigKeys; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclUtil; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.viewfs.InodeTree.INode; import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Time; /** * ViewFs (extends the AbstractFileSystem interface) implements a client-side * mount table. The viewFs file system is implemented completely in memory on * the client side. The client-side mount table allows a client to provide a * customized view of a file system namespace that is composed from * one or more individual file systems (a localFs or Hdfs, S3fs, etc). 
* For example one could have a mount table that provides links such as * <ul> * <li> /user -> hdfs://nnContainingUserDir/user * <li> /project/foo -> hdfs://nnProject1/projects/foo * <li> /project/bar -> hdfs://nnProject2/projects/bar * <li> /tmp -> hdfs://nnTmp/privateTmpForUserXXX * </ul> * * ViewFs is specified with the following URI: <b>viewfs:///</b> * <p> * To use viewfs one would typically set the default file system in the * config (i.e. fs.default.name< = viewfs:///) along with the * mount table config variables as described below. * * <p> * <b> ** Config variables to specify the mount table entries ** </b> * <p> * * The file system is initialized from the standard Hadoop config through * config variables. * See {@link FsConstants} for URI and Scheme constants; * See {@link Constants} for config var constants; * see {@link ConfigUtil} for convenient lib. * * <p> * All the mount table config entries for view fs are prefixed by * <b>fs.viewfs.mounttable.</b> * For example the above example can be specified with the following * config variables: * <ul> * <li> fs.viewfs.mounttable.default.link./user= * hdfs://nnContainingUserDir/user * <li> fs.viewfs.mounttable.default.link./project/foo= * hdfs://nnProject1/projects/foo * <li> fs.viewfs.mounttable.default.link./project/bar= * hdfs://nnProject2/projects/bar * <li> fs.viewfs.mounttable.default.link./tmp= * hdfs://nnTmp/privateTmpForUserXXX * </ul> * * The default mount table (when no authority is specified) is * from config variables prefixed by <b>fs.viewFs.mounttable.default </b> * The authority component of a URI can be used to specify a different mount * table. For example, * <ul> * <li> viewfs://sanjayMountable/ * </ul> * is initialized from fs.viewFs.mounttable.sanjayMountable.* config variables. * * <p> * <b> **** Merge Mounts **** </b>(NOTE: merge mounts are not implemented yet.) * <p> * * One can also use "MergeMounts" to merge several directories (this is * sometimes called union-mounts or junction-mounts in the literature. 
* For example of the home directories are stored on say two file systems * (because they do not fit on one) then one could specify a mount * entry such as following merges two dirs: * <ul> * <li> /user -> hdfs://nnUser1/user,hdfs://nnUser2/user * </ul> * Such a mergeLink can be specified with the following config var where "," * is used as the separator for each of links to be merged: * <ul> * <li> fs.viewfs.mounttable.default.linkMerge./user= * hdfs://nnUser1/user,hdfs://nnUser1/user * </ul> * A special case of the merge mount is where mount table's root is merged * with the root (slash) of another file system: * <ul> * <li> fs.viewfs.mounttable.default.linkMergeSlash=hdfs://nn99/ * </ul> * In this cases the root of the mount table is merged with the root of * <b>hdfs://nn99/ </b> */ @InterfaceAudience.Public @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */ public class ViewFs extends AbstractFileSystem { final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable final Configuration config; InodeTree<AbstractFileSystem> fsState; // the fs state; ie the mount table Path homeDir = null; static AccessControlException readOnlyMountTable(final String operation, final String p) { return new AccessControlException( "InternalDir of ViewFileSystem is readonly; operation=" + operation + "Path=" + p); } static AccessControlException readOnlyMountTable(final String operation, final Path p) { return readOnlyMountTable(operation, p.toString()); } static public class MountPoint { private Path src; // the src of the mount private URI[] targets; // target of the mount; Multiple targets imply mergeMount MountPoint(Path srcPath, URI[] targetURIs) { src = srcPath; targets = targetURIs; } Path getSrc() { return src; } URI[] getTargets() { return targets; } } public ViewFs(final Configuration conf) throws IOException, URISyntaxException { this(FsConstants.VIEWFS_URI, conf); } /** * This constructor has the signature needed by * {@link AbstractFileSystem#createFileSystem(URI, Configuration)}. * * @param theUri which must be that of ViewFs * @param conf * @throws IOException * @throws URISyntaxException */ ViewFs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException { super(theUri, FsConstants.VIEWFS_SCHEME, false, -1); creationTime = Time.now(); ugi = UserGroupInformation.getCurrentUser(); config = conf; // Now build client side view (i.e. client side mount table) from config. 
String authority = theUri.getAuthority(); fsState = new InodeTree<AbstractFileSystem>(conf, authority) { @Override protected AbstractFileSystem getTargetFileSystem(final URI uri) throws URISyntaxException, UnsupportedFileSystemException { String pathString = uri.getPath(); if (pathString.isEmpty()) { pathString = "/"; } return new ChRootedFs( AbstractFileSystem.createFileSystem(uri, config), new Path(pathString)); } @Override protected AbstractFileSystem getTargetFileSystem( final INodeDir<AbstractFileSystem> dir) throws URISyntaxException { return new InternalDirOfViewFs(dir, creationTime, ugi, getUri()); } @Override protected AbstractFileSystem getTargetFileSystem(URI[] mergeFsURIList) throws URISyntaxException, UnsupportedFileSystemException { throw new UnsupportedFileSystemException("mergefs not implemented yet"); // return MergeFs.createMergeFs(mergeFsURIList, config); } }; } @Override public FsServerDefaults getServerDefaults() throws IOException { return LocalConfigKeys.getServerDefaults(); } @Override public int getUriDefaultPort() { return -1; } @Override public Path getHomeDirectory() { if (homeDir == null) { String base = fsState.getHomeDirPrefixValue(); if (base == null) { base = "/user"; } homeDir = (base.equals("/") ? this.makeQualified(new Path(base + ugi.getShortUserName())): this.makeQualified(new Path(base + "/" + ugi.getShortUserName()))); } return homeDir; } @Override public Path resolvePath(final Path f) throws FileNotFoundException, AccessControlException, UnresolvedLinkException, IOException { final InodeTree.ResolveResult<AbstractFileSystem> res; res = fsState.resolve(getUriPath(f), true); if (res.isInternalDir()) { return f; } return res.targetFileSystem.resolvePath(res.remainingPath); } @Override public FSDataOutputStream createInternal(final Path f, final EnumSet<CreateFlag> flag, final FsPermission absolutePermission, final int bufferSize, final short replication, final long blockSize, final Progressable progress, final ChecksumOpt checksumOpt, final boolean createParent) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, UnsupportedFileSystemException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res; try { res = fsState.resolve(getUriPath(f), false); } catch (FileNotFoundException e) { if (createParent) { throw readOnlyMountTable("create", f); } else { throw e; } } assert(res.remainingPath != null); return res.targetFileSystem.createInternal(res.remainingPath, flag, absolutePermission, bufferSize, replication, blockSize, progress, checksumOpt, createParent); } @Override public boolean delete(final Path f, final boolean recursive) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true); // If internal dir or target is a mount link (ie remainingPath is Slash) if (res.isInternalDir() || res.remainingPath == InodeTree.SlashPath) { throw new AccessControlException( "Cannot delete internal mount table directory: " + f); } return res.targetFileSystem.delete(res.remainingPath, recursive); } @Override public BlockLocation[] getFileBlockLocations(final Path f, final long start, final long len) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.getFileBlockLocations(res.remainingPath, start, len); 
} @Override public FileChecksum getFileChecksum(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.getFileChecksum(res.remainingPath); } @Override public FileStatus getFileStatus(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true); // FileStatus#getPath is a fully qualified path relative to the root of // target file system. // We need to change it to viewfs URI - relative to root of mount table. // The implementors of RawLocalFileSystem were trying to be very smart. // They implement FileStatus#getOwener lazily -- the object // returned is really a RawLocalFileSystem that expect the // FileStatus#getPath to be unchanged so that it can get owner when needed. // Hence we need to interpose a new ViewFsFileStatus that works around. FileStatus status = res.targetFileSystem.getFileStatus(res.remainingPath); return new ViewFsFileStatus(status, this.makeQualified(f)); } @Override public void access(Path path, FsAction mode) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.access(res.remainingPath, mode); } @Override public FileStatus getFileLinkStatus(final Path f) throws AccessControlException, FileNotFoundException, UnsupportedFileSystemException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), false); // do not follow mount link return res.targetFileSystem.getFileLinkStatus(res.remainingPath); } @Override public FsStatus getFsStatus() throws AccessControlException, FileNotFoundException, IOException { return new FsStatus(0, 0, 0); } @Override public RemoteIterator<FileStatus> listStatusIterator(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { final InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true); final RemoteIterator<FileStatus> fsIter = res.targetFileSystem.listStatusIterator(res.remainingPath); if (res.isInternalDir()) { return fsIter; } return new RemoteIterator<FileStatus>() { final RemoteIterator<FileStatus> myIter; final ChRootedFs targetFs; { // Init myIter = fsIter; targetFs = (ChRootedFs) res.targetFileSystem; } @Override public boolean hasNext() throws IOException { return myIter.hasNext(); } @Override public FileStatus next() throws IOException { FileStatus status = myIter.next(); String suffix = targetFs.stripOutRoot(status.getPath()); return new ViewFsFileStatus(status, makeQualified( suffix.length() == 0 ? 
f : new Path(res.resolvedPath, suffix))); } }; } @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true); FileStatus[] statusLst = res.targetFileSystem.listStatus(res.remainingPath); if (!res.isInternalDir()) { // We need to change the name in the FileStatus as described in // {@link #getFileStatus } ChRootedFs targetFs; targetFs = (ChRootedFs) res.targetFileSystem; int i = 0; for (FileStatus status : statusLst) { String suffix = targetFs.stripOutRoot(status.getPath()); statusLst[i++] = new ViewFsFileStatus(status, this.makeQualified( suffix.length() == 0 ? f : new Path(res.resolvedPath, suffix))); } } return statusLst; } @Override public void mkdir(final Path dir, final FsPermission permission, final boolean createParent) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(dir), false); res.targetFileSystem.mkdir(res.remainingPath, permission, createParent); } @Override public FSDataInputStream open(final Path f, final int bufferSize) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.open(res.remainingPath, bufferSize); } @Override public boolean truncate(final Path f, final long newLength) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.truncate(res.remainingPath, newLength); } @Override public void renameInternal(final Path src, final Path dst, final boolean overwrite) throws IOException, UnresolvedLinkException { // passing resolveLastComponet as false to catch renaming a mount point // itself we need to catch this as an internal operation and fail. InodeTree.ResolveResult<AbstractFileSystem> resSrc = fsState.resolve(getUriPath(src), false); if (resSrc.isInternalDir()) { throw new AccessControlException( "Cannot Rename within internal dirs of mount table: it is readOnly"); } InodeTree.ResolveResult<AbstractFileSystem> resDst = fsState.resolve(getUriPath(dst), false); if (resDst.isInternalDir()) { throw new AccessControlException( "Cannot Rename within internal dirs of mount table: it is readOnly"); } /** // Alternate 1: renames within same file system - valid but we disallow // Alternate 2: (as described in next para - valid but we have disallowed it // // Note we compare the URIs. the URIs include the link targets. // hence we allow renames across mount links as long as the mount links // point to the same target. if (!resSrc.targetFileSystem.getUri().equals( resDst.targetFileSystem.getUri())) { throw new IOException("Renames across Mount points not supported"); } */ // // Alternate 3 : renames ONLY within the the same mount links. 
// if (resSrc.targetFileSystem !=resDst.targetFileSystem) { throw new IOException("Renames across Mount points not supported"); } resSrc.targetFileSystem.renameInternal(resSrc.remainingPath, resDst.remainingPath, overwrite); } @Override public void renameInternal(final Path src, final Path dst) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, UnresolvedLinkException, IOException { renameInternal(src, dst, false); } @Override public boolean supportsSymlinks() { return true; } @Override public void createSymlink(final Path target, final Path link, final boolean createParent) throws IOException, UnresolvedLinkException { InodeTree.ResolveResult<AbstractFileSystem> res; try { res = fsState.resolve(getUriPath(link), false); } catch (FileNotFoundException e) { if (createParent) { throw readOnlyMountTable("createSymlink", link); } else { throw e; } } assert(res.remainingPath != null); res.targetFileSystem.createSymlink(target, res.remainingPath, createParent); } @Override public Path getLinkTarget(final Path f) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), false); // do not follow mount link return res.targetFileSystem.getLinkTarget(res.remainingPath); } @Override public void setOwner(final Path f, final String username, final String groupname) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true); res.targetFileSystem.setOwner(res.remainingPath, username, groupname); } @Override public void setPermission(final Path f, final FsPermission permission) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true); res.targetFileSystem.setPermission(res.remainingPath, permission); } @Override public boolean setReplication(final Path f, final short replication) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true); return res.targetFileSystem.setReplication(res.remainingPath, replication); } @Override public void setTimes(final Path f, final long mtime, final long atime) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(f), true); res.targetFileSystem.setTimes(res.remainingPath, mtime, atime); } @Override public void setVerifyChecksum(final boolean verifyChecksum) throws AccessControlException, IOException { // This is a file system level operations, however ViewFs // points to many file systems. Noop for ViewFs. 
} public MountPoint[] getMountPoints() { List<InodeTree.MountPoint<AbstractFileSystem>> mountPoints = fsState.getMountPoints(); MountPoint[] result = new MountPoint[mountPoints.size()]; for ( int i = 0; i < mountPoints.size(); ++i ) { result[i] = new MountPoint(new Path(mountPoints.get(i).src), mountPoints.get(i).target.targetDirLinkList); } return result; } @Override public List<Token<?>> getDelegationTokens(String renewer) throws IOException { List<InodeTree.MountPoint<AbstractFileSystem>> mountPoints = fsState.getMountPoints(); int initialListSize = 0; for (InodeTree.MountPoint<AbstractFileSystem> im : mountPoints) { initialListSize += im.target.targetDirLinkList.length; } List<Token<?>> result = new ArrayList<Token<?>>(initialListSize); for ( int i = 0; i < mountPoints.size(); ++i ) { List<Token<?>> tokens = mountPoints.get(i).target.targetFileSystem.getDelegationTokens(renewer); if (tokens != null) { result.addAll(tokens); } } return result; } @Override public boolean isValidName(String src) { // Prefix validated at mount time and rest of path validated by mount // target. return true; } @Override public void modifyAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.modifyAclEntries(res.remainingPath, aclSpec); } @Override public void removeAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.removeAclEntries(res.remainingPath, aclSpec); } @Override public void removeDefaultAcl(Path path) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.removeDefaultAcl(res.remainingPath); } @Override public void removeAcl(Path path) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.removeAcl(res.remainingPath); } @Override public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.setAcl(res.remainingPath, aclSpec); } @Override public AclStatus getAclStatus(Path path) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); return res.targetFileSystem.getAclStatus(res.remainingPath); } @Override public void setXAttr(Path path, String name, byte[] value, EnumSet<XAttrSetFlag> flag) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.setXAttr(res.remainingPath, name, value, flag); } @Override public byte[] getXAttr(Path path, String name) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); return res.targetFileSystem.getXAttr(res.remainingPath, name); } @Override public Map<String, byte[]> getXAttrs(Path path) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); return res.targetFileSystem.getXAttrs(res.remainingPath); } @Override public Map<String, byte[]> getXAttrs(Path path, List<String> names) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); return res.targetFileSystem.getXAttrs(res.remainingPath, names); } @Override public List<String> 
listXAttrs(Path path) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); return res.targetFileSystem.listXAttrs(res.remainingPath); } @Override public void removeXAttr(Path path, String name) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.removeXAttr(res.remainingPath, name); } @Override public Path createSnapshot(Path path, String snapshotName) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve( getUriPath(path), true); return res.targetFileSystem.createSnapshot(res.remainingPath, snapshotName); } @Override public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve( getUriPath(path), true); res.targetFileSystem.renameSnapshot(res.remainingPath, snapshotOldName, snapshotNewName); } @Override public void deleteSnapshot(Path path, String snapshotName) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve( getUriPath(path), true); res.targetFileSystem.deleteSnapshot(res.remainingPath, snapshotName); } @Override public void setStoragePolicy(final Path path, final String policyName) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(path), true); res.targetFileSystem.setStoragePolicy(res.remainingPath, policyName); } /** * Retrieve the storage policy for a given file or directory. * * @param src file or directory path. * @return storage policy for give file. * @throws IOException */ public BlockStoragePolicySpi getStoragePolicy(final Path src) throws IOException { InodeTree.ResolveResult<AbstractFileSystem> res = fsState.resolve(getUriPath(src), true); return res.targetFileSystem.getStoragePolicy(res.remainingPath); } /* * An instance of this class represents an internal dir of the viewFs * ie internal dir of the mount table. * It is a ready only mount tbale and create, mkdir or delete operations * are not allowed. * If called on create or mkdir then this target is the parent of the * directory in which one is trying to create or mkdir; hence * in this case the path name passed in is the last component. * Otherwise this target is the end point of the path and hence * the path name passed in is null. 
*/ static class InternalDirOfViewFs extends AbstractFileSystem { final InodeTree.INodeDir<AbstractFileSystem> theInternalDir; final long creationTime; // of the the mount table final UserGroupInformation ugi; // the user/group of user who created mtable final URI myUri; // the URI of the outer ViewFs public InternalDirOfViewFs(final InodeTree.INodeDir<AbstractFileSystem> dir, final long cTime, final UserGroupInformation ugi, final URI uri) throws URISyntaxException { super(FsConstants.VIEWFS_URI, FsConstants.VIEWFS_SCHEME, false, -1); theInternalDir = dir; creationTime = cTime; this.ugi = ugi; myUri = uri; } static private void checkPathIsSlash(final Path f) throws IOException { if (f != InodeTree.SlashPath) { throw new IOException ( "Internal implementation error: expected file name to be /" ); } } @Override public FSDataOutputStream createInternal(final Path f, final EnumSet<CreateFlag> flag, final FsPermission absolutePermission, final int bufferSize, final short replication, final long blockSize, final Progressable progress, final ChecksumOpt checksumOpt, final boolean createParent) throws AccessControlException, FileAlreadyExistsException, FileNotFoundException, ParentNotDirectoryException, UnsupportedFileSystemException, UnresolvedLinkException, IOException { throw readOnlyMountTable("create", f); } @Override public boolean delete(final Path f, final boolean recursive) throws AccessControlException, IOException { checkPathIsSlash(f); throw readOnlyMountTable("delete", f); } @Override public BlockLocation[] getFileBlockLocations(final Path f, final long start, final long len) throws FileNotFoundException, IOException { checkPathIsSlash(f); throw new FileNotFoundException("Path points to dir not a file"); } @Override public FileChecksum getFileChecksum(final Path f) throws FileNotFoundException, IOException { checkPathIsSlash(f); throw new FileNotFoundException("Path points to dir not a file"); } @Override public FileStatus getFileStatus(final Path f) throws IOException { checkPathIsSlash(f); return new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0], new Path(theInternalDir.fullPath).makeQualified( myUri, null)); } @Override public FileStatus getFileLinkStatus(final Path f) throws FileNotFoundException { // look up i internalDirs children - ignore first Slash INode<AbstractFileSystem> inode = theInternalDir.children.get(f.toUri().toString().substring(1)); if (inode == null) { throw new FileNotFoundException( "viewFs internal mount table - missing entry:" + f); } FileStatus result; if (inode instanceof INodeLink) { INodeLink<AbstractFileSystem> inodelink = (INodeLink<AbstractFileSystem>) inode; result = new FileStatus(0, false, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0], inodelink.getTargetLink(), new Path(inode.fullPath).makeQualified( myUri, null)); } else { result = new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0], new Path(inode.fullPath).makeQualified( myUri, null)); } return result; } @Override public FsStatus getFsStatus() { return new FsStatus(0, 0, 0); } @Override public FsServerDefaults getServerDefaults() throws IOException { throw new IOException("FsServerDefaults not implemented yet"); } @Override public int getUriDefaultPort() { return -1; } @Override public FileStatus[] listStatus(final Path f) throws AccessControlException, IOException { checkPathIsSlash(f); FileStatus[] result = new 
FileStatus[theInternalDir.children.size()]; int i = 0; for (Entry<String, INode<AbstractFileSystem>> iEntry : theInternalDir.children.entrySet()) { INode<AbstractFileSystem> inode = iEntry.getValue(); if (inode instanceof INodeLink ) { INodeLink<AbstractFileSystem> link = (INodeLink<AbstractFileSystem>) inode; result[i++] = new FileStatus(0, false, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0], link.getTargetLink(), new Path(inode.fullPath).makeQualified( myUri, null)); } else { result[i++] = new FileStatus(0, true, 0, 0, creationTime, creationTime, PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0], new Path(inode.fullPath).makeQualified( myUri, null)); } } return result; } @Override public void mkdir(final Path dir, final FsPermission permission, final boolean createParent) throws AccessControlException, FileAlreadyExistsException { if (theInternalDir.isRoot && dir == null) { throw new FileAlreadyExistsException("/ already exits"); } throw readOnlyMountTable("mkdir", dir); } @Override public FSDataInputStream open(final Path f, final int bufferSize) throws FileNotFoundException, IOException { checkPathIsSlash(f); throw new FileNotFoundException("Path points to dir not a file"); } @Override public boolean truncate(final Path f, final long newLength) throws FileNotFoundException, IOException { checkPathIsSlash(f); throw readOnlyMountTable("truncate", f); } @Override public void renameInternal(final Path src, final Path dst) throws AccessControlException, IOException { checkPathIsSlash(src); checkPathIsSlash(dst); throw readOnlyMountTable("rename", src); } @Override public boolean supportsSymlinks() { return true; } @Override public void createSymlink(final Path target, final Path link, final boolean createParent) throws AccessControlException { throw readOnlyMountTable("createSymlink", link); } @Override public Path getLinkTarget(final Path f) throws FileNotFoundException, IOException { return getFileLinkStatus(f).getSymlink(); } @Override public void setOwner(final Path f, final String username, final String groupname) throws AccessControlException, IOException { checkPathIsSlash(f); throw readOnlyMountTable("setOwner", f); } @Override public void setPermission(final Path f, final FsPermission permission) throws AccessControlException, IOException { checkPathIsSlash(f); throw readOnlyMountTable("setPermission", f); } @Override public boolean setReplication(final Path f, final short replication) throws AccessControlException, IOException { checkPathIsSlash(f); throw readOnlyMountTable("setReplication", f); } @Override public void setTimes(final Path f, final long mtime, final long atime) throws AccessControlException, IOException { checkPathIsSlash(f); throw readOnlyMountTable("setTimes", f); } @Override public void setVerifyChecksum(final boolean verifyChecksum) throws AccessControlException { throw readOnlyMountTable("setVerifyChecksum", ""); } @Override public void modifyAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("modifyAclEntries", path); } @Override public void removeAclEntries(Path path, List<AclEntry> aclSpec) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("removeAclEntries", path); } @Override public void removeDefaultAcl(Path path) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("removeDefaultAcl", path); } @Override public void removeAcl(Path path) throws IOException { checkPathIsSlash(path); throw 
readOnlyMountTable("removeAcl", path); } @Override public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("setAcl", path); } @Override public AclStatus getAclStatus(Path path) throws IOException { checkPathIsSlash(path); return new AclStatus.Builder().owner(ugi.getUserName()) .group(ugi.getGroupNames()[0]) .addEntries(AclUtil.getMinimalAcl(PERMISSION_555)) .stickyBit(false).build(); } @Override public void setXAttr(Path path, String name, byte[] value, EnumSet<XAttrSetFlag> flag) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("setXAttr", path); } @Override public byte[] getXAttr(Path path, String name) throws IOException { throw new NotInMountpointException(path, "getXAttr"); } @Override public Map<String, byte[]> getXAttrs(Path path) throws IOException { throw new NotInMountpointException(path, "getXAttrs"); } @Override public Map<String, byte[]> getXAttrs(Path path, List<String> names) throws IOException { throw new NotInMountpointException(path, "getXAttrs"); } @Override public List<String> listXAttrs(Path path) throws IOException { throw new NotInMountpointException(path, "listXAttrs"); } @Override public void removeXAttr(Path path, String name) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("removeXAttr", path); } @Override public Path createSnapshot(Path path, String snapshotName) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("createSnapshot", path); } @Override public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("renameSnapshot", path); } @Override public void deleteSnapshot(Path path, String snapshotName) throws IOException { checkPathIsSlash(path); throw readOnlyMountTable("deleteSnapshot", path); } @Override public void setStoragePolicy(Path path, String policyName) throws IOException { throw readOnlyMountTable("setStoragePolicy", path); } } }
40,016
35.247283
85
java
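InternalDirOfViewFs above answers read calls about the mount table itself and rejects every mutation through readOnlyMountTable(...), which surfaces to callers as an AccessControlException. A minimal client-side sketch of that behaviour, assuming a single mount link configured programmatically; the /data mount point and the hdfs://nn1:8020 target are hypothetical:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.security.AccessControlException;

public class ViewFsReadOnlyRootSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical mount link: viewfs:///data -> an HDFS directory.
    ConfigUtil.addLink(conf, "/data", new URI("hdfs://nn1:8020/projects/data"));

    FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);

    // Reads of the mount-table root are served by InternalDirOfViewFs.
    System.out.println(fc.getFileStatus(new Path("/")));

    try {
      // Mutating an internal dir is rejected: the mount table is read-only.
      fc.mkdir(new Path("/notAMountPoint"), FsPermission.getDirDefault(), false);
    } catch (AccessControlException expected) {
      System.out.println("Rejected as expected: " + expected.getMessage());
    }
  }
}

The same read-only behaviour applies to delete, rename, setOwner and the other mutators overridden above.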
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/NotInMountpointException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.viewfs; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; /** * NotInMountpointException extends UnsupportedOperationException. * Exception class used in cases where the given path is not mounted * through viewfs. */ @InterfaceAudience.Public @InterfaceStability.Evolving /* Evolving for a release, to be changed to Stable */ @SuppressWarnings("serial") public class NotInMountpointException extends UnsupportedOperationException { final String msg; public NotInMountpointException(Path path, String operation) { msg = operation + " on path `" + path + "' is not within a mount point"; } public NotInMountpointException(String operation) { msg = operation + " on empty path is invalid"; } @Override public String getMessage() { return msg; } }
1,746
34.653061
80
java
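Because the exception keeps its message in the msg field and overrides getMessage(), callers handle it like any other UnsupportedOperationException. A small hedged sketch; the path and operation name are illustrative only:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.NotInMountpointException;

public class NotInMountpointSketch {
  public static void main(String[] args) {
    try {
      // Illustrative only: viewfs throws this when an operation such as
      // getXAttrs targets a path that is not covered by any mount point.
      throw new NotInMountpointException(new Path("/"), "getXAttrs");
    } catch (UnsupportedOperationException e) {
      // Prints: getXAttrs on path `/' is not within a mount point
      System.out.println(e.getMessage());
    }
  }
}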
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsLocatedFileStatus.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.viewfs; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import java.io.IOException; class ViewFsLocatedFileStatus extends LocatedFileStatus { final LocatedFileStatus myFs; Path modifiedPath; ViewFsLocatedFileStatus(LocatedFileStatus locatedFileStatus, Path path) { myFs = locatedFileStatus; modifiedPath = path; } @Override public long getLen() { return myFs.getLen(); } @Override public boolean isFile() { return myFs.isFile(); } @Override public boolean isDirectory() { return myFs.isDirectory(); } @Override @SuppressWarnings("deprecation") public boolean isDir() { return myFs.isDirectory(); } @Override public boolean isSymlink() { return myFs.isSymlink(); } @Override public long getBlockSize() { return myFs.getBlockSize(); } @Override public short getReplication() { return myFs.getReplication(); } @Override public long getModificationTime() { return myFs.getModificationTime(); } @Override public long getAccessTime() { return myFs.getAccessTime(); } @Override public FsPermission getPermission() { return myFs.getPermission(); } @Override public String getOwner() { return myFs.getOwner(); } @Override public String getGroup() { return myFs.getGroup(); } @Override public Path getPath() { return modifiedPath; } @Override public void setPath(final Path p) { modifiedPath = p; } @Override public Path getSymlink() throws IOException { return myFs.getSymlink(); } @Override public void setSymlink(Path p) { myFs.setSymlink(p); } @Override public BlockLocation[] getBlockLocations() { return myFs.getBlockLocations(); } @Override public int compareTo(FileStatus o) { return super.compareTo(o); } @Override public boolean equals(Object o) { return super.equals(o); } @Override public int hashCode() { return super.hashCode(); } }
2,984
20.630435
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.text.SimpleDateFormat; import java.util.Arrays; import java.util.Comparator; import java.util.Date; import java.util.LinkedList; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; /** * Get a listing of all files that match the file patterns. */ @InterfaceAudience.Private @InterfaceStability.Unstable class Ls extends FsCommand { public static void registerCommands(CommandFactory factory) { factory.addClass(Ls.class, "-ls"); factory.addClass(Lsr.class, "-lsr"); } private static final String OPTION_PATHONLY = "C"; private static final String OPTION_DIRECTORY = "d"; private static final String OPTION_HUMAN = "h"; private static final String OPTION_RECURSIVE = "R"; private static final String OPTION_REVERSE = "r"; private static final String OPTION_MTIME = "t"; private static final String OPTION_ATIME = "u"; private static final String OPTION_SIZE = "S"; public static final String NAME = "ls"; public static final String USAGE = "[-" + OPTION_PATHONLY + "] [-" + OPTION_DIRECTORY + "] [-" + OPTION_HUMAN + "] [-" + OPTION_RECURSIVE + "] [-" + OPTION_MTIME + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE + "] [-" + OPTION_ATIME + "] [<path> ...]"; public static final String DESCRIPTION = "List the contents that match the specified file pattern. If " + "path is not specified, the contents of /user/<currentUser> " + "will be listed. 
For a directory a list of its direct children " + "is returned (unless -" + OPTION_DIRECTORY + " option is specified).\n\n" + "Directory entries are of the form:\n" + "\tpermissions - userId groupId sizeOfDirectory(in bytes) modificationDate(yyyy-MM-dd HH:mm) directoryName\n\n" + "and file entries are of the form:\n" + "\tpermissions numberOfReplicas userId groupId sizeOfFile(in bytes) modificationDate(yyyy-MM-dd HH:mm) fileName\n\n" + " -" + OPTION_PATHONLY + " Display the paths of files and directories only.\n" + " -" + OPTION_DIRECTORY + " Directories are listed as plain files.\n" + " -" + OPTION_HUMAN + " Formats the sizes of files in a human-readable fashion\n" + " rather than a number of bytes.\n" + " -" + OPTION_RECURSIVE + " Recursively list the contents of directories.\n" + " -" + OPTION_MTIME + " Sort files by modification time (most recent first).\n" + " -" + OPTION_SIZE + " Sort files by size.\n" + " -" + OPTION_REVERSE + " Reverse the order of the sort.\n" + " -" + OPTION_ATIME + " Use time of last access instead of modification for\n" + " display and sorting."; protected final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm"); protected int maxRepl = 3, maxLen = 10, maxOwner = 0, maxGroup = 0; protected String lineFormat; private boolean pathOnly; protected boolean dirRecurse; private boolean orderReverse; private boolean orderTime; private boolean orderSize; private boolean useAtime; private Comparator<PathData> orderComparator; protected boolean humanReadable = false; protected Ls() {} protected Ls(Configuration conf) { super(conf); } protected String formatSize(long size) { return humanReadable ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1) : String.valueOf(size); } @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN, OPTION_RECURSIVE, OPTION_REVERSE, OPTION_MTIME, OPTION_SIZE, OPTION_ATIME); cf.parse(args); pathOnly = cf.getOpt(OPTION_PATHONLY); dirRecurse = !cf.getOpt(OPTION_DIRECTORY); setRecursive(cf.getOpt(OPTION_RECURSIVE) && dirRecurse); humanReadable = cf.getOpt(OPTION_HUMAN); orderReverse = cf.getOpt(OPTION_REVERSE); orderTime = cf.getOpt(OPTION_MTIME); orderSize = !orderTime && cf.getOpt(OPTION_SIZE); useAtime = cf.getOpt(OPTION_ATIME); if (args.isEmpty()) args.add(Path.CUR_DIR); initialiseOrderComparator(); } /** * Should display only paths of files and directories. * @return true display paths only, false display all fields */ @InterfaceAudience.Private boolean isPathOnly() { return this.pathOnly; } /** * Should the contents of the directory be shown or just the directory? * @return true if directory contents, false if just directory */ @InterfaceAudience.Private boolean isDirRecurse() { return this.dirRecurse; } /** * Should file sizes be returned in human readable format rather than bytes? * @return true is human readable, false if bytes */ @InterfaceAudience.Private boolean isHumanReadable() { return this.humanReadable; } /** * Should directory contents be displayed in reverse order * @return true reverse order, false default order */ @InterfaceAudience.Private boolean isOrderReverse() { return this.orderReverse; } /** * Should directory contents be displayed in mtime order. * @return true mtime order, false default order */ @InterfaceAudience.Private boolean isOrderTime() { return this.orderTime; } /** * Should directory contents be displayed in size order. 
* @return true size order, false default order */ @InterfaceAudience.Private boolean isOrderSize() { return this.orderSize; } /** * Should access time be used rather than modification time. * @return true use access time, false use modification time */ @InterfaceAudience.Private boolean isUseAtime() { return this.useAtime; } @Override protected void processPathArgument(PathData item) throws IOException { // implicitly recurse once for cmdline directories if (dirRecurse && item.stat.isDirectory()) { recursePath(item); } else { super.processPathArgument(item); } } @Override protected void processPaths(PathData parent, PathData ... items) throws IOException { if (parent != null && !isRecursive() && items.length != 0) { if (!pathOnly) { out.println("Found " + items.length + " items"); } Arrays.sort(items, getOrderComparator()); } if (!pathOnly) { adjustColumnWidths(items); } super.processPaths(parent, items); } @Override protected void processPath(PathData item) throws IOException { if (pathOnly) { out.println(item.toString()); return; } FileStatus stat = item.stat; String line = String.format(lineFormat, (stat.isDirectory() ? "d" : "-"), stat.getPermission() + (stat.getPermission().getAclBit() ? "+" : " "), (stat.isFile() ? stat.getReplication() : "-"), stat.getOwner(), stat.getGroup(), formatSize(stat.getLen()), dateFormat.format(new Date(isUseAtime() ? stat.getAccessTime() : stat.getModificationTime())), item); out.println(line); } /** * Compute column widths and rebuild the format string * @param items to find the max field width for each column */ private void adjustColumnWidths(PathData items[]) { for (PathData item : items) { FileStatus stat = item.stat; maxRepl = maxLength(maxRepl, stat.getReplication()); maxLen = maxLength(maxLen, stat.getLen()); maxOwner = maxLength(maxOwner, stat.getOwner()); maxGroup = maxLength(maxGroup, stat.getGroup()); } StringBuilder fmt = new StringBuilder(); fmt.append("%s%s"); // permission string fmt.append("%" + maxRepl + "s "); // Do not use '%-0s' as a formatting conversion, since it will throw a // a MissingFormatWidthException if it is used in String.format(). // http://docs.oracle.com/javase/1.5.0/docs/api/java/util/Formatter.html#intFlags fmt.append((maxOwner > 0) ? "%-" + maxOwner + "s " : "%s"); fmt.append((maxGroup > 0) ? "%-" + maxGroup + "s " : "%s"); fmt.append("%" + maxLen + "s "); fmt.append("%s %s"); // mod time & path lineFormat = fmt.toString(); } private int maxLength(int n, Object value) { return Math.max(n, (value != null) ? String.valueOf(value).length() : 0); } /** * Get the comparator to be used for sorting files. * @return comparator */ private Comparator<PathData> getOrderComparator() { return this.orderComparator; } /** * Initialise the comparator to be used for sorting files. If multiple options * are selected then the order is chosen in the following precedence: - * Modification time (or access time if requested) - File size - File name */ private void initialiseOrderComparator() { if (isOrderTime()) { // mtime is ordered latest date first in line with the unix ls -t command this.orderComparator = new Comparator<PathData>() { public int compare(PathData o1, PathData o2) { Long o1Time = (isUseAtime() ? o1.stat.getAccessTime() : o1.stat.getModificationTime()); Long o2Time = (isUseAtime() ? o2.stat.getAccessTime() : o2.stat.getModificationTime()); return o2Time.compareTo(o1Time) * (isOrderReverse() ? 
-1 : 1); } }; } else if (isOrderSize()) { // size is ordered largest first in line with the unix ls -S command this.orderComparator = new Comparator<PathData>() { public int compare(PathData o1, PathData o2) { Long o1Length = o1.stat.getLen(); Long o2Length = o2.stat.getLen(); return o2Length.compareTo(o1Length) * (isOrderReverse() ? -1 : 1); } }; } else { this.orderComparator = new Comparator<PathData>() { public int compare(PathData o1, PathData o2) { return o1.compareTo(o2) * (isOrderReverse() ? -1 : 1); } }; } } /** * Get a recursive listing of all files that match the file patterns. * Same as "-ls -R" */ public static class Lsr extends Ls { public static final String NAME = "lsr"; @Override protected void processOptions(LinkedList<String> args) throws IOException { args.addFirst("-R"); super.processOptions(args); } @Override public String getReplacementCommand() { return "ls -R"; } } }
11,620
33.079179
128
java
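The sort flags described in DESCRIPTION are applied in processPaths() via the comparator built in initialiseOrderComparator(). A hedged sketch of driving the same command programmatically through FsShell; the /user path is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class LsSortedSketch {
  public static void main(String[] args) throws Exception {
    FsShell shell = new FsShell(new Configuration());
    // Equivalent of `hadoop fs -ls -R -t -r /user`: recursive listing,
    // sorted by modification time (-t, newest first) and then reversed (-r).
    int exitCode = ToolRunner.run(shell, new String[] {"-ls", "-R", "-t", "-r", "/user"});
    System.exit(exitCode);
  }
}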
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.util.LinkedList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Perform shell-like file tests */ @InterfaceAudience.Private @InterfaceStability.Unstable class Test extends FsCommand { public static void registerCommands(CommandFactory factory) { factory.addClass(Test.class, "-test"); } public static final String NAME = "test"; public static final String USAGE = "-[defsz] <path>"; public static final String DESCRIPTION = "Answer various questions about <path>, with result via exit status.\n" + " -d return 0 if <path> is a directory.\n" + " -e return 0 if <path> exists.\n" + " -f return 0 if <path> is a file.\n" + " -s return 0 if file <path> is greater than zero bytes in size.\n" + " -z return 0 if file <path> is zero bytes in size, else return 1."; private char flag; @Override protected void processOptions(LinkedList<String> args) { CommandFormat cf = new CommandFormat(1, 1, "e", "d", "f", "s", "z"); cf.parse(args); String[] opts = cf.getOpts().toArray(new String[0]); switch (opts.length) { case 0: throw new IllegalArgumentException("No test flag given"); case 1: flag = opts[0].charAt(0); break; default: throw new IllegalArgumentException("Only one test flag is allowed"); } } @Override protected void processPath(PathData item) throws IOException { boolean test = false; switch (flag) { case 'e': test = true; break; case 'd': test = item.stat.isDirectory(); break; case 'f': test = item.stat.isFile(); break; case 's': test = (item.stat.getLen() > 0); break; case 'z': test = (item.stat.getLen() == 0); break; default: break; } if (!test) exitCode = 1; } @Override protected void processNonexistentPath(PathData item) throws IOException { exitCode = 1; } }
2,942
29.340206
77
java
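Since -test reports its answer purely through the exit status, a caller inspects the return code rather than any printed output. A hedged sketch; the path is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class TestFlagSketch {
  public static void main(String[] args) throws Exception {
    FsShell shell = new FsShell(new Configuration());
    // `-test -d <path>` exits 0 when the path exists and is a directory, 1 otherwise.
    int rc = ToolRunner.run(shell, new String[] {"-test", "-d", "/user/alice"});
    System.out.println("is a directory: " + (rc == 0));
  }
}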
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.Private @InterfaceStability.Unstable package org.apache.hadoop.fs.shell; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
1,018
41.458333
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.util.Iterator; import java.util.LinkedList; import java.util.Locale; import java.util.Map; import java.util.Map.Entry; import com.google.common.base.Enums; import com.google.common.base.Function; import com.google.common.base.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.XAttrCodec; import org.apache.hadoop.util.StringUtils; /** * XAttr related operations */ @InterfaceAudience.Private @InterfaceStability.Evolving class XAttrCommands extends FsCommand { private static final String GET_FATTR = "getfattr"; private static final String SET_FATTR = "setfattr"; public static void registerCommands(CommandFactory factory) { factory.addClass(GetfattrCommand.class, "-" + GET_FATTR); factory.addClass(SetfattrCommand.class, "-" + SET_FATTR); } /** * Implements the '-getfattr' command for the FsShell. */ public static class GetfattrCommand extends FsCommand { public static final String NAME = GET_FATTR; public static final String USAGE = "[-R] {-n name | -d} [-e en] <path>"; public static final String DESCRIPTION = "Displays the extended attribute names and values (if any) for a " + "file or directory.\n" + "-R: Recursively list the attributes for all files and directories.\n" + "-n name: Dump the named extended attribute value.\n" + "-d: Dump all extended attribute values associated with pathname.\n" + "-e <encoding>: Encode values after retrieving them." + "Valid encodings are \"text\", \"hex\", and \"base64\". 
" + "Values encoded as text strings are enclosed in double quotes (\")," + " and values encoded as hexadecimal and base64 are prefixed with " + "0x and 0s, respectively.\n" + "<path>: The file or directory.\n"; private final static Function<String, XAttrCodec> enValueOfFunc = Enums.valueOfFunction(XAttrCodec.class); private String name = null; private boolean dump = false; private XAttrCodec encoding = XAttrCodec.TEXT; @Override protected void processOptions(LinkedList<String> args) throws IOException { name = StringUtils.popOptionWithArgument("-n", args); String en = StringUtils.popOptionWithArgument("-e", args); if (en != null) { try { encoding = enValueOfFunc.apply(StringUtils.toUpperCase(en)); } catch (IllegalArgumentException e) { throw new IllegalArgumentException( "Invalid/unsupported encoding option specified: " + en); } Preconditions.checkArgument(encoding != null, "Invalid/unsupported encoding option specified: " + en); } boolean r = StringUtils.popOption("-R", args); setRecursive(r); dump = StringUtils.popOption("-d", args); if (!dump && name == null) { throw new HadoopIllegalArgumentException( "Must specify '-n name' or '-d' option."); } if (args.isEmpty()) { throw new HadoopIllegalArgumentException("<path> is missing."); } if (args.size() > 1) { throw new HadoopIllegalArgumentException("Too many arguments."); } } @Override protected void processPath(PathData item) throws IOException { out.println("# file: " + item); if (dump) { Map<String, byte[]> xattrs = item.fs.getXAttrs(item.path); if (xattrs != null) { Iterator<Entry<String, byte[]>> iter = xattrs.entrySet().iterator(); while(iter.hasNext()) { Entry<String, byte[]> entry = iter.next(); printXAttr(entry.getKey(), entry.getValue()); } } } else { byte[] value = item.fs.getXAttr(item.path, name); printXAttr(name, value); } } private void printXAttr(String name, byte[] value) throws IOException{ if (value != null) { if (value.length != 0) { out.println(name + "=" + XAttrCodec.encodeValue(value, encoding)); } else { out.println(name); } } } } /** * Implements the '-setfattr' command for the FsShell. */ public static class SetfattrCommand extends FsCommand { public static final String NAME = SET_FATTR; public static final String USAGE = "{-n name [-v value] | -x name} <path>"; public static final String DESCRIPTION = "Sets an extended attribute name and value for a file or directory.\n" + "-n name: The extended attribute name.\n" + "-v value: The extended attribute value. There are three different " + "encoding methods for the value. If the argument is enclosed in double " + "quotes, then the value is the string inside the quotes. If the " + "argument is prefixed with 0x or 0X, then it is taken as a hexadecimal " + "number. 
If the argument begins with 0s or 0S, then it is taken as a " + "base64 encoding.\n" + "-x name: Remove the extended attribute.\n" + "<path>: The file or directory.\n"; private String name = null; private byte[] value = null; private String xname = null; @Override protected void processOptions(LinkedList<String> args) throws IOException { name = StringUtils.popOptionWithArgument("-n", args); String v = StringUtils.popOptionWithArgument("-v", args); if (v != null) { value = XAttrCodec.decodeValue(v); } xname = StringUtils.popOptionWithArgument("-x", args); if (name != null && xname != null) { throw new HadoopIllegalArgumentException( "Can not specify both '-n name' and '-x name' option."); } if (name == null && xname == null) { throw new HadoopIllegalArgumentException( "Must specify '-n name' or '-x name' option."); } if (args.isEmpty()) { throw new HadoopIllegalArgumentException("<path> is missing."); } if (args.size() > 1) { throw new HadoopIllegalArgumentException("Too many arguments."); } } @Override protected void processPath(PathData item) throws IOException { if (name != null) { item.fs.setXAttr(item.path, name, value); } else if (xname != null) { item.fs.removeXAttr(item.path, xname); } } } }
7,281
36.34359
80
java
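Both subcommands delegate to the FileSystem extended-attribute API, so the shell behaviour can be reproduced directly against a filesystem that supports xattrs (HDFS, for example). A hedged sketch; the file path and attribute name are illustrative:

import java.nio.charset.StandardCharsets;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class XAttrSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/example.txt"); // assumed to exist

    // What `-setfattr -n user.origin -v web <path>` does under the covers.
    fs.setXAttr(file, "user.origin", "web".getBytes(StandardCharsets.UTF_8));

    // What `-getfattr -d <path>` dumps: every attribute name and value.
    Map<String, byte[]> xattrs = fs.getXAttrs(file);
    for (Map.Entry<String, byte[]> e : xattrs.entrySet()) {
      System.out.println(e.getKey() + "="
          + new String(e.getValue(), StandardCharsets.UTF_8));
    }
  }
}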
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Truncate.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.PathIsDirectoryException; import java.io.IOException; import java.util.LinkedList; import java.util.List; /** * Truncates a file to a new size */ @InterfaceAudience.Private @InterfaceStability.Unstable public class Truncate extends FsCommand { public static void registerCommands(CommandFactory factory) { factory.addClass(Truncate.class, "-truncate"); } public static final String NAME = "truncate"; public static final String USAGE = "[-w] <length> <path> ..."; public static final String DESCRIPTION = "Truncate all files that match the specified file pattern to the " + "specified length.\n" + "-w: Requests that the command wait for block recovery to complete, " + "if necessary."; protected long newLength = -1; protected List<PathData> waitList = new LinkedList<>(); protected boolean waitOpt = false; @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "w"); cf.parse(args); waitOpt = cf.getOpt("w"); try { newLength = Long.parseLong(args.removeFirst()); } catch(NumberFormatException nfe) { displayWarning("Illegal length, a non-negative integer expected"); throw nfe; } if(newLength < 0) { throw new IllegalArgumentException("length must be >= 0"); } } @Override protected void processArguments(LinkedList<PathData> args) throws IOException { super.processArguments(args); if (waitOpt) waitForRecovery(); } @Override protected void processPath(PathData item) throws IOException { if(item.stat.isDirectory()) { throw new PathIsDirectoryException(item.toString()); } long oldLength = item.stat.getLen(); if(newLength > oldLength) { throw new IllegalArgumentException( "Cannot truncate to a larger file size. Current size: " + oldLength + ", truncate size: " + newLength + "."); } if(item.fs.truncate(item.path, newLength)) { out.println("Truncated " + item + " to length: " + newLength); } else if(waitOpt) { waitList.add(item); } else { out.println("Truncating " + item + " to length: " + newLength + ". " + "Wait for block recovery to complete before further updating this " + "file."); } } /** * Wait for all files in waitList to have length equal to newLength. */ private void waitForRecovery() throws IOException { for(PathData item : waitList) { out.println("Waiting for " + item + " ..."); out.flush(); for(;;) { item.refreshStatus(); if(item.stat.getLen() == newLength) break; try {Thread.sleep(1000);} catch(InterruptedException ignored) {} } out.println("Truncated " + item + " to length: " + newLength); out.flush(); } } }
3,866
31.771186
79
java
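The command is a thin wrapper over FileSystem#truncate, whose boolean result is what drives the waitList/-w handling above. A hedged sketch; the path and length are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/data.log"); // assumed to be longer than 1024 bytes

    // Same call the shell issues: returns true if the new length is visible
    // immediately, false while block recovery is still in progress
    // (which is what the -w option polls for).
    boolean done = fs.truncate(file, 1024L);
    System.out.println(done ? "truncated" : "recovery in progress");
  }
}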
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.util.LinkedList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotDirectoryException; import com.google.common.base.Preconditions; /** * Snapshot related operations */ @InterfaceAudience.Private @InterfaceStability.Unstable class SnapshotCommands extends FsCommand { private final static String CREATE_SNAPSHOT = "createSnapshot"; private final static String DELETE_SNAPSHOT = "deleteSnapshot"; private final static String RENAME_SNAPSHOT = "renameSnapshot"; public static void registerCommands(CommandFactory factory) { factory.addClass(CreateSnapshot.class, "-" + CREATE_SNAPSHOT); factory.addClass(DeleteSnapshot.class, "-" + DELETE_SNAPSHOT); factory.addClass(RenameSnapshot.class, "-" + RENAME_SNAPSHOT); } /** * Create a snapshot */ public static class CreateSnapshot extends FsCommand { public static final String NAME = CREATE_SNAPSHOT; public static final String USAGE = "<snapshotDir> [<snapshotName>]"; public static final String DESCRIPTION = "Create a snapshot on a directory"; private String snapshotName = null; @Override protected void processPath(PathData item) throws IOException { if (!item.stat.isDirectory()) { throw new PathIsNotDirectoryException(item.toString()); } } @Override protected void processOptions(LinkedList<String> args) throws IOException { if (args.size() == 0) { throw new IllegalArgumentException("<snapshotDir> is missing."); } if (args.size() > 2) { throw new IllegalArgumentException("Too many arguments."); } if (args.size() == 2) { snapshotName = args.removeLast(); } } @Override protected void processArguments(LinkedList<PathData> items) throws IOException { super.processArguments(items); if (numErrors != 0) { // check for error collecting paths return; } assert(items.size() == 1); PathData sroot = items.getFirst(); Path snapshotPath = sroot.fs.createSnapshot(sroot.path, snapshotName); out.println("Created snapshot " + snapshotPath); } } /** * Delete a snapshot */ public static class DeleteSnapshot extends FsCommand { public static final String NAME = DELETE_SNAPSHOT; public static final String USAGE = "<snapshotDir> <snapshotName>"; public static final String DESCRIPTION = "Delete a snapshot from a directory"; private String snapshotName; @Override protected void processPath(PathData item) throws IOException { if (!item.stat.isDirectory()) { throw new PathIsNotDirectoryException(item.toString()); } } @Override protected void processOptions(LinkedList<String> args) throws IOException { if (args.size() != 2) { throw new IllegalArgumentException("Incorrect number of arguments."); } 
snapshotName = args.removeLast(); } @Override protected void processArguments(LinkedList<PathData> items) throws IOException { super.processArguments(items); if (numErrors != 0) { // check for error collecting paths return; } assert (items.size() == 1); PathData sroot = items.getFirst(); sroot.fs.deleteSnapshot(sroot.path, snapshotName); } } /** * Rename a snapshot */ public static class RenameSnapshot extends FsCommand { public static final String NAME = RENAME_SNAPSHOT; public static final String USAGE = "<snapshotDir> <oldName> <newName>"; public static final String DESCRIPTION = "Rename a snapshot from oldName to newName"; private String oldName; private String newName; @Override protected void processPath(PathData item) throws IOException { if (!item.stat.isDirectory()) { throw new PathIsNotDirectoryException(item.toString()); } } @Override protected void processOptions(LinkedList<String> args) throws IOException { if (args.size() != 3) { throw new IllegalArgumentException("Incorrect number of arguments."); } newName = args.removeLast(); oldName = args.removeLast(); } @Override protected void processArguments(LinkedList<PathData> items) throws IOException { super.processArguments(items); if (numErrors != 0) { // check for error collecting paths return; } Preconditions.checkArgument(items.size() == 1); PathData sroot = items.getFirst(); sroot.fs.renameSnapshot(sroot.path, oldName, newName); } } }
5,620
31.304598
80
java
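Each subcommand ends up calling the matching snapshot method on FileSystem. A hedged sketch, assuming the directory has already been made snapshottable by an administrator (for HDFS, via hdfs dfsadmin -allowSnapshot); the directory path and snapshot names are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/user/alice/project"); // hypothetical snapshottable dir

    // -createSnapshot, -renameSnapshot and -deleteSnapshot map onto these calls.
    Path snap = fs.createSnapshot(dir, "s1");
    System.out.println("Created snapshot " + snap);
    fs.renameSnapshot(dir, "s1", "s1-renamed");
    fs.deleteSnapshot(dir, "s1-renamed");
  }
}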
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintStream; import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Arrays; import java.util.LinkedList; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathNotFoundException; import org.apache.hadoop.util.StringUtils; /** * An abstract class for the execution of a file system command */ @InterfaceAudience.Private @InterfaceStability.Evolving abstract public class Command extends Configured { /** default name of the command */ public static String NAME; /** the command's usage switches and arguments format */ public static String USAGE; /** the command's long description */ public static String DESCRIPTION; protected String[] args; protected String name; protected int exitCode = 0; protected int numErrors = 0; protected boolean recursive = false; private int depth = 0; protected ArrayList<Exception> exceptions = new ArrayList<Exception>(); private static final Log LOG = LogFactory.getLog(Command.class); /** allows stdout to be captured if necessary */ public PrintStream out = System.out; /** allows stderr to be captured if necessary */ public PrintStream err = System.err; /** allows the command factory to be used if necessary */ private CommandFactory commandFactory = null; /** Constructor */ protected Command() { out = System.out; err = System.err; } /** Constructor */ protected Command(Configuration conf) { super(conf); } /** @return the command's name excluding the leading character - */ abstract public String getCommandName(); protected void setRecursive(boolean flag) { recursive = flag; } protected boolean isRecursive() { return recursive; } protected int getDepth() { return depth; } /** * Execute the command on the input path * * @param path the input path * @throws IOException if any error occurs */ abstract protected void run(Path path) throws IOException; /** * For each source path, execute the command * * @return 0 if it runs successfully; -1 if it fails */ public int runAll() { int exitCode = 0; for (String src : args) { try { PathData[] srcs = PathData.expandAsGlob(src, getConf()); for (PathData s : srcs) { run(s.path); } } catch (IOException e) { exitCode = -1; displayError(e); } } return exitCode; } /** sets the command factory for later use */ public void setCommandFactory(CommandFactory factory) { this.commandFactory = factory; } /** retrieves the 
command factory */ protected CommandFactory getCommandFactory() { return this.commandFactory; } /** * Invokes the command handler. The default behavior is to process options, * expand arguments, and then process each argument. * <pre> * run * |-> {@link #processOptions(LinkedList)} * \-> {@link #processRawArguments(LinkedList)} * |-> {@link #expandArguments(LinkedList)} * | \-> {@link #expandArgument(String)}* * \-> {@link #processArguments(LinkedList)} * |-> {@link #processArgument(PathData)}* * | |-> {@link #processPathArgument(PathData)} * | \-> {@link #processPaths(PathData, PathData...)} * | \-> {@link #processPath(PathData)}* * \-> {@link #processNonexistentPath(PathData)} * </pre> * Most commands will chose to implement just * {@link #processOptions(LinkedList)} and {@link #processPath(PathData)} * * @param argv the list of command line arguments * @return the exit code for the command * @throws IllegalArgumentException if called with invalid arguments */ public int run(String...argv) { LinkedList<String> args = new LinkedList<String>(Arrays.asList(argv)); try { if (isDeprecated()) { displayWarning( "DEPRECATED: Please use '"+ getReplacementCommand() + "' instead."); } processOptions(args); processRawArguments(args); } catch (IOException e) { displayError(e); } return (numErrors == 0) ? exitCode : exitCodeForError(); } /** * The exit code to be returned if any errors occur during execution. * This method is needed to account for the inconsistency in the exit * codes returned by various commands. * @return a non-zero exit code */ protected int exitCodeForError() { return 1; } /** * Must be implemented by commands to process the command line flags and * check the bounds of the remaining arguments. If an * IllegalArgumentException is thrown, the FsShell object will print the * short usage of the command. * @param args the command line arguments * @throws IOException */ protected void processOptions(LinkedList<String> args) throws IOException {} /** * Allows commands that don't use paths to handle the raw arguments. * Default behavior is to expand the arguments via * {@link #expandArguments(LinkedList)} and pass the resulting list to * {@link #processArguments(LinkedList)} * @param args the list of argument strings * @throws IOException */ protected void processRawArguments(LinkedList<String> args) throws IOException { processArguments(expandArguments(args)); } /** * Expands a list of arguments into {@link PathData} objects. The default * behavior is to call {@link #expandArgument(String)} on each element * which by default globs the argument. The loop catches IOExceptions, * increments the error count, and displays the exception. * @param args strings to expand into {@link PathData} objects * @return list of all {@link PathData} objects the arguments * @throws IOException if anything goes wrong... */ protected LinkedList<PathData> expandArguments(LinkedList<String> args) throws IOException { LinkedList<PathData> expandedArgs = new LinkedList<PathData>(); for (String arg : args) { try { expandedArgs.addAll(expandArgument(arg)); } catch (IOException e) { // other exceptions are probably nasty displayError(e); } } return expandedArgs; } /** * Expand the given argument into a list of {@link PathData} objects. * The default behavior is to expand globs. Commands may override to * perform other expansions on an argument. * @param arg string pattern to expand * @return list of {@link PathData} objects * @throws IOException if anything goes wrong... 
*/ protected List<PathData> expandArgument(String arg) throws IOException { PathData[] items = PathData.expandAsGlob(arg, getConf()); if (items.length == 0) { // it's a glob that failed to match throw new PathNotFoundException(arg); } return Arrays.asList(items); } /** * Processes the command's list of expanded arguments. * {@link #processArgument(PathData)} will be invoked with each item * in the list. The loop catches IOExceptions, increments the error * count, and displays the exception. * @param args a list of {@link PathData} to process * @throws IOException if anything goes wrong... */ protected void processArguments(LinkedList<PathData> args) throws IOException { for (PathData arg : args) { try { processArgument(arg); } catch (IOException e) { displayError(e); } } } /** * Processes a {@link PathData} item, calling * {@link #processPathArgument(PathData)} or * {@link #processNonexistentPath(PathData)} on each item. * @param item {@link PathData} item to process * @throws IOException if anything goes wrong... */ protected void processArgument(PathData item) throws IOException { if (item.exists) { processPathArgument(item); } else { processNonexistentPath(item); } } /** * This is the last chance to modify an argument before going into the * (possibly) recursive {@link #processPaths(PathData, PathData...)} * -> {@link #processPath(PathData)} loop. Ex. ls and du use this to * expand out directories. * @param item a {@link PathData} representing a path which exists * @throws IOException if anything goes wrong... */ protected void processPathArgument(PathData item) throws IOException { // null indicates that the call is not via recursion, ie. there is // no parent directory that was expanded depth = 0; processPaths(null, item); } /** * Provides a hook for handling paths that don't exist. By default it * will throw an exception. Primarily overriden by commands that create * paths such as mkdir or touch. * @param item the {@link PathData} that doesn't exist * @throws FileNotFoundException if arg is a path and it doesn't exist * @throws IOException if anything else goes wrong... */ protected void processNonexistentPath(PathData item) throws IOException { throw new PathNotFoundException(item.toString()); } /** * Iterates over the given expanded paths and invokes * {@link #processPath(PathData)} on each element. If "recursive" is true, * will do a post-visit DFS on directories. * @param parent if called via a recurse, will be the parent dir, else null * @param items a list of {@link PathData} objects to process * @throws IOException if anything goes wrong... */ protected void processPaths(PathData parent, PathData ... items) throws IOException { // TODO: this really should be iterative for (PathData item : items) { try { processPath(item); if (recursive && isPathRecursable(item)) { recursePath(item); } postProcessPath(item); } catch (IOException e) { displayError(e); } } } /** * Determines whether a {@link PathData} item is recursable. Default * implementation is to recurse directories but can be overridden to recurse * through symbolic links. * * @param item * a {@link PathData} object * @return true if the item is recursable, false otherwise * @throws IOException * if anything goes wrong in the user-implementation */ protected boolean isPathRecursable(PathData item) throws IOException { return item.stat.isDirectory(); } /** * Hook for commands to implement an operation to be applied on each * path for the command. 
Note implementation of this method is optional * if earlier methods in the chain handle the operation. * @param item a {@link PathData} object * @throws RuntimeException if invoked but not implemented * @throws IOException if anything else goes wrong in the user-implementation */ protected void processPath(PathData item) throws IOException { throw new RuntimeException("processPath() is not implemented"); } /** * Hook for commands to implement an operation to be applied on each * path for the command after being processed successfully * @param item a {@link PathData} object * @throws IOException if anything goes wrong... */ protected void postProcessPath(PathData item) throws IOException { } /** * Gets the directory listing for a path and invokes * {@link #processPaths(PathData, PathData...)} * @param item {@link PathData} for directory to recurse into * @throws IOException if anything goes wrong... */ protected void recursePath(PathData item) throws IOException { try { depth++; processPaths(item, item.getDirectoryContents()); } finally { depth--; } } /** * Display an exception prefaced with the command name. Also increments * the error count for the command which will result in a non-zero exit * code. * @param e exception to display */ public void displayError(Exception e) { // build up a list of exceptions that occurred exceptions.add(e); String errorMessage = e.getLocalizedMessage(); if (errorMessage == null) { // this is an unexpected condition, so dump the whole exception since // it's probably a nasty internal error where the backtrace would be // useful errorMessage = StringUtils.stringifyException(e); LOG.debug(errorMessage); } else { errorMessage = errorMessage.split("\n", 2)[0]; } displayError(errorMessage); } /** * Display an error string prefaced with the command name. Also increments * the error count for the command which will result in a non-zero exit * code. * @param message error message to display */ public void displayError(String message) { numErrors++; displayWarning(message); } /** * Display an warning string prefaced with the command name. * @param message warning message to display */ public void displayWarning(String message) { err.println(getName() + ": " + message); } /** * The name of the command. Will first try to use the assigned name * else fallback to the command's preferred name * @return name of the command */ public String getName() { return (name == null) ? getCommandField("NAME") : name.startsWith("-") ? name.substring(1) : name; } /** * Define the name of the command. * @param name as invoked */ public void setName(String name) { this.name = name; } /** * The short usage suitable for the synopsis * @return "name options" */ public String getUsage() { String cmd = "-" + getName(); String usage = isDeprecated() ? "" : getCommandField("USAGE"); return usage.isEmpty() ? cmd : cmd + " " + usage; } /** * The long usage suitable for help output * @return text of the usage */ public String getDescription() { return isDeprecated() ? "(DEPRECATED) Same as '" + getReplacementCommand() + "'" : getCommandField("DESCRIPTION"); } /** * Is the command deprecated? 
* @return boolean */ public final boolean isDeprecated() { return (getReplacementCommand() != null); } /** * The replacement for a deprecated command * @return null if not deprecated, else alternative command */ public String getReplacementCommand() { return null; } /** * Get a public static class field * @param field the field to retrieve * @return String of the field */ private String getCommandField(String field) { String value; try { Field f = this.getClass().getDeclaredField(field); f.setAccessible(true); value = f.get(this).toString(); } catch (Exception e) { throw new RuntimeException( "failed to get " + this.getClass().getSimpleName()+"."+field, e); } return value; } }
16,019
31.42915
80
java
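The run() javadoc above spells out the hook chain; a new command normally overrides only processOptions() and processPath(). A minimal hedged sketch of such a subclass follows. The command name, the -R option handling and the directory-counting logic are invented for illustration, and since these shell support classes are marked @InterfaceAudience.Private this is a sketch of the hook chain rather than a supported extension point:

import java.io.IOException;
import java.util.LinkedList;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.shell.Command;
import org.apache.hadoop.fs.shell.CommandFormat;
import org.apache.hadoop.fs.shell.PathData;

public class CountDirsCommand extends Command {
  private int dirs = 0;

  public CountDirsCommand(Configuration conf) { super(conf); }

  @Override
  public String getCommandName() { return "countdirs"; }

  @Override
  protected void run(Path path) throws IOException {
    // Legacy entry point; unused when run(String...) drives the hook chain.
  }

  @Override
  protected void processOptions(LinkedList<String> args) throws IOException {
    // Accept one or more paths and an optional -R flag.
    CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "R");
    cf.parse(args);
    setRecursive(cf.getOpt("R"));
  }

  @Override
  protected void processPath(PathData item) throws IOException {
    // Invoked once per expanded path by the processPaths() loop.
    if (item.stat.isDirectory()) {
      dirs++;
    }
  }

  public int getDirCount() { return dirs; }
}

Invoked as new CountDirsCommand(conf).run("-R", "/some/dir"), the base class expands the glob, recurses into directories, and calls processPath() once per path.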
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touch.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.util.LinkedList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.PathIsDirectoryException; import org.apache.hadoop.fs.PathNotFoundException; /** * Unix touch like commands */ @InterfaceAudience.Private @InterfaceStability.Unstable class Touch extends FsCommand { public static void registerCommands(CommandFactory factory) { factory.addClass(Touchz.class, "-touchz"); } /** * (Re)create zero-length file at the specified path. * This will be replaced by a more UNIX-like touch when files may be * modified. */ public static class Touchz extends Touch { public static final String NAME = "touchz"; public static final String USAGE = "<path> ..."; public static final String DESCRIPTION = "Creates a file of zero length " + "at <path> with current time as the timestamp of that <path>. " + "An error is returned if the file exists with non-zero length\n"; @Override protected void processOptions(LinkedList<String> args) { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE); cf.parse(args); } @Override protected void processPath(PathData item) throws IOException { if (item.stat.isDirectory()) { // TODO: handle this throw new PathIsDirectoryException(item.toString()); } if (item.stat.getLen() != 0) { throw new PathIOException(item.toString(), "Not a zero-length file"); } touchz(item); } @Override protected void processNonexistentPath(PathData item) throws IOException { if (!item.parentExists()) { throw new PathNotFoundException(item.toString()); } touchz(item); } private void touchz(PathData item) throws IOException { item.fs.create(item.path).close(); } } }
2,813
32.105882
77
java
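The Touchz record above only defines the shell command itself; as a usage note, the same operation can be driven programmatically through FsShell, which implements Tool. This is a small, hypothetical sketch (the class name and the /tmp/marker path are invented for illustration), assuming the standard FsShell and ToolRunner entry points.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class TouchzExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to the command line: hadoop fs -touchz /tmp/marker
    int exitCode = ToolRunner.run(conf, new FsShell(conf),
        new String[] {"-touchz", "/tmp/marker"});
    System.exit(exitCode);  // non-zero if, e.g., the file exists with non-zero length
  }
}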
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.util.Collections; import java.util.LinkedList; import java.util.List; import com.google.common.collect.Lists; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclEntryType; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.AclUtil; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.ScopedAclEntries; /** * Acl related operations */ @InterfaceAudience.Private @InterfaceStability.Evolving class AclCommands extends FsCommand { private static String GET_FACL = "getfacl"; private static String SET_FACL = "setfacl"; public static void registerCommands(CommandFactory factory) { factory.addClass(GetfaclCommand.class, "-" + GET_FACL); factory.addClass(SetfaclCommand.class, "-" + SET_FACL); } /** * Implementing the '-getfacl' command for the the FsShell. */ public static class GetfaclCommand extends FsCommand { public static String NAME = GET_FACL; public static String USAGE = "[-R] <path>"; public static String DESCRIPTION = "Displays the Access Control Lists" + " (ACLs) of files and directories. If a directory has a default ACL," + " then getfacl also displays the default ACL.\n" + " -R: List the ACLs of all files and directories recursively.\n" + " <path>: File or directory to list.\n"; @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "R"); cf.parse(args); setRecursive(cf.getOpt("R")); if (args.isEmpty()) { throw new HadoopIllegalArgumentException("<path> is missing"); } if (args.size() > 1) { throw new HadoopIllegalArgumentException("Too many arguments"); } } @Override protected void processPath(PathData item) throws IOException { out.println("# file: " + item); out.println("# owner: " + item.stat.getOwner()); out.println("# group: " + item.stat.getGroup()); FsPermission perm = item.stat.getPermission(); if (perm.getStickyBit()) { out.println("# flags: --" + (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T")); } AclStatus aclStatus = item.fs.getAclStatus(item.path); List<AclEntry> entries = perm.getAclBit() ? 
aclStatus.getEntries() : Collections.<AclEntry> emptyList(); ScopedAclEntries scopedEntries = new ScopedAclEntries( AclUtil.getAclFromPermAndEntries(perm, entries)); printAclEntriesForSingleScope(aclStatus, perm, scopedEntries.getAccessEntries()); printAclEntriesForSingleScope(aclStatus, perm, scopedEntries.getDefaultEntries()); out.println(); } /** * Prints all the ACL entries in a single scope. * @param aclStatus AclStatus for the path * @param fsPerm FsPermission for the path * @param entries List<AclEntry> containing ACL entries of file */ private void printAclEntriesForSingleScope(AclStatus aclStatus, FsPermission fsPerm, List<AclEntry> entries) { if (entries.isEmpty()) { return; } if (AclUtil.isMinimalAcl(entries)) { for (AclEntry entry: entries) { out.println(entry); } } else { for (AclEntry entry: entries) { printExtendedAclEntry(aclStatus, fsPerm, entry); } } } /** * Prints a single extended ACL entry. If the mask restricts the * permissions of the entry, then also prints the restricted version as the * effective permissions. The mask applies to all named entries and also * the unnamed group entry. * @param aclStatus AclStatus for the path * @param fsPerm FsPermission for the path * @param entry AclEntry extended ACL entry to print */ private void printExtendedAclEntry(AclStatus aclStatus, FsPermission fsPerm, AclEntry entry) { if (entry.getName() != null || entry.getType() == AclEntryType.GROUP) { FsAction entryPerm = entry.getPermission(); FsAction effectivePerm = aclStatus .getEffectivePermission(entry, fsPerm); if (entryPerm != effectivePerm) { out.println(String.format("%s\t#effective:%s", entry, effectivePerm.SYMBOL)); } else { out.println(entry); } } else { out.println(entry); } } } /** * Implementing the '-setfacl' command for the the FsShell. */ public static class SetfaclCommand extends FsCommand { public static String NAME = SET_FACL; public static String USAGE = "[-R] [{-b|-k} {-m|-x <acl_spec>} <path>]" + "|[--set <acl_spec> <path>]"; public static String DESCRIPTION = "Sets Access Control Lists (ACLs)" + " of files and directories.\n" + "Options:\n" + " -b :Remove all but the base ACL entries. The entries for user," + " group and others are retained for compatibility with permission " + "bits.\n" + " -k :Remove the default ACL.\n" + " -R :Apply operations to all files and directories recursively.\n" + " -m :Modify ACL. New entries are added to the ACL, and existing" + " entries are retained.\n" + " -x :Remove specified ACL entries. Other ACL entries are retained.\n" + " --set :Fully replace the ACL, discarding all existing entries." 
+ " The <acl_spec> must include entries for user, group, and others" + " for compatibility with permission bits.\n" + " <acl_spec>: Comma separated list of ACL entries.\n" + " <path>: File or directory to modify.\n"; CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "b", "k", "R", "m", "x", "-set"); List<AclEntry> aclEntries = null; List<AclEntry> accessAclEntries = null; @Override protected void processOptions(LinkedList<String> args) throws IOException { cf.parse(args); setRecursive(cf.getOpt("R")); // Mix of remove and modify acl flags are not allowed boolean bothRemoveOptions = cf.getOpt("b") && cf.getOpt("k"); boolean bothModifyOptions = cf.getOpt("m") && cf.getOpt("x"); boolean oneRemoveOption = cf.getOpt("b") || cf.getOpt("k"); boolean oneModifyOption = cf.getOpt("m") || cf.getOpt("x"); boolean setOption = cf.getOpt("-set"); if ((bothRemoveOptions || bothModifyOptions) || (oneRemoveOption && oneModifyOption) || (setOption && (oneRemoveOption || oneModifyOption))) { throw new HadoopIllegalArgumentException( "Specified flags contains both remove and modify flags"); } // Only -m, -x and --set expects <acl_spec> if (oneModifyOption || setOption) { if (args.size() < 2) { throw new HadoopIllegalArgumentException("<acl_spec> is missing"); } aclEntries = AclEntry.parseAclSpec(args.removeFirst(), !cf.getOpt("x")); } if (args.isEmpty()) { throw new HadoopIllegalArgumentException("<path> is missing"); } if (args.size() > 1) { throw new HadoopIllegalArgumentException("Too many arguments"); } // In recursive mode, save a separate list of just the access ACL entries. // Only directories may have a default ACL. When a recursive operation // encounters a file under the specified path, it must pass only the // access ACL entries. if (isRecursive() && (oneModifyOption || setOption)) { accessAclEntries = Lists.newArrayList(); for (AclEntry entry: aclEntries) { if (entry.getScope() == AclEntryScope.ACCESS) { accessAclEntries.add(entry); } } } } @Override protected void processPath(PathData item) throws IOException { if (cf.getOpt("b")) { item.fs.removeAcl(item.path); } else if (cf.getOpt("k")) { item.fs.removeDefaultAcl(item.path); } else if (cf.getOpt("m")) { List<AclEntry> entries = getAclEntries(item); if (!entries.isEmpty()) { item.fs.modifyAclEntries(item.path, entries); } } else if (cf.getOpt("x")) { List<AclEntry> entries = getAclEntries(item); if (!entries.isEmpty()) { item.fs.removeAclEntries(item.path, entries); } } else if (cf.getOpt("-set")) { List<AclEntry> entries = getAclEntries(item); if (!entries.isEmpty()) { item.fs.setAcl(item.path, entries); } } } /** * Returns the ACL entries to use in the API call for the given path. For a * recursive operation, returns all specified ACL entries if the item is a * directory or just the access ACL entries if the item is a file. For a * non-recursive operation, returns all specified ACL entries. * * @param item PathData path to check * @return List<AclEntry> ACL entries to use in the API call */ private List<AclEntry> getAclEntries(PathData item) { if (isRecursive()) { return item.stat.isDirectory() ? aclEntries : accessAclEntries; } else { return aclEntries; } } } }
10,445
38.123596
81
java
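The setfacl implementation above is built on AclEntry.parseAclSpec and the FileSystem ACL methods (modifyAclEntries, removeAclEntries, setAcl). Here is a minimal sketch of calling that same API directly, assuming an ACL-enabled file system; the path and user names are illustrative only.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;

public class ModifyAclSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Same parsing the command uses for '-m': true = each entry must carry permissions.
    List<AclEntry> entries =
        AclEntry.parseAclSpec("user:alice:rwx,default:group::r-x", true);
    fs.modifyAclEntries(new Path("/data/shared"), entries);
  }
}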
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandUtils.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; final class CommandUtils { static String formatDescription(String usage, String... descriptions) { StringBuilder b = new StringBuilder(usage + ": " + descriptions[0]); for(int i = 1; i < descriptions.length; i++) { b.append("\n\t\t" + descriptions[i]); } return b.toString(); } }
1,146
38.551724
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; import java.util.EnumSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.NoSuchElementException; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FilterFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathExistsException; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.PathIsDirectoryException; import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.PathNotFoundException; import org.apache.hadoop.fs.PathOperationException; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclUtil; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; import static org.apache.hadoop.fs.CreateFlag.CREATE; import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST; /** * Provides: argument processing to ensure the destination is valid * for the number of source arguments. A processPaths that accepts both * a source and resolved target. Sources are resolved as children of * a destination directory. */ abstract class CommandWithDestination extends FsCommand { protected PathData dst; private boolean overwrite = false; private boolean verifyChecksum = true; private boolean writeChecksum = true; private boolean lazyPersist = false; /** * The name of the raw xattr namespace. It would be nice to use * XAttr.RAW.name() but we can't reference the hadoop-hdfs project. */ private static final String RAW = "raw."; /** * The name of the reserved raw directory. */ private static final String RESERVED_RAW = "/.reserved/raw"; /** * * This method is used to enable the force(-f) option while copying the files. * * @param flag true/false */ protected void setOverwrite(boolean flag) { overwrite = flag; } protected void setLazyPersist(boolean flag) { lazyPersist = flag; } protected void setVerifyChecksum(boolean flag) { verifyChecksum = flag; } protected void setWriteChecksum(boolean flag) { writeChecksum = flag; } /** * If true, the last modified time, last access time, * owner, group and permission information of the source * file will be preserved as far as target {@link FileSystem} * implementation allows. 
*/ protected void setPreserve(boolean preserve) { if (preserve) { preserve(FileAttribute.TIMESTAMPS); preserve(FileAttribute.OWNERSHIP); preserve(FileAttribute.PERMISSION); } else { preserveStatus.clear(); } } protected static enum FileAttribute { TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR; public static FileAttribute getAttribute(char symbol) { for (FileAttribute attribute : values()) { if (attribute.name().charAt(0) == Character.toUpperCase(symbol)) { return attribute; } } throw new NoSuchElementException("No attribute for " + symbol); } } private EnumSet<FileAttribute> preserveStatus = EnumSet.noneOf(FileAttribute.class); /** * Checks if the input attribute should be preserved or not * * @param attribute - Attribute to check * @return boolean true if attribute should be preserved, false otherwise */ private boolean shouldPreserve(FileAttribute attribute) { return preserveStatus.contains(attribute); } /** * Add file attributes that need to be preserved. This method may be * called multiple times to add attributes. * * @param fileAttribute - Attribute to add, one at a time */ protected void preserve(FileAttribute fileAttribute) { for (FileAttribute attribute : preserveStatus) { if (attribute.equals(fileAttribute)) { return; } } preserveStatus.add(fileAttribute); } /** * The last arg is expected to be a local path, if only one argument is * given then the destination will be the current directory * @param args is the list of arguments */ protected void getLocalDestination(LinkedList<String> args) throws IOException { String pathString = (args.size() < 2) ? Path.CUR_DIR : args.removeLast(); try { dst = new PathData(new URI(pathString), getConf()); } catch (URISyntaxException e) { if (Path.WINDOWS) { // Unlike URI, PathData knows how to parse Windows drive-letter paths. 
dst = new PathData(pathString, getConf()); } else { throw new IOException("unexpected URISyntaxException", e); } } } /** * The last arg is expected to be a remote path, if only one argument is * given then the destination will be the remote user's directory * @param args is the list of arguments * @throws PathIOException if path doesn't exist or matches too many times */ protected void getRemoteDestination(LinkedList<String> args) throws IOException { if (args.size() < 2) { dst = new PathData(Path.CUR_DIR, getConf()); } else { String pathString = args.removeLast(); // if the path is a glob, then it must match one and only one path PathData[] items = PathData.expandAsGlob(pathString, getConf()); switch (items.length) { case 0: throw new PathNotFoundException(pathString); case 1: dst = items[0]; break; default: throw new PathIOException(pathString, "Too many matches"); } } } @Override protected void processArguments(LinkedList<PathData> args) throws IOException { // if more than one arg, the destination must be a directory // if one arg, the dst must not exist or must be a directory if (args.size() > 1) { if (!dst.exists) { throw new PathNotFoundException(dst.toString()); } if (!dst.stat.isDirectory()) { throw new PathIsNotDirectoryException(dst.toString()); } } else if (dst.exists) { if (!dst.stat.isDirectory() && !overwrite) { throw new PathExistsException(dst.toString()); } } else if (!dst.parentExists()) { throw new PathNotFoundException(dst.toString()); } super.processArguments(args); } @Override protected void processPathArgument(PathData src) throws IOException { if (src.stat.isDirectory() && src.fs.equals(dst.fs)) { PathData target = getTargetPath(src); String srcPath = src.fs.makeQualified(src.path).toString(); String dstPath = dst.fs.makeQualified(target.path).toString(); if (dstPath.equals(srcPath)) { PathIOException e = new PathIOException(src.toString(), "are identical"); e.setTargetPath(dstPath.toString()); throw e; } if (dstPath.startsWith(srcPath+Path.SEPARATOR)) { PathIOException e = new PathIOException(src.toString(), "is a subdirectory of itself"); e.setTargetPath(target.toString()); throw e; } } super.processPathArgument(src); } @Override protected void processPath(PathData src) throws IOException { processPath(src, getTargetPath(src)); } /** * Called with a source and target destination pair * @param src for the operation * @param dst for the operation * @throws IOException if anything goes wrong */ protected void processPath(PathData src, PathData dst) throws IOException { if (src.stat.isSymlink()) { // TODO: remove when FileContext is supported, this needs to either // copy the symlink or deref the symlink throw new PathOperationException(src.toString()); } else if (src.stat.isFile()) { copyFileToTarget(src, dst); } else if (src.stat.isDirectory() && !isRecursive()) { throw new PathIsDirectoryException(src.toString()); } } @Override protected void recursePath(PathData src) throws IOException { PathData savedDst = dst; try { // modify dst as we descend to append the basename of the // current directory being processed dst = getTargetPath(src); final boolean preserveRawXattrs = checkPathsForReservedRaw(src.path, dst.path); if (dst.exists) { if (!dst.stat.isDirectory()) { throw new PathIsNotDirectoryException(dst.toString()); } } else { if (!dst.fs.mkdirs(dst.path)) { // too bad we have no clue what failed PathIOException e = new PathIOException(dst.toString()); e.setOperation("mkdir"); throw e; } dst.refreshStatus(); // need to update stat to know it exists now } 
super.recursePath(src); if (dst.stat.isDirectory()) { preserveAttributes(src, dst, preserveRawXattrs); } } finally { dst = savedDst; } } protected PathData getTargetPath(PathData src) throws IOException { PathData target; // on the first loop, the dst may be directory or a file, so only create // a child path if dst is a dir; after recursion, it's always a dir if ((getDepth() > 0) || (dst.exists && dst.stat.isDirectory())) { target = dst.getPathDataForChild(src); } else if (dst.representsDirectory()) { // see if path looks like a dir target = dst.getPathDataForChild(src); } else { target = dst; } return target; } /** * Copies the source file to the target. * @param src item to copy * @param target where to copy the item * @throws IOException if copy fails */ protected void copyFileToTarget(PathData src, PathData target) throws IOException { final boolean preserveRawXattrs = checkPathsForReservedRaw(src.path, target.path); src.fs.setVerifyChecksum(verifyChecksum); InputStream in = null; try { in = src.fs.open(src.path); copyStreamToTarget(in, target); preserveAttributes(src, target, preserveRawXattrs); } finally { IOUtils.closeStream(in); } } /** * Check the source and target paths to ensure that they are either both in * /.reserved/raw or neither in /.reserved/raw. If neither src nor target are * in /.reserved/raw, then return false, indicating not to preserve raw.* * xattrs. If both src/target are in /.reserved/raw, then return true, * indicating raw.* xattrs should be preserved. If only one of src/target is * in /.reserved/raw then throw an exception. * * @param src The source path to check. This should be a fully-qualified * path, not relative. * @param target The target path to check. This should be a fully-qualified * path, not relative. * @return true if raw.* xattrs should be preserved. * @throws PathOperationException is only one of src/target are in * /.reserved/raw. */ private boolean checkPathsForReservedRaw(Path src, Path target) throws PathOperationException { final boolean srcIsRR = Path.getPathWithoutSchemeAndAuthority(src). toString().startsWith(RESERVED_RAW); final boolean dstIsRR = Path.getPathWithoutSchemeAndAuthority(target). toString().startsWith(RESERVED_RAW); boolean preserveRawXattrs = false; if (srcIsRR && !dstIsRR) { final String s = "' copy from '" + RESERVED_RAW + "' to non '" + RESERVED_RAW + "'. Either both source and target must be in '" + RESERVED_RAW + "' or neither."; throw new PathOperationException("'" + src.toString() + s); } else if (!srcIsRR && dstIsRR) { final String s = "' copy from non '" + RESERVED_RAW +"' to '" + RESERVED_RAW + "'. Either both source and target must be in '" + RESERVED_RAW + "' or neither."; throw new PathOperationException("'" + dst.toString() + s); } else if (srcIsRR && dstIsRR) { preserveRawXattrs = true; } return preserveRawXattrs; } /** * Copies the stream contents to a temporary file. If the copy is * successful, the temporary file will be renamed to the real path, * else the temporary file will be deleted. 
* @param in the input stream for the copy * @param target where to store the contents of the stream * @throws IOException if copy fails */ protected void copyStreamToTarget(InputStream in, PathData target) throws IOException { if (target.exists && (target.stat.isDirectory() || !overwrite)) { throw new PathExistsException(target.toString()); } TargetFileSystem targetFs = new TargetFileSystem(target.fs); try { PathData tempTarget = target.suffix("._COPYING_"); targetFs.setWriteChecksum(writeChecksum); targetFs.writeStreamToFile(in, tempTarget, lazyPersist); targetFs.rename(tempTarget, target); } finally { targetFs.close(); // last ditch effort to ensure temp file is removed } } /** * Preserve the attributes of the source to the target. * The method calls {@link #shouldPreserve(FileAttribute)} to check what * attribute to preserve. * @param src source to preserve * @param target where to preserve attributes * @param preserveRawXAttrs true if raw.* xattrs should be preserved * @throws IOException if fails to preserve attributes */ protected void preserveAttributes(PathData src, PathData target, boolean preserveRawXAttrs) throws IOException { if (shouldPreserve(FileAttribute.TIMESTAMPS)) { target.fs.setTimes( target.path, src.stat.getModificationTime(), src.stat.getAccessTime()); } if (shouldPreserve(FileAttribute.OWNERSHIP)) { target.fs.setOwner( target.path, src.stat.getOwner(), src.stat.getGroup()); } if (shouldPreserve(FileAttribute.PERMISSION) || shouldPreserve(FileAttribute.ACL)) { target.fs.setPermission( target.path, src.stat.getPermission()); } if (shouldPreserve(FileAttribute.ACL)) { FsPermission perm = src.stat.getPermission(); if (perm.getAclBit()) { List<AclEntry> srcEntries = src.fs.getAclStatus(src.path).getEntries(); List<AclEntry> srcFullEntries = AclUtil.getAclFromPermAndEntries(perm, srcEntries); target.fs.setAcl(target.path, srcFullEntries); } } final boolean preserveXAttrs = shouldPreserve(FileAttribute.XATTR); if (preserveXAttrs || preserveRawXAttrs) { Map<String, byte[]> srcXAttrs = src.fs.getXAttrs(src.path); if (srcXAttrs != null) { Iterator<Entry<String, byte[]>> iter = srcXAttrs.entrySet().iterator(); while (iter.hasNext()) { Entry<String, byte[]> entry = iter.next(); final String xattrName = entry.getKey(); if (xattrName.startsWith(RAW) || preserveXAttrs) { target.fs.setXAttr(target.path, entry.getKey(), entry.getValue()); } } } } } // Helper filter filesystem that registers created files as temp files to // be deleted on exit unless successfully renamed private static class TargetFileSystem extends FilterFileSystem { TargetFileSystem(FileSystem fs) { super(fs); } void writeStreamToFile(InputStream in, PathData target, boolean lazyPersist) throws IOException { FSDataOutputStream out = null; try { out = create(target, lazyPersist); IOUtils.copyBytes(in, out, getConf(), true); } finally { IOUtils.closeStream(out); // just in case copyBytes didn't } } // tag created files as temp files FSDataOutputStream create(PathData item, boolean lazyPersist) throws IOException { try { if (lazyPersist) { EnumSet<CreateFlag> createFlags = EnumSet.of(CREATE, LAZY_PERSIST); return create(item.path, FsPermission.getFileDefault().applyUMask( FsPermission.getUMask(getConf())), createFlags, getConf().getInt("io.file.buffer.size", 4096), lazyPersist ? 
1 : getDefaultReplication(item.path), getDefaultBlockSize(), null, null); } else { return create(item.path, true); } } finally { // might have been created but stream was interrupted deleteOnExit(item.path); } } void rename(PathData src, PathData target) throws IOException { // the rename method with an option to delete the target is deprecated if (target.exists && !delete(target.path, false)) { // too bad we don't know why it failed PathIOException e = new PathIOException(target.toString()); e.setOperation("delete"); throw e; } if (!rename(src.path, target.path)) { // too bad we don't know why it failed PathIOException e = new PathIOException(src.toString()); e.setOperation("rename"); e.setTargetPath(target.toString()); throw e; } // cancel delete on exit if rename is successful cancelDeleteOnExit(src.path); } @Override public void close() { // purge any remaining temp files, but don't close underlying fs processDeleteOnExit(); } } }
18,400
34.386538
81
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.ByteArrayOutputStream; import java.io.EOFException; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.util.LinkedList; import java.util.zip.GZIPInputStream; import org.apache.avro.Schema; import org.apache.avro.file.DataFileReader; import org.apache.avro.file.FileReader; import org.apache.avro.generic.GenericDatumReader; import org.apache.avro.generic.GenericDatumWriter; import org.apache.avro.io.DatumWriter; import org.apache.avro.io.EncoderFactory; import org.apache.avro.io.JsonEncoder; import org.apache.commons.io.Charsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AvroFSInput; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsDirectoryException; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.CompressionCodecFactory; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.codehaus.jackson.JsonEncoding; import org.codehaus.jackson.JsonFactory; import org.codehaus.jackson.JsonGenerator; import org.codehaus.jackson.util.MinimalPrettyPrinter; /** * Display contents or checksums of files */ @InterfaceAudience.Private @InterfaceStability.Evolving class Display extends FsCommand { public static void registerCommands(CommandFactory factory) { factory.addClass(Cat.class, "-cat"); factory.addClass(Text.class, "-text"); factory.addClass(Checksum.class, "-checksum"); } /** * Displays file content to stdout */ public static class Cat extends Display { public static final String NAME = "cat"; public static final String USAGE = "[-ignoreCrc] <src> ..."; public static final String DESCRIPTION = "Fetch all files that match the file pattern <src> " + "and display their content on stdout.\n"; private boolean verifyChecksum = true; @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "ignoreCrc"); cf.parse(args); verifyChecksum = !cf.getOpt("ignoreCrc"); } @Override protected void processPath(PathData item) throws IOException { if (item.stat.isDirectory()) { throw new 
PathIsDirectoryException(item.toString()); } item.fs.setVerifyChecksum(verifyChecksum); printToStdout(getInputStream(item)); } private void printToStdout(InputStream in) throws IOException { try { IOUtils.copyBytes(in, out, getConf(), false); } finally { in.close(); } } protected InputStream getInputStream(PathData item) throws IOException { return item.fs.open(item.path); } } /** * Same behavior as "-cat", but handles zip and TextRecordInputStream * and Avro encodings. */ public static class Text extends Cat { public static final String NAME = "text"; public static final String USAGE = Cat.USAGE; public static final String DESCRIPTION = "Takes a source file and outputs the file in text format.\n" + "The allowed formats are zip and TextRecordInputStream and Avro."; @Override protected InputStream getInputStream(PathData item) throws IOException { FSDataInputStream i = (FSDataInputStream)super.getInputStream(item); // Handle 0 and 1-byte files short leadBytes; try { leadBytes = i.readShort(); } catch (EOFException e) { i.seek(0); return i; } // Check type of stream first switch(leadBytes) { case 0x1f8b: { // RFC 1952 // Must be gzip i.seek(0); return new GZIPInputStream(i); } case 0x5345: { // 'S' 'E' // Might be a SequenceFile if (i.readByte() == 'Q') { i.close(); return new TextRecordInputStream(item.stat); } } default: { // Check the type of compression instead, depending on Codec class's // own detection methods, based on the provided path. CompressionCodecFactory cf = new CompressionCodecFactory(getConf()); CompressionCodec codec = cf.getCodec(item.path); if (codec != null) { i.seek(0); return codec.createInputStream(i); } break; } case 0x4f62: { // 'O' 'b' if (i.readByte() == 'j') { i.close(); return new AvroFileInputStream(item.stat); } break; } } // File is non-compressed, or not a file container we know. i.seek(0); return i; } } public static class Checksum extends Display { public static final String NAME = "checksum"; public static final String USAGE = "<src> ..."; public static final String DESCRIPTION = "Dump checksum information for files that match the file " + "pattern <src> to stdout. Note that this requires a round-trip " + "to a datanode storing each block of the file, and thus is not " + "efficient to run on a large number of files. 
The checksum of a " + "file depends on its content, block size and the checksum " + "algorithm and parameters used for creating the file."; @Override protected void processPath(PathData item) throws IOException { if (item.stat.isDirectory()) { throw new PathIsDirectoryException(item.toString()); } FileChecksum checksum = item.fs.getFileChecksum(item.path); if (checksum == null) { out.printf("%s\tNONE\t%n", item.toString()); } else { String checksumString = StringUtils.byteToHexString( checksum.getBytes(), 0, checksum.getLength()); out.printf("%s\t%s\t%s%n", item.toString(), checksum.getAlgorithmName(), checksumString); } } } protected class TextRecordInputStream extends InputStream { SequenceFile.Reader r; WritableComparable<?> key; Writable val; DataInputBuffer inbuf; DataOutputBuffer outbuf; public TextRecordInputStream(FileStatus f) throws IOException { final Path fpath = f.getPath(); final Configuration lconf = getConf(); r = new SequenceFile.Reader(lconf, SequenceFile.Reader.file(fpath)); key = ReflectionUtils.newInstance( r.getKeyClass().asSubclass(WritableComparable.class), lconf); val = ReflectionUtils.newInstance( r.getValueClass().asSubclass(Writable.class), lconf); inbuf = new DataInputBuffer(); outbuf = new DataOutputBuffer(); } @Override public int read() throws IOException { int ret; if (null == inbuf || -1 == (ret = inbuf.read())) { if (!r.next(key, val)) { return -1; } byte[] tmp = key.toString().getBytes(Charsets.UTF_8); outbuf.write(tmp, 0, tmp.length); outbuf.write('\t'); tmp = val.toString().getBytes(Charsets.UTF_8); outbuf.write(tmp, 0, tmp.length); outbuf.write('\n'); inbuf.reset(outbuf.getData(), outbuf.getLength()); outbuf.reset(); ret = inbuf.read(); } return ret; } @Override public void close() throws IOException { r.close(); super.close(); } } /** * This class transforms a binary Avro data file into an InputStream * with data that is in a human readable JSON format. */ protected static class AvroFileInputStream extends InputStream { private int pos; private byte[] buffer; private ByteArrayOutputStream output; private FileReader<?> fileReader; private DatumWriter<Object> writer; private JsonEncoder encoder; public AvroFileInputStream(FileStatus status) throws IOException { pos = 0; buffer = new byte[0]; GenericDatumReader<Object> reader = new GenericDatumReader<Object>(); FileContext fc = FileContext.getFileContext(new Configuration()); fileReader = DataFileReader.openReader(new AvroFSInput(fc, status.getPath()),reader); Schema schema = fileReader.getSchema(); writer = new GenericDatumWriter<Object>(schema); output = new ByteArrayOutputStream(); JsonGenerator generator = new JsonFactory().createJsonGenerator(output, JsonEncoding.UTF8); MinimalPrettyPrinter prettyPrinter = new MinimalPrettyPrinter(); prettyPrinter.setRootValueSeparator(System.getProperty("line.separator")); generator.setPrettyPrinter(prettyPrinter); encoder = EncoderFactory.get().jsonEncoder(schema, generator); } /** * Read a single byte from the stream. */ @Override public int read() throws IOException { if (pos < buffer.length) { return buffer[pos++]; } if (!fileReader.hasNext()) { return -1; } writer.write(fileReader.next(), encoder); encoder.flush(); if (!fileReader.hasNext()) { // Write a new line after the last Avro record. output.write(System.getProperty("line.separator") .getBytes(Charsets.UTF_8)); output.flush(); } pos = 0; buffer = output.toByteArray(); output.reset(); return read(); } /** * Close the stream. 
*/ @Override public void close() throws IOException { fileReader.close(); output.close(); super.close(); } } }
10,895
32.423313
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.Arrays; import java.util.regex.Pattern; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.PathIsDirectoryException; import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.PathNotFoundException; /** * Encapsulates a Path (path), its FileStatus (stat), and its FileSystem (fs). * PathData ensures that the returned path string will be the same as the * one passed in during initialization (unlike Path objects which can * modify the path string). * The stat field will be null if the path does not exist. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class PathData implements Comparable<PathData> { protected final URI uri; public final FileSystem fs; public final Path path; public FileStatus stat; public boolean exists; /* True if the URI scheme was not present in the pathString but inferred. */ private boolean inferredSchemeFromPath = false; /** * Pre-compiled regular expressions to detect path formats. */ private static final Pattern potentialUri = Pattern.compile("^[a-zA-Z][a-zA-Z0-9+-.]+:"); private static final Pattern windowsNonUriAbsolutePath1 = Pattern.compile("^/?[a-zA-Z]:\\\\"); private static final Pattern windowsNonUriAbsolutePath2 = Pattern.compile("^/?[a-zA-Z]:/"); /** * Creates an object to wrap the given parameters as fields. The string * used to create the path will be recorded since the Path object does not * return exactly the same string used to initialize it * @param pathString a string for a path * @param conf the configuration file * @throws IOException if anything goes wrong... */ public PathData(String pathString, Configuration conf) throws IOException { this(FileSystem.get(stringToUri(pathString), conf), pathString); } /** * Creates an object to wrap the given parameters as fields. The string * used to create the path will be recorded since the Path object does not * return exactly the same string used to initialize it * @param localPath a local URI * @param conf the configuration file * @throws IOException if anything goes wrong... */ public PathData(URI localPath, Configuration conf) throws IOException { this(FileSystem.getLocal(conf), localPath.getPath()); } /** * Looks up the file status for a path. 
If the path * doesn't exist, then the status will be null * @param fs the FileSystem for the path * @param pathString a string for a path * @throws IOException if anything goes wrong */ private PathData(FileSystem fs, String pathString) throws IOException { this(fs, pathString, lookupStat(fs, pathString, true)); } /** * Validates the given Windows path. * @param pathString a String of the path suppliued by the user. * @return true if the URI scheme was not present in the pathString but * inferred; false, otherwise. * @throws IOException if anything goes wrong */ private static boolean checkIfSchemeInferredFromPath(String pathString) throws IOException { if (windowsNonUriAbsolutePath1.matcher(pathString).find()) { // Forward slashes disallowed in a backslash-separated path. if (pathString.indexOf('/') != -1) { throw new IOException("Invalid path string " + pathString); } return true; } // Is it a forward slash-separated absolute path? if (windowsNonUriAbsolutePath2.matcher(pathString).find()) { return true; } // Does it look like a URI? If so then just leave it alone. if (potentialUri.matcher(pathString).find()) { return false; } // Looks like a relative path on Windows. return false; } /** * Creates an object to wrap the given parameters as fields. The string * used to create the path will be recorded since the Path object does not * return exactly the same string used to initialize it. * @param fs the FileSystem * @param pathString a String of the path * @param stat the FileStatus (may be null if the path doesn't exist) */ private PathData(FileSystem fs, String pathString, FileStatus stat) throws IOException { this.fs = fs; this.uri = stringToUri(pathString); this.path = fs.makeQualified(new Path(uri)); setStat(stat); if (Path.WINDOWS) { inferredSchemeFromPath = checkIfSchemeInferredFromPath(pathString); } } // need a static method for the ctor above /** * Get the FileStatus info * @param ignoreFNF if true, stat will be null if the path doesn't exist * @return FileStatus for the given path * @throws IOException if anything goes wrong */ private static FileStatus lookupStat(FileSystem fs, String pathString, boolean ignoreFNF) throws IOException { FileStatus status = null; try { status = fs.getFileStatus(new Path(pathString)); } catch (FileNotFoundException e) { if (!ignoreFNF) throw new PathNotFoundException(pathString); } // TODO: should consider wrapping other exceptions into Path*Exceptions return status; } private void setStat(FileStatus stat) { this.stat = stat; exists = (stat != null); } /** * Updates the paths's file status * @return the updated FileStatus * @throws IOException if anything goes wrong... */ public FileStatus refreshStatus() throws IOException { FileStatus status = null; try { status = lookupStat(fs, toString(), false); } finally { // always set the status. the caller must get the correct result // if it catches the exception and later interrogates the status setStat(status); } return status; } protected enum FileTypeRequirement { SHOULD_NOT_BE_DIRECTORY, SHOULD_BE_DIRECTORY }; /** * Ensure that the file exists and if it is or is not a directory * @param typeRequirement Set it to the desired requirement. * @throws PathIOException if file doesn't exist or the type does not match * what was specified in typeRequirement. 
*/ private void checkIfExists(FileTypeRequirement typeRequirement) throws PathIOException { if (!exists) { throw new PathNotFoundException(toString()); } if ((typeRequirement == FileTypeRequirement.SHOULD_BE_DIRECTORY) && !stat.isDirectory()) { throw new PathIsNotDirectoryException(toString()); } else if ((typeRequirement == FileTypeRequirement.SHOULD_NOT_BE_DIRECTORY) && stat.isDirectory()) { throw new PathIsDirectoryException(toString()); } } /** * Returns a new PathData with the given extension. * @param extension for the suffix * @return PathData * @throws IOException shouldn't happen */ public PathData suffix(String extension) throws IOException { return new PathData(fs, this+extension); } /** * Test if the parent directory exists * @return boolean indicating parent exists * @throws IOException upon unexpected error */ public boolean parentExists() throws IOException { return representsDirectory() ? fs.exists(path) : fs.exists(path.getParent()); } /** * Check if the path represents a directory as determined by the basename * being "." or "..", or the path ending with a directory separator * @return boolean if this represents a directory */ public boolean representsDirectory() { String uriPath = uri.getPath(); String name = uriPath.substring(uriPath.lastIndexOf("/")+1); // Path will munch off the chars that indicate a dir, so there's no way // to perform this test except by examining the raw basename we maintain return (name.isEmpty() || name.equals(".") || name.equals("..")); } /** * Returns a list of PathData objects of the items contained in the given * directory. * @return list of PathData objects for its children * @throws IOException if anything else goes wrong... */ public PathData[] getDirectoryContents() throws IOException { checkIfExists(FileTypeRequirement.SHOULD_BE_DIRECTORY); FileStatus[] stats = fs.listStatus(path); PathData[] items = new PathData[stats.length]; for (int i=0; i < stats.length; i++) { // preserve relative paths String child = getStringForChildPath(stats[i].getPath()); items[i] = new PathData(fs, child, stats[i]); } Arrays.sort(items); return items; } /** * Creates a new object for a child entry in this directory * @param child the basename will be appended to this object's path * @return PathData for the child * @throws IOException if this object does not exist or is not a directory */ public PathData getPathDataForChild(PathData child) throws IOException { checkIfExists(FileTypeRequirement.SHOULD_BE_DIRECTORY); return new PathData(fs, getStringForChildPath(child.path)); } /** * Given a child of this directory, use the directory's path and the child's * basename to construct the string to the child. This preserves relative * paths since Path will fully qualify. * @param childPath a path contained within this directory * @return String of the path relative to this directory */ private String getStringForChildPath(Path childPath) { String basename = childPath.getName(); if (Path.CUR_DIR.equals(toString())) { return basename; } // check getPath() so scheme slashes aren't considered part of the path String separator = uri.getPath().endsWith(Path.SEPARATOR) ? "" : Path.SEPARATOR; return uriToString(uri, inferredSchemeFromPath) + separator + basename; } protected enum PathType { HAS_SCHEME, SCHEMELESS_ABSOLUTE, RELATIVE }; /** * Expand the given path as a glob pattern. Non-existent paths do not * throw an exception because creation commands like touch and mkdir need * to create them. The "stat" field will be null if the path does not * exist. 
* @param pattern the pattern to expand as a glob * @param conf the hadoop configuration * @return list of {@link PathData} objects. if the pattern is not a glob, * and does not exist, the list will contain a single PathData with a null * stat * @throws IOException anything else goes wrong... */ public static PathData[] expandAsGlob(String pattern, Configuration conf) throws IOException { Path globPath = new Path(pattern); FileSystem fs = globPath.getFileSystem(conf); FileStatus[] stats = fs.globStatus(globPath); PathData[] items = null; if (stats == null) { // remove any quoting in the glob pattern pattern = pattern.replaceAll("\\\\(.)", "$1"); // not a glob & file not found, so add the path with a null stat items = new PathData[]{ new PathData(fs, pattern, null) }; } else { // figure out what type of glob path was given, will convert globbed // paths to match the type to preserve relativity PathType globType; URI globUri = globPath.toUri(); if (globUri.getScheme() != null) { globType = PathType.HAS_SCHEME; } else if (!globUri.getPath().isEmpty() && new Path(globUri.getPath()).isAbsolute()) { globType = PathType.SCHEMELESS_ABSOLUTE; } else { globType = PathType.RELATIVE; } // convert stats to PathData items = new PathData[stats.length]; int i=0; for (FileStatus stat : stats) { URI matchUri = stat.getPath().toUri(); String globMatch = null; switch (globType) { case HAS_SCHEME: // use as-is, but remove authority if necessary if (globUri.getAuthority() == null) { matchUri = removeAuthority(matchUri); } globMatch = uriToString(matchUri, false); break; case SCHEMELESS_ABSOLUTE: // take just the uri's path globMatch = matchUri.getPath(); break; case RELATIVE: // make it relative to the current working dir URI cwdUri = fs.getWorkingDirectory().toUri(); globMatch = relativize(cwdUri, matchUri, stat.isDirectory()); break; } items[i++] = new PathData(fs, globMatch, stat); } } Arrays.sort(items); return items; } private static URI removeAuthority(URI uri) { try { uri = new URI( uri.getScheme(), "", uri.getPath(), uri.getQuery(), uri.getFragment() ); } catch (URISyntaxException e) { throw new IllegalArgumentException(e.getLocalizedMessage()); } return uri; } private static String relativize(URI cwdUri, URI srcUri, boolean isDir) { String uriPath = srcUri.getPath(); String cwdPath = cwdUri.getPath(); if (cwdPath.equals(uriPath)) { return Path.CUR_DIR; } // find common ancestor int lastSep = findLongestDirPrefix(cwdPath, uriPath, isDir); StringBuilder relPath = new StringBuilder(); // take the remaining path fragment after the ancestor if (lastSep < uriPath.length()) { relPath.append(uriPath.substring(lastSep+1)); } // if cwd has a path fragment after the ancestor, convert them to ".." 
if (lastSep < cwdPath.length()) { while (lastSep != -1) { if (relPath.length() != 0) relPath.insert(0, Path.SEPARATOR); relPath.insert(0, ".."); lastSep = cwdPath.indexOf(Path.SEPARATOR, lastSep+1); } } return relPath.toString(); } private static int findLongestDirPrefix(String cwd, String path, boolean isDir) { // add the path separator to dirs to simplify finding the longest match if (!cwd.endsWith(Path.SEPARATOR)) { cwd += Path.SEPARATOR; } if (isDir && !path.endsWith(Path.SEPARATOR)) { path += Path.SEPARATOR; } // find longest directory prefix int len = Math.min(cwd.length(), path.length()); int lastSep = -1; for (int i=0; i < len; i++) { if (cwd.charAt(i) != path.charAt(i)) break; if (cwd.charAt(i) == Path.SEPARATOR_CHAR) lastSep = i; } return lastSep; } /** * Returns the printable version of the path that is either the path * as given on the commandline, or the full path * @return String of the path */ @Override public String toString() { return uriToString(uri, inferredSchemeFromPath); } private static String uriToString(URI uri, boolean inferredSchemeFromPath) { String scheme = uri.getScheme(); // No interpretation of symbols. Just decode % escaped chars. String decodedRemainder = uri.getSchemeSpecificPart(); // Drop the scheme if it was inferred to ensure fidelity between // the input and output path strings. if ((scheme == null) || (inferredSchemeFromPath)) { if (Path.isWindowsAbsolutePath(decodedRemainder, true)) { // Strip the leading '/' added in stringToUri so users see a valid // Windows path. decodedRemainder = decodedRemainder.substring(1); } return decodedRemainder; } else { StringBuilder buffer = new StringBuilder(); buffer.append(scheme); buffer.append(":"); buffer.append(decodedRemainder); return buffer.toString(); } } /** * Get the path to a local file * @return File representing the local path * @throws IllegalArgumentException if this.fs is not the LocalFileSystem */ public File toFile() { if (!(fs instanceof LocalFileSystem)) { throw new IllegalArgumentException("Not a local path: " + path); } return ((LocalFileSystem)fs).pathToFile(path); } /** Normalize the given Windows path string. This does the following: * 1. Adds "file:" scheme for absolute paths. * 2. Ensures the scheme-specific part starts with '/' per RFC2396. * 3. Replaces backslash path separators with forward slashes. * @param pathString Path string supplied by the user. * @return normalized absolute path string. Returns the input string * if it is not a Windows absolute path. */ private static String normalizeWindowsPath(String pathString) throws IOException { if (!Path.WINDOWS) { return pathString; } boolean slashed = ((pathString.length() >= 1) && (pathString.charAt(0) == '/')); // Is it a backslash-separated absolute path? if (windowsNonUriAbsolutePath1.matcher(pathString).find()) { // Forward slashes disallowed in a backslash-separated path. if (pathString.indexOf('/') != -1) { throw new IOException("Invalid path string " + pathString); } pathString = pathString.replace('\\', '/'); return "file:" + (slashed ? "" : "/") + pathString; } // Is it a forward slash-separated absolute path? if (windowsNonUriAbsolutePath2.matcher(pathString).find()) { return "file:" + (slashed ? "" : "/") + pathString; } // Is it a backslash-separated relative file path (no scheme and // no drive-letter specifier)? 
if ((pathString.indexOf(':') == -1) && (pathString.indexOf('\\') != -1)) { pathString = pathString.replace('\\', '/'); } return pathString; } /** Construct a URI from a String with unescaped special characters * that have non-standard semantics. e.g. /, ?, #. A custom parsing * is needed to prevent misbehavior. * @param pathString The input path in string form * @return URI */ private static URI stringToUri(String pathString) throws IOException { // We can't use 'new URI(String)' directly. Since it doesn't do quoting // internally, the internal parser may fail or break the string at wrong // places. Use of multi-argument ctors will quote those chars for us, // but we need to do our own parsing and assembly. // parse uri components String scheme = null; String authority = null; int start = 0; pathString = normalizeWindowsPath(pathString); // parse uri scheme, if any int colon = pathString.indexOf(':'); int slash = pathString.indexOf('/'); if (colon > 0 && (slash == colon +1)) { // has a non zero-length scheme scheme = pathString.substring(0, colon); start = colon + 1; } // parse uri authority, if any if (pathString.startsWith("//", start) && (pathString.length()-start > 2)) { start += 2; int nextSlash = pathString.indexOf('/', start); int authEnd = nextSlash > 0 ? nextSlash : pathString.length(); authority = pathString.substring(start, authEnd); start = authEnd; } // uri path is the rest of the string. ? or # are not interpreted, // but any occurrence of them will be quoted by the URI ctor. String path = pathString.substring(start, pathString.length()); // Construct the URI try { return new URI(scheme, authority, path, null, null); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } @Override public int compareTo(PathData o) { return path.compareTo(o.path); } @Override public boolean equals(Object o) { return (o != null) && (o instanceof PathData) && path.equals(((PathData)o).path); } @Override public int hashCode() { return path.hashCode(); } }
20,619
34.068027
83
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.LinkedList; import java.util.List; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.StorageType; /** * Count the number of directories, files, bytes, quota, and remaining quota. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class Count extends FsCommand { /** * Register the names for the count command * @param factory the command factory that will instantiate this class */ public static void registerCommands(CommandFactory factory) { factory.addClass(Count.class, "-count"); } private static final String OPTION_QUOTA = "q"; private static final String OPTION_HUMAN = "h"; private static final String OPTION_HEADER = "v"; private static final String OPTION_TYPE = "t"; public static final String NAME = "count"; public static final String USAGE = "[-" + OPTION_QUOTA + "] [-" + OPTION_HUMAN + "] [-" + OPTION_HEADER + "] [-" + OPTION_TYPE + " [<storage type>]] <path> ..."; public static final String DESCRIPTION = "Count the number of directories, files and bytes under the paths\n" + "that match the specified file pattern. The output columns are:\n" + StringUtils.join(ContentSummary.getHeaderFields(), ' ') + " PATHNAME\n" + "or, with the -" + OPTION_QUOTA + " option:\n" + StringUtils.join(ContentSummary.getQuotaHeaderFields(), ' ') + "\n" + " " + StringUtils.join(ContentSummary.getHeaderFields(), ' ') + " PATHNAME\n" + "The -" + OPTION_HUMAN + " option shows file sizes in human readable format.\n" + "The -" + OPTION_HEADER + " option displays a header line.\n" + "The -" + OPTION_TYPE + " option displays quota by storage types.\n" + "It must be used with -" + OPTION_QUOTA + " option.\n" + "If a comma-separated list of storage types is given after the -" + OPTION_TYPE + " option, \n" + "it displays the quota and usage for the specified types. 
\n" + "Otherwise, it displays the quota and usage for all the storage \n" + "types that support quota"; private boolean showQuotas; private boolean humanReadable; private boolean showQuotabyType; private List<StorageType> storageTypes = null; /** Constructor */ public Count() {} /** Constructor * @deprecated invoke via {@link FsShell} * @param cmd the count command * @param pos the starting index of the arguments * @param conf configuration */ @Deprecated public Count(String[] cmd, int pos, Configuration conf) { super(conf); this.args = Arrays.copyOfRange(cmd, pos, cmd.length); } @Override protected void processOptions(LinkedList<String> args) { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, OPTION_QUOTA, OPTION_HUMAN, OPTION_HEADER); cf.addOptionWithValue(OPTION_TYPE); cf.parse(args); if (args.isEmpty()) { // default path is the current working directory args.add("."); } showQuotas = cf.getOpt(OPTION_QUOTA); humanReadable = cf.getOpt(OPTION_HUMAN); if (showQuotas) { String types = cf.getOptValue(OPTION_TYPE); if (null != types) { showQuotabyType = true; storageTypes = getAndCheckStorageTypes(types); } else { showQuotabyType = false; } } if (cf.getOpt(OPTION_HEADER)) { if (showQuotabyType) { out.println(ContentSummary.getStorageTypeHeader(storageTypes) + "PATHNAME"); } else { out.println(ContentSummary.getHeader(showQuotas) + "PATHNAME"); } } } private List<StorageType> getAndCheckStorageTypes(String types) { if ("".equals(types) || "all".equalsIgnoreCase(types)) { return StorageType.getTypesSupportingQuota(); } String[] typeArray = StringUtils.split(types, ','); List<StorageType> stTypes = new ArrayList<>(); for (String t : typeArray) { stTypes.add(StorageType.parseStorageType(t)); } return stTypes; } @Override protected void processPath(PathData src) throws IOException { ContentSummary summary = src.fs.getContentSummary(src.path); out.println(summary.toString(showQuotas, isHumanReadable(), showQuotabyType, storageTypes) + src); } /** * Should quotas get shown as part of the report? * @return if quotas should be shown then true otherwise false */ @InterfaceAudience.Private boolean isShowQuotas() { return showQuotas; } /** * Should sizes be shown in human readable format rather than bytes? * @return true if human readable format */ @InterfaceAudience.Private boolean isHumanReadable() { return humanReadable; } /** * should print quota by storage types * @return true if enables quota by storage types */ @InterfaceAudience.Private boolean isShowQuotabyType() { return showQuotabyType; } /** * show specified storage types * @return specified storagetypes */ @InterfaceAudience.Private List<StorageType> getStorageTypes() { return storageTypes; } }
6,303
32.178947
84
java
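Count.processPath above boils down to one FileSystem.getContentSummary call per matched path. A rough sketch of the same lookup outside the shell framework, assuming a default Configuration; the fallback path "." is just a placeholder:

// Sketch: fetch the ContentSummary that -count prints for a path.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CountSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path target = new Path(args.length > 0 ? args[0] : ".");

    // getContentSummary is the call Count.processPath makes for every matched path.
    ContentSummary summary = fs.getContentSummary(target);
    System.out.println(summary.getDirectoryCount() + " "
        + summary.getFileCount() + " "
        + summary.getLength() + " " + target);
  }
}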
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.text.SimpleDateFormat; import java.util.Date; import java.util.LinkedList; import java.util.TimeZone; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileStatus; /** * Print statistics about path in specified format. * Format sequences:<br> * %b: Size of file in blocks<br> * %F: Type<br> * %g: Group name of owner<br> * %n: Filename<br> * %o: Block size<br> * %r: replication<br> * %u: User name of owner<br> * %y: UTC date as &quot;yyyy-MM-dd HH:mm:ss&quot;<br> * %Y: Milliseconds since January 1, 1970 UTC<br> * If the format is not specified, %y is used by default. */ @InterfaceAudience.Private @InterfaceStability.Unstable class Stat extends FsCommand { public static void registerCommands(CommandFactory factory) { factory.addClass(Stat.class, "-stat"); } private static final String NEWLINE = System.getProperty("line.separator"); public static final String NAME = "stat"; public static final String USAGE = "[format] <path> ..."; public static final String DESCRIPTION = "Print statistics about the file/directory at <path>" + NEWLINE + "in the specified format. Format accepts filesize in" + NEWLINE + "blocks (%b), type (%F), group name of owner (%g)," + NEWLINE + "name (%n), block size (%o), replication (%r), user name" + NEWLINE + "of owner (%u), modification date (%y, %Y)." + NEWLINE + "%y shows UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE + "%Y shows milliseconds since January 1, 1970 UTC." + NEWLINE + "If the format is not specified, %y is used by default." + NEWLINE; protected final SimpleDateFormat timeFmt; { timeFmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); timeFmt.setTimeZone(TimeZone.getTimeZone("UTC")); } // default format string protected String format = "%y"; @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "R"); cf.parse(args); setRecursive(cf.getOpt("R")); if (args.getFirst().contains("%")) format = args.removeFirst(); cf.parse(args); // make sure there's still at least one arg } @Override protected void processPath(PathData item) throws IOException { FileStatus stat = item.stat; StringBuilder buf = new StringBuilder(); char[] fmt = format.toCharArray(); for (int i = 0; i < fmt.length; ++i) { if (fmt[i] != '%') { buf.append(fmt[i]); } else { // this silently drops a trailing %? if (i + 1 == fmt.length) break; switch (fmt[++i]) { case 'b': buf.append(stat.getLen()); break; case 'F': buf.append(stat.isDirectory() ? "directory" : (stat.isFile() ? 
"regular file" : "symlink")); break; case 'g': buf.append(stat.getGroup()); break; case 'n': buf.append(item.path.getName()); break; case 'o': buf.append(stat.getBlockSize()); break; case 'r': buf.append(stat.getReplication()); break; case 'u': buf.append(stat.getOwner()); break; case 'y': buf.append(timeFmt.format(new Date(stat.getModificationTime()))); break; case 'Y': buf.append(stat.getModificationTime()); break; default: // this leaves %<unknown> alone, which causes the potential for // future format options to break strings; should use %% to // escape percents buf.append(fmt[i]); break; } } } out.println(buf.toString()); } }
4,753
33.201439
77
java
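The %y case above is plain SimpleDateFormat work pinned to UTC. A tiny self-contained sketch, with a made-up modification timestamp standing in for FileStatus.getModificationTime():

// Sketch of the %y handling: format a modification time as UTC "yyyy-MM-dd HH:mm:ss".
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

public class StatTimeSketch {
  public static void main(String[] args) {
    long modificationTime = 1356998400000L;   // hypothetical mtime in ms (what %Y prints)

    SimpleDateFormat timeFmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
    timeFmt.setTimeZone(TimeZone.getTimeZone("UTC"));

    System.out.println(timeFmt.format(new Date(modificationTime)));   // 2013-01-01 00:00:00
  }
}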
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; /** * Parse the args of a command and check the format of args. */ public class CommandFormat { final int minPar, maxPar; final Map<String, Boolean> options = new HashMap<String, Boolean>(); final Map<String, String> optionsWithValue = new HashMap<String, String>(); boolean ignoreUnknownOpts = false; /** * @deprecated use replacement since name is an unused parameter * @param name of command, but never used * @param min see replacement * @param max see replacement * @param possibleOpt see replacement * @see #CommandFormat(int, int, String...) */ @Deprecated public CommandFormat(String n, int min, int max, String ... possibleOpt) { this(min, max, possibleOpt); } /** * Simple parsing of command line arguments * @param min minimum arguments required * @param max maximum arguments permitted * @param possibleOpt list of the allowed switches */ public CommandFormat(int min, int max, String ... possibleOpt) { minPar = min; maxPar = max; for (String opt : possibleOpt) { if (opt == null) { ignoreUnknownOpts = true; } else { options.put(opt, Boolean.FALSE); } } } /** * add option with value * * @param option option name */ public void addOptionWithValue(String option) { if (options.containsKey(option)) { throw new DuplicatedOptionException(option); } optionsWithValue.put(option, null); } /** Parse parameters starting from the given position * Consider using the variant that directly takes a List * * @param args an array of input arguments * @param pos the position at which starts to parse * @return a list of parameters */ public List<String> parse(String[] args, int pos) { List<String> parameters = new ArrayList<String>(Arrays.asList(args)); parameters.subList(0, pos).clear(); parse(parameters); return parameters; } /** Parse parameters from the given list of args. The list is * destructively modified to remove the options. 
* * @param args as a list of input arguments */ public void parse(List<String> args) { int pos = 0; while (pos < args.size()) { String arg = args.get(pos); // stop if not an opt, or the stdin arg "-" is found if (!arg.startsWith("-") || arg.equals("-")) { break; } else if (arg.equals("--")) { // force end of option processing args.remove(pos); break; } String opt = arg.substring(1); if (options.containsKey(opt)) { args.remove(pos); options.put(opt, Boolean.TRUE); } else if (optionsWithValue.containsKey(opt)) { args.remove(pos); if (pos < args.size() && (args.size() > minPar) && !args.get(pos).startsWith("-")) { arg = args.get(pos); args.remove(pos); } else { arg = ""; } if (!arg.startsWith("-") || arg.equals("-")) { optionsWithValue.put(opt, arg); } } else if (ignoreUnknownOpts) { pos++; } else { throw new UnknownOptionException(arg); } } int psize = args.size(); if (psize < minPar) { throw new NotEnoughArgumentsException(minPar, psize); } if (psize > maxPar) { throw new TooManyArgumentsException(maxPar, psize); } } /** Return if the option is set or not * * @param option String representation of an option * @return true is the option is set; false otherwise */ public boolean getOpt(String option) { return options.containsKey(option) ? options.get(option) : false; } /** * get the option's value * * @param option option name * @return option value * if option exists, but no value assigned, return "" * if option not exists, return null */ public String getOptValue(String option) { return optionsWithValue.get(option); } /** Returns all the options that are set * * @return Set<String> of the enabled options */ public Set<String> getOpts() { Set<String> optSet = new HashSet<String>(); for (Map.Entry<String, Boolean> entry : options.entrySet()) { if (entry.getValue()) { optSet.add(entry.getKey()); } } return optSet; } /** Used when the arguments exceed their bounds */ public static abstract class IllegalNumberOfArgumentsException extends IllegalArgumentException { private static final long serialVersionUID = 0L; protected int expected; protected int actual; protected IllegalNumberOfArgumentsException(int want, int got) { expected = want; actual = got; } @Override public String getMessage() { return "expected " + expected + " but got " + actual; } } /** Used when too many arguments are supplied to a command */ public static class TooManyArgumentsException extends IllegalNumberOfArgumentsException { private static final long serialVersionUID = 0L; public TooManyArgumentsException(int expected, int actual) { super(expected, actual); } @Override public String getMessage() { return "Too many arguments: " + super.getMessage(); } } /** Used when too few arguments are supplied to a command */ public static class NotEnoughArgumentsException extends IllegalNumberOfArgumentsException { private static final long serialVersionUID = 0L; public NotEnoughArgumentsException(int expected, int actual) { super(expected, actual); } @Override public String getMessage() { return "Not enough arguments: " + super.getMessage(); } } /** Used when an unsupported option is supplied to a command */ public static class UnknownOptionException extends IllegalArgumentException { private static final long serialVersionUID = 0L; protected String option = null; public UnknownOptionException(String unknownOption) { super("Illegal option " + unknownOption); option = unknownOption; } public String getOption() { return option; } } /** * Used when a duplicated option is supplied to a command. 
 */ public static class DuplicatedOptionException extends IllegalArgumentException { private static final long serialVersionUID = 0L; public DuplicatedOptionException(String duplicatedOption) { super("option " + duplicatedOption + " already exists!"); } } }
7,505
28.435294
82
java
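CommandFormat can be exercised directly to see how boolean switches and valued options are stripped from the argument list. A small sketch; the option names, the -t value, and the path are invented:

// Sketch: parse a flag, a valued option, and one positional argument.
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.fs.shell.CommandFormat;

public class CommandFormatSketch {
  public static void main(String[] args) {
    // Accept 1..N positional args, a boolean -f switch, and a valued -t option.
    CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "f");
    cf.addOptionWithValue("t");

    List<String> argv = new LinkedList<>();
    argv.add("-f");
    argv.add("-t");
    argv.add("DISK");
    argv.add("/some/path");   // hypothetical positional argument

    cf.parse(argv);                            // options are removed from the list
    System.out.println(cf.getOpt("f"));        // true
    System.out.println(cf.getOptValue("t"));   // DISK
    System.out.println(argv);                  // [/some/path]
  }
}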
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.util.LinkedList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathExistsException; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.PathNotFoundException; /** * Create the given dir */ @InterfaceAudience.Private @InterfaceStability.Unstable class Mkdir extends FsCommand { public static void registerCommands(CommandFactory factory) { factory.addClass(Mkdir.class, "-mkdir"); } public static final String NAME = "mkdir"; public static final String USAGE = "[-p] <path> ..."; public static final String DESCRIPTION = "Create a directory in specified location.\n" + "-p: Do not fail if the directory already exists"; private boolean createParents; @Override protected void processOptions(LinkedList<String> args) { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "p"); cf.parse(args); createParents = cf.getOpt("p"); } @Override protected void processPath(PathData item) throws IOException { if (item.stat.isDirectory()) { if (!createParents) { throw new PathExistsException(item.toString()); } } else { throw new PathIsNotDirectoryException(item.toString()); } } @Override protected void processNonexistentPath(PathData item) throws IOException { // check if parent exists. this is complicated because getParent(a/b/c/) returns a/b/c, but // we want a/b if (!createParents && !item.fs.exists(new Path(item.path.toString()).getParent())) { throw new PathNotFoundException(item.toString()); } if (!item.fs.mkdirs(item.path)) { throw new PathIOException(item.toString()); } } }
2,737
32.390244
95
java
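Mkdir.processNonexistentPath above refuses to create a directory whose parent is missing unless -p was given. A sketch of that check with plain FileSystem calls; the target path is hypothetical and parent creation (-p) is assumed off:

// Sketch of mkdir without -p: require the parent to exist, then call mkdirs.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MkdirSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/tmp/example/newdir");   // made-up target

    // Without -p, Mkdir first checks that the parent exists before calling mkdirs.
    if (!fs.exists(dir.getParent())) {
      System.err.println("mkdir: " + dir.getParent() + ": No such file or directory");
      return;
    }
    if (!fs.mkdirs(dir)) {
      System.err.println("mkdir: could not create " + dir);
    }
  }
}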
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.util.LinkedList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.PathExistsException; import org.apache.hadoop.fs.shell.CopyCommands.CopyFromLocal; /** Various commands for moving files */ @InterfaceAudience.Private @InterfaceStability.Evolving class MoveCommands { public static void registerCommands(CommandFactory factory) { factory.addClass(MoveFromLocal.class, "-moveFromLocal"); factory.addClass(MoveToLocal.class, "-moveToLocal"); factory.addClass(Rename.class, "-mv"); } /** * Move local files to a remote filesystem */ public static class MoveFromLocal extends CopyFromLocal { public static final String NAME = "moveFromLocal"; public static final String USAGE = "<localsrc> ... <dst>"; public static final String DESCRIPTION = "Same as -put, except that the source is " + "deleted after it's copied."; @Override protected void processPath(PathData src, PathData target) throws IOException { // unlike copy, don't merge existing dirs during move if (target.exists && target.stat.isDirectory()) { throw new PathExistsException(target.toString()); } super.processPath(src, target); } @Override protected void postProcessPath(PathData src) throws IOException { if (!src.fs.delete(src.path, false)) { // we have no way to know the actual error... PathIOException e = new PathIOException(src.toString()); e.setOperation("remove"); throw e; } } } /** * Move remote files to a local filesystem */ public static class MoveToLocal extends FsCommand { public static final String NAME = "moveToLocal"; public static final String USAGE = "<src> <localdst>"; public static final String DESCRIPTION = "Not implemented yet"; @Override protected void processOptions(LinkedList<String> args) throws IOException { throw new IOException("Option '-moveToLocal' is not implemented yet."); } } /** move/rename paths on the same fileystem */ public static class Rename extends CommandWithDestination { public static final String NAME = "mv"; public static final String USAGE = "<src> ... <dst>"; public static final String DESCRIPTION = "Move files that match the specified file pattern <src> " + "to a destination <dst>. 
When moving multiple files, the " + "destination must be a directory."; @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE); cf.parse(args); getRemoteDestination(args); } @Override protected void processPath(PathData src, PathData target) throws IOException { if (!src.fs.getUri().equals(target.fs.getUri())) { throw new PathIOException(src.toString(), "Does not match target filesystem"); } if (target.exists) { throw new PathExistsException(target.toString()); } if (!target.fs.rename(src.path, target.path)) { // we have no way to know the actual error... throw new PathIOException(src.toString()); } } } }
4,213
35.017094
82
java
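Rename.processPath above only moves within one filesystem and then delegates to FileSystem.rename. A sketch of the same two steps; both paths are invented:

// Sketch of -mv: same-filesystem check, then rename.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path src = new Path("/tmp/a.txt");   // hypothetical source
    Path dst = new Path("/tmp/b.txt");   // hypothetical destination

    FileSystem srcFs = src.getFileSystem(conf);
    FileSystem dstFs = dst.getFileSystem(conf);

    // Refuse to move across filesystems, mirroring the URI comparison above.
    if (!srcFs.getUri().equals(dstFs.getUri())) {
      throw new java.io.IOException(src + ": Does not match target filesystem");
    }
    if (!srcFs.rename(src, dst)) {
      throw new java.io.IOException("rename failed for " + src);
    }
  }
}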
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFactory.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.util.Arrays; import java.util.HashMap; import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; /** class to search for and register commands */ @InterfaceAudience.Private @InterfaceStability.Unstable public class CommandFactory extends Configured { private Map<String, Class<? extends Command>> classMap = new HashMap<String, Class<? extends Command>>(); private Map<String, Command> objectMap = new HashMap<String, Command>(); /** Factory constructor for commands */ public CommandFactory() { this(null); } /** * Factory constructor for commands * @param conf the hadoop configuration */ public CommandFactory(Configuration conf) { super(conf); } /** * Invokes "static void registerCommands(CommandFactory)" on the given class. * This method abstracts the contract between the factory and the command * class. Do not assume that directly invoking registerCommands on the * given class will have the same effect. * @param registrarClass class to allow an opportunity to register */ public void registerCommands(Class<?> registrarClass) { try { registrarClass.getMethod( "registerCommands", CommandFactory.class ).invoke(null, this); } catch (Exception e) { throw new RuntimeException(StringUtils.stringifyException(e)); } } /** * Register the given class as handling the given list of command * names. * @param cmdClass the class implementing the command names * @param names one or more command names that will invoke this class */ public void addClass(Class<? extends Command> cmdClass, String ... names) { for (String name : names) classMap.put(name, cmdClass); } /** * Register the given object as handling the given list of command * names. Avoid calling this method and use * {@link #addClass(Class, String...)} whenever possible to avoid * startup overhead from excessive command object instantiations. This * method is intended only for handling nested non-static classes that * are re-usable. Namely -help/-usage. * @param cmdObject the object implementing the command names * @param names one or more command names that will invoke this class */ public void addObject(Command cmdObject, String ... names) { for (String name : names) { objectMap.put(name, cmdObject); classMap.put(name, null); // just so it shows up in the list of commands } } /** * Returns an instance of the class implementing the given command. 
The * class must have been registered via * {@link #addClass(Class, String...)} * @param cmd name of the command * @return instance of the requested command */ public Command getInstance(String cmd) { return getInstance(cmd, getConf()); } /** * Get an instance of the requested command * @param cmdName name of the command to lookup * @param conf the hadoop configuration * @return the {@link Command} or null if the command is unknown */ public Command getInstance(String cmdName, Configuration conf) { if (conf == null) throw new NullPointerException("configuration is null"); Command instance = objectMap.get(cmdName); if (instance == null) { Class<? extends Command> cmdClass = classMap.get(cmdName); if (cmdClass != null) { instance = ReflectionUtils.newInstance(cmdClass, conf); instance.setName(cmdName); instance.setCommandFactory(this); } } return instance; } /** * Gets all of the registered commands * @return a sorted list of command names */ public String[] getNames() { String[] names = classMap.keySet().toArray(new String[0]); Arrays.sort(names); return names; } }
4,875
33.097902
79
java
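The intended round trip is: a registrar class adds its command classes via addClass, and callers later ask the factory for an instance by command name. A sketch using the public entry points shown above; "-count" is one of the names FsCommand registers:

// Sketch: register the fs shell commands, then look one up by name.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.shell.Command;
import org.apache.hadoop.fs.shell.CommandFactory;
import org.apache.hadoop.fs.shell.FsCommand;

public class FactorySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    CommandFactory factory = new CommandFactory(conf);

    // Lets the registrar class call factory.addClass(...) for its commands.
    factory.registerCommands(FsCommand.class);

    Command count = factory.getInstance("-count", conf);
    System.out.println(count == null
        ? "unknown command" : count.getClass().getSimpleName());   // Count
  }
}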
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.util.LinkedList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FsShellPermissions; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.shell.find.Find; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY; /** * Base class for all "hadoop fs" commands. */ @InterfaceAudience.Private @InterfaceStability.Evolving // this class may not look useful now, but it's a placeholder for future // functionality to act as a registry for fs commands. 
currently it's being // used to implement unnecessary abstract methods in the base class abstract public class FsCommand extends Command { /** * Register the command classes used by the fs subcommand * @param factory where to register the class */ public static void registerCommands(CommandFactory factory) { factory.registerCommands(AclCommands.class); factory.registerCommands(CopyCommands.class); factory.registerCommands(Count.class); factory.registerCommands(Delete.class); factory.registerCommands(Display.class); factory.registerCommands(Find.class); factory.registerCommands(FsShellPermissions.class); factory.registerCommands(FsUsage.class); factory.registerCommands(Ls.class); factory.registerCommands(Mkdir.class); factory.registerCommands(MoveCommands.class); factory.registerCommands(SetReplication.class); factory.registerCommands(Stat.class); factory.registerCommands(Tail.class); factory.registerCommands(Test.class); factory.registerCommands(Touch.class); factory.registerCommands(Truncate.class); factory.registerCommands(SnapshotCommands.class); factory.registerCommands(XAttrCommands.class); } protected FsCommand() {} protected FsCommand(Configuration conf) { super(conf); } // historical abstract method in Command @Override public String getCommandName() { return getName(); } // abstract method that normally is invoked by runall() which is // overridden below @Override protected void run(Path path) throws IOException { throw new RuntimeException("not supposed to get here"); } /** @deprecated use {@link Command#run(String...argv)} */ @Deprecated @Override public int runAll() { return run(args); } @Override protected void processRawArguments(LinkedList<String> args) throws IOException { LinkedList<PathData> expendedArgs = expandArguments(args); // If "fs.defaultFs" is not set appropriately, it warns the user that the // command is not running against HDFS. final boolean displayWarnings = getConf().getBoolean( HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY, HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT); if (displayWarnings) { final String defaultFs = getConf().get(FS_DEFAULT_NAME_KEY); final boolean missingDefaultFs = defaultFs == null || defaultFs.equals(FS_DEFAULT_NAME_DEFAULT); if (missingDefaultFs) { err.printf( "Warning: fs.defaultFs is not set when running \"%s\" command.%n", getCommandName()); } } processArguments(expendedArgs); } }
4,496
35.860656
113
java
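The warning in processRawArguments fires when fs.defaultFS is unset or still the local-filesystem default. A sketch of that check on its own, using the same public configuration constants imported above:

// Sketch: is fs.defaultFS still pointing at the local default?
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;

public class DefaultFsCheckSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    String defaultFs = conf.get(FS_DEFAULT_NAME_KEY);

    boolean missingDefaultFs =
        defaultFs == null || defaultFs.equals(FS_DEFAULT_NAME_DEFAULT);
    if (missingDefaultFs) {
      System.err.println("Warning: " + FS_DEFAULT_NAME_KEY
          + " is not set; commands will run against the local filesystem.");
    }
  }
}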
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsDirectoryException; import org.apache.hadoop.io.IOUtils; /** Various commands for copy files */ @InterfaceAudience.Private @InterfaceStability.Evolving class CopyCommands { public static void registerCommands(CommandFactory factory) { factory.addClass(Merge.class, "-getmerge"); factory.addClass(Cp.class, "-cp"); factory.addClass(CopyFromLocal.class, "-copyFromLocal"); factory.addClass(CopyToLocal.class, "-copyToLocal"); factory.addClass(Get.class, "-get"); factory.addClass(Put.class, "-put"); factory.addClass(AppendToFile.class, "-appendToFile"); } /** merge multiple files together */ public static class Merge extends FsCommand { public static final String NAME = "getmerge"; public static final String USAGE = "[-nl] <src> <localdst>"; public static final String DESCRIPTION = "Get all the files in the directories that " + "match the source file pattern and merge and sort them to only " + "one file on local fs. <src> is kept.\n" + "-nl: Add a newline character at the end of each file."; protected PathData dst = null; protected String delimiter = null; protected List<PathData> srcs = null; @Override protected void processOptions(LinkedList<String> args) throws IOException { try { CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "nl"); cf.parse(args); delimiter = cf.getOpt("nl") ? 
"\n" : null; dst = new PathData(new URI(args.removeLast()), getConf()); if (dst.exists && dst.stat.isDirectory()) { throw new PathIsDirectoryException(dst.toString()); } srcs = new LinkedList<PathData>(); } catch (URISyntaxException e) { throw new IOException("unexpected URISyntaxException", e); } } @Override protected void processArguments(LinkedList<PathData> items) throws IOException { super.processArguments(items); if (exitCode != 0) { // check for error collecting paths return; } FSDataOutputStream out = dst.fs.create(dst.path); try { for (PathData src : srcs) { FSDataInputStream in = src.fs.open(src.path); try { IOUtils.copyBytes(in, out, getConf(), false); if (delimiter != null) { out.write(delimiter.getBytes("UTF-8")); } } finally { in.close(); } } } finally { out.close(); } } @Override protected void processNonexistentPath(PathData item) throws IOException { exitCode = 1; // flag that a path is bad super.processNonexistentPath(item); } // this command is handled a bit differently than others. the paths // are batched up instead of actually being processed. this avoids // unnecessarily streaming into the merge file and then encountering // a path error that should abort the merge @Override protected void processPath(PathData src) throws IOException { // for directories, recurse one level to get its files, else skip it if (src.stat.isDirectory()) { if (getDepth() == 0) { recursePath(src); } // skip subdirs } else { srcs.add(src); } } } static class Cp extends CommandWithDestination { public static final String NAME = "cp"; public static final String USAGE = "[-f] [-p | -p[topax]] <src> ... <dst>"; public static final String DESCRIPTION = "Copy files that match the file pattern <src> to a " + "destination. When copying multiple files, the destination " + "must be a directory. Passing -p preserves status " + "[topax] (timestamps, ownership, permission, ACLs, XAttr). " + "If -p is specified with no <arg>, then preserves " + "timestamps, ownership, permission. If -pa is specified, " + "then preserves permission also because ACL is a super-set of " + "permission. Passing -f overwrites the destination if it " + "already exists. raw namespace extended attributes are preserved " + "if (1) they are supported (HDFS only) and, (2) all of the source and " + "target pathnames are in the /.reserved/raw hierarchy. raw namespace " + "xattr preservation is determined solely by the presence (or absence) " + "of the /.reserved/raw prefix and not by the -p option.\n"; @Override protected void processOptions(LinkedList<String> args) throws IOException { popPreserveOption(args); CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "f"); cf.parse(args); setOverwrite(cf.getOpt("f")); // should have a -r option setRecursive(true); getRemoteDestination(args); } private void popPreserveOption(List<String> args) { for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) { String cur = iter.next(); if (cur.equals("--")) { // stop parsing arguments when you see -- break; } else if (cur.startsWith("-p")) { iter.remove(); if (cur.length() == 2) { setPreserve(true); } else { String attributes = cur.substring(2); for (int index = 0; index < attributes.length(); index++) { preserve(FileAttribute.getAttribute(attributes.charAt(index))); } } return; } } } } /** * Copy local files to a remote filesystem */ public static class Get extends CommandWithDestination { public static final String NAME = "get"; public static final String USAGE = "[-p] [-ignoreCrc] [-crc] <src> ... 
<localdst>"; public static final String DESCRIPTION = "Copy files that match the file pattern <src> " + "to the local name. <src> is kept. When copying multiple " + "files, the destination must be a directory. Passing " + "-p preserves access and modification times, " + "ownership and the mode.\n"; @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat( 1, Integer.MAX_VALUE, "crc", "ignoreCrc", "p"); cf.parse(args); setWriteChecksum(cf.getOpt("crc")); setVerifyChecksum(!cf.getOpt("ignoreCrc")); setPreserve(cf.getOpt("p")); setRecursive(true); getLocalDestination(args); } } /** * Copy local files to a remote filesystem */ public static class Put extends CommandWithDestination { public static final String NAME = "put"; public static final String USAGE = "[-f] [-p] [-l] <localsrc> ... <dst>"; public static final String DESCRIPTION = "Copy files from the local file system " + "into fs. Copying fails if the file already " + "exists, unless the -f flag is given.\n" + "Flags:\n" + " -p : Preserves access and modification times, ownership and the mode.\n" + " -f : Overwrites the destination if it already exists.\n" + " -l : Allow DataNode to lazily persist the file to disk. Forces\n" + " replication factor of 1. This flag will result in reduced\n" + " durability. Use with care.\n"; @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l"); cf.parse(args); setOverwrite(cf.getOpt("f")); setPreserve(cf.getOpt("p")); setLazyPersist(cf.getOpt("l")); getRemoteDestination(args); // should have a -r option setRecursive(true); } // commands operating on local paths have no need for glob expansion @Override protected List<PathData> expandArgument(String arg) throws IOException { List<PathData> items = new LinkedList<PathData>(); try { items.add(new PathData(new URI(arg), getConf())); } catch (URISyntaxException e) { if (Path.WINDOWS) { // Unlike URI, PathData knows how to parse Windows drive-letter paths. items.add(new PathData(arg, getConf())); } else { throw new IOException("unexpected URISyntaxException", e); } } return items; } @Override protected void processArguments(LinkedList<PathData> args) throws IOException { // NOTE: this logic should be better, mimics previous implementation if (args.size() == 1 && args.get(0).toString().equals("-")) { copyStreamToTarget(System.in, getTargetPath(args.get(0))); return; } super.processArguments(args); } } public static class CopyFromLocal extends Put { public static final String NAME = "copyFromLocal"; public static final String USAGE = Put.USAGE; public static final String DESCRIPTION = "Identical to the -put command."; } public static class CopyToLocal extends Get { public static final String NAME = "copyToLocal"; public static final String USAGE = Get.USAGE; public static final String DESCRIPTION = "Identical to the -get command."; } /** * Append the contents of one or more local files to a remote * file. */ public static class AppendToFile extends CommandWithDestination { public static final String NAME = "appendToFile"; public static final String USAGE = "<localsrc> ... <dst>"; public static final String DESCRIPTION = "Appends the contents of all the given local files to the " + "given dst file. The dst file will be created if it does " + "not exist. 
If <localSrc> is -, then the input is read " + "from stdin."; private static final int DEFAULT_IO_LENGTH = 1024 * 1024; boolean readStdin = false; // commands operating on local paths have no need for glob expansion @Override protected List<PathData> expandArgument(String arg) throws IOException { List<PathData> items = new LinkedList<PathData>(); if (arg.equals("-")) { readStdin = true; } else { try { items.add(new PathData(new URI(arg), getConf())); } catch (URISyntaxException e) { if (Path.WINDOWS) { // Unlike URI, PathData knows how to parse Windows drive-letter paths. items.add(new PathData(arg, getConf())); } else { throw new IOException("Unexpected URISyntaxException: " + e.toString()); } } } return items; } @Override protected void processOptions(LinkedList<String> args) throws IOException { if (args.size() < 2) { throw new IOException("missing destination argument"); } getRemoteDestination(args); super.processOptions(args); } @Override protected void processArguments(LinkedList<PathData> args) throws IOException { if (!dst.exists) { dst.fs.create(dst.path, false).close(); } InputStream is = null; FSDataOutputStream fos = dst.fs.append(dst.path); try { if (readStdin) { if (args.size() == 0) { IOUtils.copyBytes(System.in, fos, DEFAULT_IO_LENGTH); } else { throw new IOException( "stdin (-) must be the sole input argument when present"); } } // Read in each input file and write to the target. for (PathData source : args) { is = new FileInputStream(source.toFile()); IOUtils.copyBytes(is, fos, DEFAULT_IO_LENGTH); IOUtils.closeStream(is); is = null; } } finally { if (is != null) { IOUtils.closeStream(is); } if (fos != null) { IOUtils.closeStream(fos); } } } } }
13,207
34.505376
84
java
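Merge.processArguments above is essentially "open each source, copyBytes into one output stream, optionally write a newline". A condensed sketch of that loop; the part files, destination, and delimiter are made up:

// Sketch of getmerge: concatenate several files into one destination.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class GetMergeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path[] sources = { new Path("/tmp/part-0"), new Path("/tmp/part-1") };   // hypothetical
    Path dst = new Path("/tmp/merged.txt");                                  // hypothetical
    String delimiter = "\n";   // what -nl appends after each file

    try (FSDataOutputStream out = fs.create(dst)) {
      for (Path src : sources) {
        try (FSDataInputStream in = fs.open(src)) {
          IOUtils.copyBytes(in, out, conf, false);   // false: keep the output open
        }
        if (delimiter != null) {
          out.write(delimiter.getBytes("UTF-8"));
        }
      }
    }
  }
}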
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.FileNotFoundException; import java.io.IOException; import java.util.LinkedList; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.PathIOException; import org.apache.hadoop.fs.PathIsDirectoryException; import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.fs.PathNotFoundException; import org.apache.hadoop.fs.Trash; /** * Classes that delete paths */ @InterfaceAudience.Private @InterfaceStability.Evolving class Delete { public static void registerCommands(CommandFactory factory) { factory.addClass(Rm.class, "-rm"); factory.addClass(Rmdir.class, "-rmdir"); factory.addClass(Rmr.class, "-rmr"); factory.addClass(Expunge.class, "-expunge"); } /** remove non-directory paths */ public static class Rm extends FsCommand { public static final String NAME = "rm"; public static final String USAGE = "[-f] [-r|-R] [-skipTrash] <src> ..."; public static final String DESCRIPTION = "Delete all files that match the specified file pattern. " + "Equivalent to the Unix command \"rm <src>\"\n" + "-skipTrash: option bypasses trash, if enabled, and immediately " + "deletes <src>\n" + "-f: If the file does not exist, do not display a diagnostic " + "message or modify the exit status to reflect an error.\n" + "-[rR]: Recursively deletes directories"; private boolean skipTrash = false; private boolean deleteDirs = false; private boolean ignoreFNF = false; @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat( 1, Integer.MAX_VALUE, "f", "r", "R", "skipTrash"); cf.parse(args); ignoreFNF = cf.getOpt("f"); deleteDirs = cf.getOpt("r") || cf.getOpt("R"); skipTrash = cf.getOpt("skipTrash"); } @Override protected List<PathData> expandArgument(String arg) throws IOException { try { return super.expandArgument(arg); } catch (PathNotFoundException e) { if (!ignoreFNF) { throw e; } // prevent -f on a non-existent glob from failing return new LinkedList<PathData>(); } } @Override protected void processNonexistentPath(PathData item) throws IOException { if (!ignoreFNF) super.processNonexistentPath(item); } @Override protected void processPath(PathData item) throws IOException { if (item.stat.isDirectory() && !deleteDirs) { throw new PathIsDirectoryException(item.toString()); } // TODO: if the user wants the trash to be used but there is any // problem (ie. 
creating the trash dir, moving the item to be deleted, // etc), then the path will just be deleted because moveToTrash returns // false and it falls thru to fs.delete. this doesn't seem right if (moveToTrash(item)) { return; } if (!item.fs.delete(item.path, deleteDirs)) { throw new PathIOException(item.toString()); } out.println("Deleted " + item); } private boolean moveToTrash(PathData item) throws IOException { boolean success = false; if (!skipTrash) { try { success = Trash.moveToAppropriateTrash(item.fs, item.path, getConf()); } catch(FileNotFoundException fnfe) { throw fnfe; } catch (IOException ioe) { String msg = ioe.getMessage(); if (ioe.getCause() != null) { msg += ": " + ioe.getCause().getMessage(); } throw new IOException(msg + ". Consider using -skipTrash option", ioe); } } return success; } } /** remove any path */ static class Rmr extends Rm { public static final String NAME = "rmr"; @Override protected void processOptions(LinkedList<String> args) throws IOException { args.addFirst("-r"); super.processOptions(args); } @Override public String getReplacementCommand() { return "-rm -r"; } } /** remove only empty directories */ static class Rmdir extends FsCommand { public static final String NAME = "rmdir"; public static final String USAGE = "[--ignore-fail-on-non-empty] <dir> ..."; public static final String DESCRIPTION = "Removes the directory entry specified by each directory argument, " + "provided it is empty.\n"; private boolean ignoreNonEmpty = false; @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat( 1, Integer.MAX_VALUE, "-ignore-fail-on-non-empty"); cf.parse(args); ignoreNonEmpty = cf.getOpt("-ignore-fail-on-non-empty"); } @Override protected void processPath(PathData item) throws IOException { if (!item.stat.isDirectory()) { throw new PathIsNotDirectoryException(item.toString()); } if (item.fs.listStatus(item.path).length == 0) { if (!item.fs.delete(item.path, false)) { throw new PathIOException(item.toString()); } } else if (!ignoreNonEmpty) { throw new PathIsNotEmptyDirectoryException(item.toString()); } } } /** empty the trash */ static class Expunge extends FsCommand { public static final String NAME = "expunge"; public static final String USAGE = ""; public static final String DESCRIPTION = "Empty the Trash"; // TODO: should probably allow path arguments for the filesystems @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(0, 0); cf.parse(args); } @Override protected void processArguments(LinkedList<PathData> args) throws IOException { FileSystem[] childFileSystems = FileSystem.get(getConf()).getChildFileSystems(); if (null != childFileSystems) { for (FileSystem fs : childFileSystems) { Trash trash = new Trash(fs, getConf()); trash.expunge(); trash.checkpoint(); } } else { Trash trash = new Trash(getConf()); trash.expunge(); trash.checkpoint(); } } } }
7,314
33.023256
81
java
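Rm.processPath above tries the trash first (unless -skipTrash) and only then calls FileSystem.delete. A sketch of that order of operations; the target path and flags are hypothetical:

// Sketch of rm: trash first, plain delete as the fallback.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class RmSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path target = new Path("/tmp/old-data");   // hypothetical
    FileSystem fs = target.getFileSystem(conf);
    boolean skipTrash = false;
    boolean recursive = true;

    // moveToAppropriateTrash returns false when trash is disabled or skipped.
    boolean movedToTrash = !skipTrash
        && Trash.moveToAppropriateTrash(fs, target, conf);
    if (!movedToTrash && !fs.delete(target, recursive)) {
      throw new java.io.IOException("rm: could not delete " + target);
    }
    System.out.println("Deleted " + target);
  }
}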
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.io.PrintStream; import java.util.ArrayList; import java.util.LinkedList; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FsStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.StringUtils; /** Base class for commands related to viewing filesystem usage, such as * du and df */ @InterfaceAudience.Private @InterfaceStability.Evolving class FsUsage extends FsCommand { public static void registerCommands(CommandFactory factory) { factory.addClass(Df.class, "-df"); factory.addClass(Du.class, "-du"); factory.addClass(Dus.class, "-dus"); } protected boolean humanReadable = false; protected TableBuilder usagesTable; protected String formatSize(long size) { return humanReadable ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1) : String.valueOf(size); } /** Show the size of a partition in the filesystem */ public static class Df extends FsUsage { public static final String NAME = "df"; public static final String USAGE = "[-h] [<path> ...]"; public static final String DESCRIPTION = "Shows the capacity, free and used space of the filesystem. 
"+ "If the filesystem has multiple partitions, and no path to a " + "particular partition is specified, then the status of the root " + "partitions will be shown.\n" + "-h: Formats the sizes of files in a human-readable fashion " + "rather than a number of bytes."; @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "h"); cf.parse(args); humanReadable = cf.getOpt("h"); if (args.isEmpty()) args.add(Path.SEPARATOR); } @Override protected void processArguments(LinkedList<PathData> args) throws IOException { usagesTable = new TableBuilder( "Filesystem", "Size", "Used", "Available", "Use%"); usagesTable.setRightAlign(1, 2, 3, 4); super.processArguments(args); if (!usagesTable.isEmpty()) { usagesTable.printToStream(out); } } @Override protected void processPath(PathData item) throws IOException { FsStatus fsStats = item.fs.getStatus(item.path); long size = fsStats.getCapacity(); long used = fsStats.getUsed(); long free = fsStats.getRemaining(); usagesTable.addRow( item.fs.getUri(), formatSize(size), formatSize(used), formatSize(free), StringUtils.formatPercent((double)used/(double)size, 0) ); } } /** show disk usage */ public static class Du extends FsUsage { public static final String NAME = "du"; public static final String USAGE = "[-s] [-h] <path> ..."; public static final String DESCRIPTION = "Show the amount of space, in bytes, used by the files that " + "match the specified file pattern. The following flags are optional:\n" + "-s: Rather than showing the size of each individual file that" + " matches the pattern, shows the total (summary) size.\n" + "-h: Formats the sizes of files in a human-readable fashion" + " rather than a number of bytes.\n\n" + "Note that, even without the -s option, this only shows size summaries " + "one level deep into a directory.\n\n" + "The output is in the form \n" + "\tsize\tname(full path)\n"; protected boolean summary = false; @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "h", "s"); cf.parse(args); humanReadable = cf.getOpt("h"); summary = cf.getOpt("s"); if (args.isEmpty()) args.add(Path.CUR_DIR); } @Override protected void processPathArgument(PathData item) throws IOException { usagesTable = new TableBuilder(2); // go one level deep on dirs from cmdline unless in summary mode if (!summary && item.stat.isDirectory()) { recursePath(item); } else { super.processPathArgument(item); } usagesTable.printToStream(out); } @Override protected void processPath(PathData item) throws IOException { long length; if (item.stat.isDirectory()) { length = item.fs.getContentSummary(item.path).getLength(); } else { length = item.stat.getLen(); } usagesTable.addRow(formatSize(length), item); } } /** show disk usage summary */ public static class Dus extends Du { public static final String NAME = "dus"; @Override protected void processOptions(LinkedList<String> args) throws IOException { args.addFirst("-s"); super.processOptions(args); } @Override public String getReplacementCommand() { return "du -s"; } } /** * Creates a table of aligned values based on the maximum width of each * column as a string */ private static class TableBuilder { protected boolean hasHeader = false; protected List<String[]> rows; protected int[] widths; protected boolean[] rightAlign; /** * Create a table w/o headers * @param columns number of columns */ public TableBuilder(int columns) { rows = new 
ArrayList<String[]>(); widths = new int[columns]; rightAlign = new boolean[columns]; } /** * Create a table with headers * @param headers list of headers */ public TableBuilder(Object ... headers) { this(headers.length); this.addRow(headers); hasHeader = true; } /** * Change the default left-align of columns * @param indexes of columns to right align */ public void setRightAlign(int ... indexes) { for (int i : indexes) rightAlign[i] = true; } /** * Add a row of objects to the table * @param objects the values */ public void addRow(Object ... objects) { String[] row = new String[widths.length]; for (int col=0; col < widths.length; col++) { row[col] = String.valueOf(objects[col]); widths[col] = Math.max(widths[col], row[col].length()); } rows.add(row); } /** * Render the table to a stream * @param out PrintStream for output */ public void printToStream(PrintStream out) { if (isEmpty()) return; StringBuilder fmt = new StringBuilder(); for (int i=0; i < widths.length; i++) { if (fmt.length() != 0) fmt.append(" "); if (rightAlign[i]) { fmt.append("%"+widths[i]+"s"); } else if (i != widths.length-1) { fmt.append("%-"+widths[i]+"s"); } else { // prevent trailing spaces if the final column is left-aligned fmt.append("%s"); } } for (Object[] row : rows) { out.println(String.format(fmt.toString(), row)); } } /** * Number of rows excluding header * @return rows */ public int size() { return rows.size() - (hasHeader ? 1 : 0); } /** * Does table have any rows * @return boolean */ public boolean isEmpty() { return size() == 0; } } }
8,260
30.056391
79
java
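The Df numbers come straight from FsStatus, with StringUtils.formatPercent producing the Use% column. A sketch of one row of that table for the filesystem root:

// Sketch of df: capacity, used, remaining, and a percentage for one filesystem.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;

public class DfSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FsStatus status = fs.getStatus(new Path("/"));

    long size = status.getCapacity();
    long used = status.getUsed();
    long free = status.getRemaining();
    System.out.println(fs.getUri() + "  " + size + "  " + used + "  " + free + "  "
        + StringUtils.formatPercent((double) used / (double) size, 0));
  }
}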
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.util.LinkedList; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.PathIsDirectoryException; import org.apache.hadoop.io.IOUtils; /** * Get a listing of all files in that match the file patterns. */ @InterfaceAudience.Private @InterfaceStability.Unstable class Tail extends FsCommand { public static void registerCommands(CommandFactory factory) { factory.addClass(Tail.class, "-tail"); } public static final String NAME = "tail"; public static final String USAGE = "[-f] <file>"; public static final String DESCRIPTION = "Show the last 1KB of the file.\n" + "-f: Shows appended data as the file grows.\n"; private long startingOffset = -1024; private boolean follow = false; private long followDelay = 5000; // milliseconds @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(1, 1, "f"); cf.parse(args); follow = cf.getOpt("f"); } // TODO: HADOOP-7234 will add glob support; for now, be backwards compat @Override protected List<PathData> expandArgument(String arg) throws IOException { List<PathData> items = new LinkedList<PathData>(); items.add(new PathData(arg, getConf())); return items; } @Override protected void processPath(PathData item) throws IOException { if (item.stat.isDirectory()) { throw new PathIsDirectoryException(item.toString()); } long offset = dumpFromOffset(item, startingOffset); while (follow) { try { Thread.sleep(followDelay); } catch (InterruptedException e) { break; } offset = dumpFromOffset(item, offset); } } private long dumpFromOffset(PathData item, long offset) throws IOException { long fileSize = item.refreshStatus().getLen(); if (offset > fileSize) return fileSize; // treat a negative offset as relative to end of the file, floor of 0 if (offset < 0) { offset = Math.max(fileSize + offset, 0); } FSDataInputStream in = item.fs.open(item.path); try { in.seek(offset); // use conf so the system configured io block size is used IOUtils.copyBytes(in, System.out, getConf(), false); offset = in.getPos(); } finally { in.close(); } return offset; } }
3,357
31.288462
78
java
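The interesting arithmetic in dumpFromOffset above is the handling of the default starting offset of -1024: a negative offset is interpreted relative to the end of the file and floored at zero, while an offset past the current end of file is clamped back to the file size. A small self-contained sketch of that normalisation, with hypothetical file sizes:

// Sketch of the offset normalisation performed by Tail.dumpFromOffset above.
public class TailOffsetDemo {
  static long normalise(long offset, long fileSize) {
    if (offset > fileSize) {
      return fileSize;                            // nothing new to read yet
    }
    if (offset < 0) {
      offset = Math.max(fileSize + offset, 0);    // relative to end of file, floor of 0
    }
    return offset;
  }

  public static void main(String[] args) {
    System.out.println(normalise(-1024, 500));    // 0    (file smaller than 1KB)
    System.out.println(normalise(-1024, 10000));  // 8976 (start of the last 1KB)
    System.out.println(normalise(9000, 5000));    // 5000 (offset past current EOF)
  }
}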
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell; import java.io.IOException; import java.util.LinkedList; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.PathIOException; /** * Modifies the replication factor */ @InterfaceAudience.Private @InterfaceStability.Unstable class SetReplication extends FsCommand { public static void registerCommands(CommandFactory factory) { factory.addClass(SetReplication.class, "-setrep"); } public static final String NAME = "setrep"; public static final String USAGE = "[-R] [-w] <rep> <path> ..."; public static final String DESCRIPTION = "Set the replication level of a file. If <path> is a directory " + "then the command recursively changes the replication factor of " + "all files under the directory tree rooted at <path>.\n" + "-w: It requests that the command waits for the replication " + "to complete. This can potentially take a very long time.\n" + "-R: It is accepted for backwards compatibility. It has no effect."; protected short newRep = 0; protected List<PathData> waitList = new LinkedList<PathData>(); protected boolean waitOpt = false; @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "R", "w"); cf.parse(args); waitOpt = cf.getOpt("w"); setRecursive(true); try { newRep = Short.parseShort(args.removeFirst()); } catch (NumberFormatException nfe) { displayWarning("Illegal replication, a positive integer expected"); throw nfe; } if (newRep < 1) { throw new IllegalArgumentException("replication must be >= 1"); } } @Override protected void processArguments(LinkedList<PathData> args) throws IOException { super.processArguments(args); if (waitOpt) waitForReplication(); } @Override protected void processPath(PathData item) throws IOException { if (item.stat.isSymlink()) { throw new PathIOException(item.toString(), "Symlinks unsupported"); } if (item.stat.isFile()) { if (!item.fs.setReplication(item.path, newRep)) { throw new IOException("Could not set replication for: " + item); } out.println("Replication " + newRep + " set: " + item); if (waitOpt) waitList.add(item); } } /** * Wait for all files in waitList to have replication number equal to rep. 
*/ private void waitForReplication() throws IOException { for (PathData item : waitList) { out.print("Waiting for " + item + " ..."); out.flush(); boolean printedWarning = false; boolean done = false; while (!done) { item.refreshStatus(); BlockLocation[] locations = item.fs.getFileBlockLocations(item.stat, 0, item.stat.getLen()); int i = 0; for(; i < locations.length; i++) { int currentRep = locations[i].getHosts().length; if (currentRep != newRep) { if (!printedWarning && currentRep > newRep) { out.println("\nWARNING: the waiting time may be long for " + "DECREASING the number of replications."); printedWarning = true; } break; } } done = i == locations.length; if (done) break; out.print("."); out.flush(); try {Thread.sleep(10000);} catch (InterruptedException e) {} } out.println(" done"); } } }
4,472
32.631579
77
java
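The command above ultimately relies on FileSystem#setReplication, which returns false when the factor could not be changed for the given path. A self-contained sketch of that call; the file path is a hypothetical example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetReplicationDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path file = new Path("/user/alice/data.txt");   // hypothetical file
    FileSystem fs = file.getFileSystem(conf);
    short newRep = 3;                                // replication must be >= 1
    if (!fs.setReplication(file, newRep)) {
      System.err.println("Could not set replication for: " + file);
    }
  }
}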
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Expression.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell.find; import java.io.IOException; import java.util.Deque; import org.apache.hadoop.fs.shell.PathData; /** * Interface describing an expression to be used in the * {@link org.apache.hadoop.fs.shell.find.Find} command. */ public interface Expression { /** * Set the options for this expression, called once before processing any * items. */ public void setOptions(FindOptions options) throws IOException; /** * Prepares the expression for execution, called once after setting options * and before processing any options. * @throws IOException */ public void prepare() throws IOException; /** * Apply the expression to the specified item, called once for each item. * * @param item {@link PathData} item to be processed * @param depth distance of the item from the command line argument * @return {@link Result} of applying the expression to the item */ public Result apply(PathData item, int depth) throws IOException; /** * Finishes the expression, called once after processing all items. * * @throws IOException */ public void finish() throws IOException; /** * Returns brief usage instructions for this expression. Multiple items should * be returned if there are multiple ways to use this expression. * * @return array of usage instructions */ public String[] getUsage(); /** * Returns a description of the expression for use in help. Multiple lines * should be returned array items. Lines should be formated to 60 characters * or less. * * @return array of description lines */ public String[] getHelp(); /** * Indicates whether this expression performs an action, i.e. provides output * back to the user. */ public boolean isAction(); /** Identifies the expression as an operator rather than a primary. */ public boolean isOperator(); /** * Returns the precedence of this expression * (only applicable to operators). */ public int getPrecedence(); /** * Adds children to this expression. Children are popped from the head of the * deque. * * @param expressions * deque of expressions from which to take the children */ public void addChildren(Deque<Expression> expressions); /** * Adds arguments to this expression. Arguments are popped from the head of * the deque and added to the front of the child list, ie last child added is * the first evaluated. * @param args deque of arguments from which to take expression arguments */ public void addArguments(Deque<String> args); }
3,428
30.75
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Result.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell.find; public final class Result { /** Result indicating {@link Expression} processing should continue. */ public static final Result PASS = new Result(true, true); /** Result indicating {@link Expression} processing should stop. */ public static final Result FAIL = new Result(false, true); /** * Result indicating {@link Expression} processing should not descend any more * directories. */ public static final Result STOP = new Result(true, false); private boolean descend; private boolean success; private Result(boolean success, boolean recurse) { this.success = success; this.descend = recurse; } /** Should further directories be descended. */ public boolean isDescend() { return this.descend; } /** Should processing continue. */ public boolean isPass() { return this.success; } /** Returns the combination of this and another result. */ public Result combine(Result other) { return new Result(this.isPass() && other.isPass(), this.isDescend() && other.isDescend()); } /** Negate this result. */ public Result negate() { return new Result(!this.isPass(), this.isDescend()); } @Override public String toString() { return "success=" + isPass() + "; recurse=" + isDescend(); } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + (descend ? 1231 : 1237); result = prime * result + (success ? 1231 : 1237); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; Result other = (Result) obj; if (descend != other.descend) return false; if (success != other.success) return false; return true; } }
2,696
29.303371
80
java
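A small sketch showing how the three Result constants compose through combine and negate; the expected output of each call is noted in the trailing comments and follows directly from the class above.

import org.apache.hadoop.fs.shell.find.Result;

public class ResultDemo {
  public static void main(String[] args) {
    Result r = Result.PASS.combine(Result.STOP);
    System.out.println(r);                     // success=true; recurse=false
    System.out.println(r.negate());            // success=false; recurse=false
    System.out.println(Result.FAIL.isPass());  // false
  }
}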
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/And.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell.find; import java.io.IOException; import java.util.Deque; import org.apache.hadoop.fs.shell.PathData; /** * Implements the -a (and) operator for the * {@link org.apache.hadoop.fs.shell.find.Find} command. */ final class And extends BaseExpression { /** Registers this expression with the specified factory. */ public static void registerExpression(ExpressionFactory factory) throws IOException { factory.addClass(And.class, "-a"); factory.addClass(And.class, "-and"); } private static final String[] USAGE = { "expression -a expression", "expression -and expression", "expression expression" }; private static final String[] HELP = { "Logical AND operator for joining two expressions. Returns", "true if both child expressions return true. Implied by the", "juxtaposition of two expressions and so does not need to be", "explicitly specified. The second expression will not be", "applied if the first fails." }; public And() { super(); setUsage(USAGE); setHelp(HELP); } /** * Applies child expressions to the {@link PathData} item. If all pass then * returns {@link Result#PASS} else returns the result of the first * non-passing expression. */ @Override public Result apply(PathData item, int depth) throws IOException { Result result = Result.PASS; for (Expression child : getChildren()) { Result childResult = child.apply(item, -1); result = result.combine(childResult); if (!result.isPass()) { return result; } } return result; } @Override public boolean isOperator() { return true; } @Override public int getPrecedence() { return 200; } @Override public void addChildren(Deque<Expression> expressions) { addChildren(expressions, 2); } }
2,670
30.423529
77
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Print.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell.find; import java.io.IOException; import org.apache.hadoop.fs.shell.PathData; /** * Implements the -print expression for the * {@link org.apache.hadoop.fs.shell.find.Find} command. */ final class Print extends BaseExpression { /** Registers this expression with the specified factory. */ public static void registerExpression(ExpressionFactory factory) throws IOException { factory.addClass(Print.class, "-print"); factory.addClass(Print0.class, "-print0"); } private static final String[] USAGE = { "-print", "-print0" }; private static final String[] HELP = { "Always evaluates to true. Causes the current pathname to be", "written to standard output followed by a newline. If the -print0", "expression is used then an ASCII NULL character is appended rather", "than a newline." }; private final String suffix; public Print() { this("\n"); } /** * Construct a Print {@link Expression} with the specified suffix. */ private Print(String suffix) { super(); setUsage(USAGE); setHelp(HELP); this.suffix = suffix; } @Override public Result apply(PathData item, int depth) throws IOException { getOptions().getOut().print(item.toString() + suffix); return Result.PASS; } @Override public boolean isAction() { return true; } /** Implements the -print0 expression. */ final static class Print0 extends FilterExpression { public Print0() { super(new Print("\0")); } } }
2,348
29.506494
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/ExpressionFactory.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell.find; import java.io.IOException; import java.lang.reflect.Method; import java.util.HashMap; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; /** * Factory class for registering and searching for expressions for use in the * {@link org.apache.hadoop.fs.shell.find.Find} command. */ final class ExpressionFactory { private static final String REGISTER_EXPRESSION_METHOD = "registerExpression"; private Map<String, Class<? extends Expression>> expressionMap = new HashMap<String, Class<? extends Expression>>(); private static final ExpressionFactory INSTANCE = new ExpressionFactory(); static ExpressionFactory getExpressionFactory() { return INSTANCE; } /** * Private constructor to ensure singleton. */ private ExpressionFactory() { } /** * Invokes "static void registerExpression(FindExpressionFactory)" on the * given class. This method abstracts the contract between the factory and the * expression class. Do not assume that directly invoking registerExpression * on the given class will have the same effect. * * @param expressionClass * class to allow an opportunity to register */ void registerExpression(Class<? extends Expression> expressionClass) { try { Method register = expressionClass.getMethod(REGISTER_EXPRESSION_METHOD, ExpressionFactory.class); if (register != null) { register.invoke(null, this); } } catch (Exception e) { throw new RuntimeException(StringUtils.stringifyException(e)); } } /** * Register the given class as handling the given list of expression names. * * @param expressionClass * the class implementing the expression names * @param names * one or more command names that will invoke this class * @throws IOException * if the expression is not of an expected type */ void addClass(Class<? extends Expression> expressionClass, String... names) throws IOException { for (String name : names) expressionMap.put(name, expressionClass); } /** * Determines whether the given expression name represents and actual * expression. * * @param expressionName * name of the expression * @return true if expressionName represents an expression */ boolean isExpression(String expressionName) { return expressionMap.containsKey(expressionName); } /** * Get an instance of the requested expression * * @param expressionName * name of the command to lookup * @param conf * the Hadoop configuration * @return the {@link Expression} or null if the expression is unknown */ Expression getExpression(String expressionName, Configuration conf) { if (conf == null) throw new NullPointerException("configuration is null"); Class<? 
extends Expression> expressionClass = expressionMap .get(expressionName); Expression instance = createExpression(expressionClass, conf); return instance; } /** * Creates an instance of the requested {@link Expression} class. * * @param expressionClass * {@link Expression} class to be instantiated * @param conf * the Hadoop configuration * @return a new instance of the requested {@link Expression} class */ Expression createExpression( Class<? extends Expression> expressionClass, Configuration conf) { Expression instance = null; if (expressionClass != null) { instance = ReflectionUtils.newInstance(expressionClass, conf); } return instance; } /** * Creates an instance of the requested {@link Expression} class. * * @param expressionClassname * name of the {@link Expression} class to be instantiated * @param conf * the Hadoop configuration * @return a new instance of the requested {@link Expression} class */ Expression createExpression(String expressionClassname, Configuration conf) { try { Class<? extends Expression> expressionClass = Class.forName( expressionClassname).asSubclass(Expression.class); return createExpression(expressionClass, conf); } catch (ClassNotFoundException e) { throw new IllegalArgumentException("Invalid classname " + expressionClassname); } } }
5,294
32.726115
80
java
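A same-package sketch of the registration contract described above: the factory invokes the static registerExpression method on the class, which in turn claims one or more command-line names via addClass. The Name expression used here appears later in this listing; the snippet assumes it is on the classpath and that an org.apache.hadoop.conf.Configuration import is available.

// Same-package sketch (ExpressionFactory and Name are package-private in
// org.apache.hadoop.fs.shell.find).
ExpressionFactory factory = ExpressionFactory.getExpressionFactory();
factory.registerExpression(Name.class);            // registers "-name" and "-iname"
if (factory.isExpression("-iname")) {
  Expression expr = factory.getExpression("-iname", new Configuration());
  System.out.println(expr.getUsage()[1]);           // "-iname pattern"
}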
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/FindOptions.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell.find; import java.io.InputStream; import java.io.PrintStream; import java.util.Date; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.shell.CommandFactory; /** * Options to be used by the {@link Find} command and its {@link Expression}s. */ public class FindOptions { /** Output stream to be used. */ private PrintStream out; /** Error stream to be used. */ private PrintStream err; /** Input stream to be used. */ private InputStream in; /** * Indicates whether the expression should be applied to the directory tree * depth first. */ private boolean depthFirst = false; /** Indicates whether symbolic links should be followed. */ private boolean followLink = false; /** * Indicates whether symbolic links specified as command arguments should be * followed. */ private boolean followArgLink = false; /** Start time of the find process. */ private long startTime = new Date().getTime(); /** * Depth at which to start applying expressions. */ private int minDepth = 0; /** * Depth at which to stop applying expressions. */ private int maxDepth = Integer.MAX_VALUE; /** Factory for retrieving command classes. */ private CommandFactory commandFactory; /** Configuration object. */ private Configuration configuration = new Configuration(); /** * Sets the output stream to be used. * * @param out output stream to be used */ public void setOut(PrintStream out) { this.out = out; } /** * Returns the output stream to be used. * * @return output stream to be used */ public PrintStream getOut() { return this.out; } /** * Sets the error stream to be used. * * @param err error stream to be used */ public void setErr(PrintStream err) { this.err = err; } /** * Returns the error stream to be used. * * @return error stream to be used */ public PrintStream getErr() { return this.err; } /** * Sets the input stream to be used. * * @param in input stream to be used */ public void setIn(InputStream in) { this.in = in; } /** * Returns the input stream to be used. * * @return input stream to be used */ public InputStream getIn() { return this.in; } /** * Sets flag indicating whether the expression should be applied to the * directory tree depth first. * * @param depthFirst true indicates depth first traversal */ public void setDepthFirst(boolean depthFirst) { this.depthFirst = depthFirst; } /** * Should directory tree be traversed depth first? * * @return true indicate depth first traversal */ public boolean isDepthFirst() { return this.depthFirst; } /** * Sets flag indicating whether symbolic links should be followed. * * @param followLink true indicates follow links */ public void setFollowLink(boolean followLink) { this.followLink = followLink; } /** * Should symbolic links be follows? 
* * @return true indicates links should be followed */ public boolean isFollowLink() { return this.followLink; } /** * Sets flag indicating whether command line symbolic links should be * followed. * * @param followArgLink true indicates follow links */ public void setFollowArgLink(boolean followArgLink) { this.followArgLink = followArgLink; } /** * Should command line symbolic links be follows? * * @return true indicates links should be followed */ public boolean isFollowArgLink() { return this.followArgLink; } /** * Returns the start time of this {@link Find} command. * * @return start time (in milliseconds since epoch) */ public long getStartTime() { return this.startTime; } /** * Set the start time of this {@link Find} command. * * @param time start time (in milliseconds since epoch) */ public void setStartTime(long time) { this.startTime = time; } /** * Returns the minimum depth for applying expressions. * * @return min depth */ public int getMinDepth() { return this.minDepth; } /** * Sets the minimum depth for applying expressions. * * @param minDepth minimum depth */ public void setMinDepth(int minDepth) { this.minDepth = minDepth; } /** * Returns the maximum depth for applying expressions. * * @return maximum depth */ public int getMaxDepth() { return this.maxDepth; } /** * Sets the maximum depth for applying expressions. * * @param maxDepth maximum depth */ public void setMaxDepth(int maxDepth) { this.maxDepth = maxDepth; } /** * Set the command factory. * * @param factory {@link CommandFactory} */ public void setCommandFactory(CommandFactory factory) { this.commandFactory = factory; } /** * Return the command factory. * * @return {@link CommandFactory} */ public CommandFactory getCommandFactory() { return this.commandFactory; } /** * Set the {@link Configuration} * * @param configuration {@link Configuration} */ public void setConfiguration(Configuration configuration) { this.configuration = configuration; } /** * Return the {@link Configuration} return configuration {@link Configuration} */ public Configuration getConfiguration() { return this.configuration; } }
6,267
22.044118
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/FilterExpression.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell.find; import java.io.IOException; import java.util.Deque; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.shell.PathData; /** * Provides an abstract composition filter for the {@link Expression} interface. * Allows other {@link Expression} implementations to be reused without * inheritance. */ public abstract class FilterExpression implements Expression, Configurable { protected Expression expression; protected FilterExpression(Expression expression) { this.expression = expression; } @Override public void setOptions(FindOptions options) throws IOException { if (expression != null) { expression.setOptions(options); } } @Override public void prepare() throws IOException { if (expression != null) { expression.prepare(); } } @Override public Result apply(PathData item, int depth) throws IOException { if (expression != null) { return expression.apply(item, -1); } return Result.PASS; } @Override public void finish() throws IOException { if (expression != null) { expression.finish(); } } @Override public String[] getUsage() { if (expression != null) { return expression.getUsage(); } return null; } @Override public String[] getHelp() { if (expression != null) { return expression.getHelp(); } return null; } @Override public boolean isAction() { if (expression != null) { return expression.isAction(); } return false; } @Override public boolean isOperator() { if (expression != null) { return expression.isOperator(); } return false; } @Override public int getPrecedence() { if (expression != null) { return expression.getPrecedence(); } return -1; } @Override public void addChildren(Deque<Expression> expressions) { if (expression != null) { expression.addChildren(expressions); } } @Override public void addArguments(Deque<String> args) { if (expression != null) { expression.addArguments(args); } } @Override public void setConf(Configuration conf) { if (expression instanceof Configurable) { ((Configurable) expression).setConf(conf); } } @Override public Configuration getConf() { if (expression instanceof Configurable) { return ((Configurable) expression).getConf(); } return null; } @Override public String toString() { if (expression != null) { return getClass().getSimpleName() + "-" + expression.toString(); } return getClass().getSimpleName(); } }
3,526
23.324138
80
java
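A hypothetical decorator built on FilterExpression, mirroring the reuse-by-composition pattern the class enables: it delegates everything to Print and only adds a counter around apply. The class name and bookkeeping are illustration only; Print is package-private, so this sketch assumes the same package.

import java.io.IOException;
import org.apache.hadoop.fs.shell.PathData;

// Hypothetical wrapper; all other Expression methods are delegated by FilterExpression.
final class CountingPrint extends FilterExpression {
  private long printed = 0;

  CountingPrint() {
    super(new Print());                 // delegate target
  }

  @Override
  public Result apply(PathData item, int depth) throws IOException {
    printed++;                          // count every item handed to the delegate
    return super.apply(item, depth);
  }

  long getPrinted() {
    return printed;
  }
}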
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Find.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell.find; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.Deque; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.shell.CommandFactory; import org.apache.hadoop.fs.shell.CommandFormat; import org.apache.hadoop.fs.shell.FsCommand; import org.apache.hadoop.fs.shell.PathData; @InterfaceAudience.Private @InterfaceStability.Unstable /** * Implements a Hadoop find command. */ public class Find extends FsCommand { /** * Register the names for the count command * * @param factory the command factory that will instantiate this class */ public static void registerCommands(CommandFactory factory) { factory.addClass(Find.class, "-find"); } public static final String NAME = "find"; public static final String USAGE = "<path> ... <expression> ..."; public static final String DESCRIPTION; private static String[] HELP = { "Finds all files that match the specified expression and", "applies selected actions to them. If no <path> is specified", "then defaults to the current working directory. If no", "expression is specified then defaults to -print." }; private static final String OPTION_FOLLOW_LINK = "L"; private static final String OPTION_FOLLOW_ARG_LINK = "H"; /** List of expressions recognized by this command. */ private static final Set<Class<? extends Expression>> EXPRESSIONS = new HashSet<>(); private static void addExpression(Class<?> clazz) { EXPRESSIONS.add(clazz.asSubclass(Expression.class)); } static { // Initialize the static variables. // Operator Expressions addExpression(And.class); // Action Expressions addExpression(Print.class); // Navigation Expressions // Matcher Expressions addExpression(Name.class); DESCRIPTION = buildDescription(ExpressionFactory.getExpressionFactory()); // Register the expressions with the expression factory. registerExpressions(ExpressionFactory.getExpressionFactory()); } /** Options for use in this command */ private FindOptions options; /** Root expression for this instance of the command. */ private Expression rootExpression; /** Set of path items returning a {@link Result#STOP} result. */ private HashSet<Path> stopPaths = new HashSet<Path>(); /** Register the expressions with the expression factory. */ private static void registerExpressions(ExpressionFactory factory) { for (Class<? extends Expression> exprClass : EXPRESSIONS) { factory.registerExpression(exprClass); } } /** Build the description used by the help command. 
*/ private static String buildDescription(ExpressionFactory factory) { ArrayList<Expression> operators = new ArrayList<Expression>(); ArrayList<Expression> primaries = new ArrayList<Expression>(); for (Class<? extends Expression> exprClass : EXPRESSIONS) { Expression expr = factory.createExpression(exprClass, null); if (expr.isOperator()) { operators.add(expr); } else { primaries.add(expr); } } Collections.sort(operators, new Comparator<Expression>() { @Override public int compare(Expression arg0, Expression arg1) { return arg0.getClass().getName().compareTo(arg1.getClass().getName()); } }); Collections.sort(primaries, new Comparator<Expression>() { @Override public int compare(Expression arg0, Expression arg1) { return arg0.getClass().getName().compareTo(arg1.getClass().getName()); } }); StringBuilder sb = new StringBuilder(); for (String line : HELP) { sb.append(line).append("\n"); } sb.append("\n"); sb.append("The following primary expressions are recognised:\n"); for (Expression expr : primaries) { for (String line : expr.getUsage()) { sb.append(" ").append(line).append("\n"); } for (String line : expr.getHelp()) { sb.append(" ").append(line).append("\n"); } sb.append("\n"); } sb.append("The following operators are recognised:\n"); for (Expression expr : operators) { for (String line : expr.getUsage()) { sb.append(" ").append(line).append("\n"); } for (String line : expr.getHelp()) { sb.append(" ").append(line).append("\n"); } sb.append("\n"); } return sb.toString(); } /** Default constructor for the Find command. */ public Find() { setRecursive(true); } @Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, OPTION_FOLLOW_LINK, OPTION_FOLLOW_ARG_LINK, null); cf.parse(args); if (cf.getOpt(OPTION_FOLLOW_LINK)) { getOptions().setFollowLink(true); } else if (cf.getOpt(OPTION_FOLLOW_ARG_LINK)) { getOptions().setFollowArgLink(true); } // search for first non-path argument (ie starts with a "-") and capture and // remove the remaining arguments as expressions LinkedList<String> expressionArgs = new LinkedList<String>(); Iterator<String> it = args.iterator(); boolean isPath = true; while (it.hasNext()) { String arg = it.next(); if (isPath) { if (arg.startsWith("-")) { isPath = false; } } if (!isPath) { expressionArgs.add(arg); it.remove(); } } if (args.isEmpty()) { args.add(Path.CUR_DIR); } Expression expression = parseExpression(expressionArgs); if (!expression.isAction()) { Expression and = getExpression(And.class); Deque<Expression> children = new LinkedList<Expression>(); children.add(getExpression(Print.class)); children.add(expression); and.addChildren(children); expression = and; } setRootExpression(expression); } /** * Set the root expression for this find. * * @param expression */ @InterfaceAudience.Private void setRootExpression(Expression expression) { this.rootExpression = expression; } /** * Return the root expression for this find. * * @return the root expression */ @InterfaceAudience.Private Expression getRootExpression() { return this.rootExpression; } /** Returns the current find options, creating them if necessary. */ @InterfaceAudience.Private FindOptions getOptions() { if (options == null) { options = createOptions(); } return options; } /** Create a new set of find options. 
*/ private FindOptions createOptions() { FindOptions options = new FindOptions(); options.setOut(out); options.setErr(err); options.setIn(System.in); options.setCommandFactory(getCommandFactory()); options.setConfiguration(getConf()); return options; } /** Add the {@link PathData} item to the stop set. */ private void addStop(PathData item) { stopPaths.add(item.path); } /** Returns true if the {@link PathData} item is in the stop set. */ private boolean isStop(PathData item) { return stopPaths.contains(item.path); } /** * Parse a list of arguments to to extract the {@link Expression} elements. * The input Deque will be modified to remove the used elements. * * @param args arguments to be parsed * @return list of {@link Expression} elements applicable to this command * @throws IOException if list can not be parsed */ private Expression parseExpression(Deque<String> args) throws IOException { Deque<Expression> primaries = new LinkedList<Expression>(); Deque<Expression> operators = new LinkedList<Expression>(); Expression prevExpr = getExpression(And.class); while (!args.isEmpty()) { String arg = args.pop(); if ("(".equals(arg)) { Expression expr = parseExpression(args); primaries.add(expr); prevExpr = new BaseExpression() { @Override public Result apply(PathData item, int depth) throws IOException { return Result.PASS; } }; // stub the previous expression to be a non-op } else if (")".equals(arg)) { break; } else if (isExpression(arg)) { Expression expr = getExpression(arg); expr.addArguments(args); if (expr.isOperator()) { while (!operators.isEmpty()) { if (operators.peek().getPrecedence() >= expr.getPrecedence()) { Expression op = operators.pop(); op.addChildren(primaries); primaries.push(op); } else { break; } } operators.push(expr); } else { if (!prevExpr.isOperator()) { Expression and = getExpression(And.class); while (!operators.isEmpty()) { if (operators.peek().getPrecedence() >= and.getPrecedence()) { Expression op = operators.pop(); op.addChildren(primaries); primaries.push(op); } else { break; } } operators.push(and); } primaries.push(expr); } prevExpr = expr; } else { throw new IOException("Unexpected argument: " + arg); } } while (!operators.isEmpty()) { Expression operator = operators.pop(); operator.addChildren(primaries); primaries.push(operator); } return primaries.isEmpty() ? getExpression(Print.class) : primaries.pop(); } /** Returns true if the target is an ancestor of the source. */ private boolean isAncestor(PathData source, PathData target) { for (Path parent = source.path; (parent != null) && !parent.isRoot(); parent = parent.getParent()) { if (parent.equals(target.path)) { return true; } } return false; } @Override protected void recursePath(PathData item) throws IOException { if (isStop(item)) { // this item returned a stop result so don't recurse any further return; } if (getDepth() >= getOptions().getMaxDepth()) { // reached the maximum depth so don't got any further. 
return; } if (item.stat.isSymlink() && getOptions().isFollowLink()) { PathData linkedItem = new PathData(item.stat.getSymlink().toString(), getConf()); if (isAncestor(item, linkedItem)) { getOptions().getErr().println( "Infinite loop ignored: " + item.toString() + " -> " + linkedItem.toString()); return; } if (linkedItem.exists) { item = linkedItem; } } if (item.stat.isDirectory()) { super.recursePath(item); } } @Override protected boolean isPathRecursable(PathData item) throws IOException { if (item.stat.isDirectory()) { return true; } if (item.stat.isSymlink()) { PathData linkedItem = new PathData(item.fs.resolvePath(item.stat.getSymlink()).toString(), getConf()); if (linkedItem.stat.isDirectory()) { if (getOptions().isFollowLink()) { return true; } if (getOptions().isFollowArgLink() && (getDepth() == 0)) { return true; } } } return false; } @Override protected void processPath(PathData item) throws IOException { if (getOptions().isDepthFirst()) { // depth first so leave until post processing return; } applyItem(item); } @Override protected void postProcessPath(PathData item) throws IOException { if (!getOptions().isDepthFirst()) { // not depth first so already processed return; } applyItem(item); } private void applyItem(PathData item) throws IOException { if (getDepth() >= getOptions().getMinDepth()) { Result result = getRootExpression().apply(item, getDepth()); if (Result.STOP.equals(result)) { addStop(item); } } } @Override protected void processArguments(LinkedList<PathData> args) throws IOException { Expression expr = getRootExpression(); expr.setOptions(getOptions()); expr.prepare(); super.processArguments(args); expr.finish(); } /** Gets a named expression from the factory. */ private Expression getExpression(String expressionName) { return ExpressionFactory.getExpressionFactory().getExpression( expressionName, getConf()); } /** Gets an instance of an expression from the factory. */ private Expression getExpression( Class<? extends Expression> expressionClass) { return ExpressionFactory.getExpressionFactory().createExpression( expressionClass, getConf()); } /** Asks the factory whether an expression is recognized. */ private boolean isExpression(String expressionName) { return ExpressionFactory.getExpressionFactory() .isExpression(expressionName); } }
14,062
30.251111
80
java
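A hypothetical driver showing how the find command is reached in practice: FsShell resolves "-find" through the command factory (see registerCommands above) and the remaining arguments are split into paths and expressions by processOptions. The /logs path and the glob are made-up examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class FindDemo {
  public static void main(String[] args) throws Exception {
    // Equivalent to: hadoop fs -find /logs -name *.gz -print
    String[] findArgs = {"-find", "/logs", "-name", "*.gz", "-print"};
    int rc = ToolRunner.run(new FsShell(new Configuration()), findArgs);
    System.exit(rc);
  }
}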
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/BaseExpression.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell.find; import java.io.IOException; import java.util.Deque; import java.util.LinkedList; import java.util.List; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.shell.PathData; /** * Abstract expression for use in the * {@link org.apache.hadoop.fs.shell.find.Find} command. Provides default * behavior for a no-argument primary expression. */ public abstract class BaseExpression implements Expression, Configurable { private String[] usage = { "Not yet implemented" }; private String[] help = { "Not yet implemented" }; /** Sets the usage text for this {@link Expression} */ protected void setUsage(String[] usage) { this.usage = usage; } /** Sets the help text for this {@link Expression} */ protected void setHelp(String[] help) { this.help = help; } @Override public String[] getUsage() { return this.usage; } @Override public String[] getHelp() { return this.help; } @Override public void setOptions(FindOptions options) throws IOException { this.options = options; for (Expression child : getChildren()) { child.setOptions(options); } } @Override public void prepare() throws IOException { for (Expression child : getChildren()) { child.prepare(); } } @Override public void finish() throws IOException { for (Expression child : getChildren()) { child.finish(); } } /** Options passed in from the {@link Find} command. */ private FindOptions options; /** Hadoop configuration. */ private Configuration conf; /** Arguments for this expression. */ private LinkedList<String> arguments = new LinkedList<String>(); /** Children of this expression. */ private LinkedList<Expression> children = new LinkedList<Expression>(); /** Return the options to be used by this expression. */ protected FindOptions getOptions() { return (this.options == null) ? 
new FindOptions() : this.options; } @Override public void setConf(Configuration conf) { this.conf = conf; } @Override public Configuration getConf() { return this.conf; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append(getClass().getSimpleName()); sb.append("("); boolean firstArg = true; for (String arg : getArguments()) { if (!firstArg) { sb.append(","); } else { firstArg = false; } sb.append(arg); } sb.append(";"); firstArg = true; for (Expression child : getChildren()) { if (!firstArg) { sb.append(","); } else { firstArg = false; } sb.append(child.toString()); } sb.append(")"); return sb.toString(); } @Override public boolean isAction() { for (Expression child : getChildren()) { if (child.isAction()) { return true; } } return false; } @Override public boolean isOperator() { return false; } /** * Returns the arguments of this expression * * @return list of argument strings */ protected List<String> getArguments() { return this.arguments; } /** * Returns the argument at the given position (starting from 1). * * @param position * argument to be returned * @return requested argument * @throws IOException * if the argument doesn't exist or is null */ protected String getArgument(int position) throws IOException { if (position > this.arguments.size()) { throw new IOException("Missing argument at " + position); } String argument = this.arguments.get(position - 1); if (argument == null) { throw new IOException("Null argument at position " + position); } return argument; } /** * Returns the children of this expression. * * @return list of child expressions */ protected List<Expression> getChildren() { return this.children; } @Override public int getPrecedence() { return 0; } @Override public void addChildren(Deque<Expression> exprs) { // no children by default, will be overridden by specific expressions. } /** * Add a specific number of children to this expression. The children are * popped off the head of the expressions. * * @param exprs * deque of expressions from which to take the children * @param count * number of children to be added */ protected void addChildren(Deque<Expression> exprs, int count) { for (int i = 0; i < count; i++) { addChild(exprs.pop()); } } /** * Add a single argument to this expression. The argument is popped off the * head of the expressions. * * @param expr * child to add to the expression */ private void addChild(Expression expr) { children.push(expr); } @Override public void addArguments(Deque<String> args) { // no children by default, will be overridden by specific expressions. } /** * Add a specific number of arguments to this expression. The children are * popped off the head of the expressions. * * @param args * deque of arguments from which to take the argument * @param count * number of children to be added */ protected void addArguments(Deque<String> args, int count) { for (int i = 0; i < count; i++) { addArgument(args.pop()); } } /** * Add a single argument to this expression. The argument is popped off the * head of the expressions. * * @param arg * argument to add to the expression */ protected void addArgument(String arg) { arguments.add(arg); } /** * Returns the {@link FileStatus} from the {@link PathData} item. If the * current options require links to be followed then the returned file status * is that of the linked file. 
* * @param item * PathData * @param depth * current depth in the process directories * @return FileStatus */ protected FileStatus getFileStatus(PathData item, int depth) throws IOException { FileStatus fileStatus = item.stat; if (fileStatus.isSymlink()) { if (options.isFollowLink() || (options.isFollowArgLink() && (depth == 0))) { Path linkedFile = item.fs.resolvePath(fileStatus.getSymlink()); fileStatus = getFileSystem(item).getFileStatus(linkedFile); } } return fileStatus; } /** * Returns the {@link Path} from the {@link PathData} item. * * @param item * PathData * @return Path */ protected Path getPath(PathData item) throws IOException { return item.path; } /** * Returns the {@link FileSystem} associated with the {@link PathData} item. * * @param item PathData * @return FileSystem */ protected FileSystem getFileSystem(PathData item) throws IOException { return item.fs; } }
7,928
25.168317
79
java
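A hypothetical primary expression built on BaseExpression, following the registration and usage/help pattern used by Name and Print earlier in this listing: it evaluates to PASS for zero-length files. The "-empty" name and the class itself are illustrations, not part of the shipped command; the sketch assumes the same package as the classes above.

import java.io.IOException;
import org.apache.hadoop.fs.shell.PathData;

final class Empty extends BaseExpression {
  /** Registers this expression with the specified factory. */
  public static void registerExpression(ExpressionFactory factory)
      throws IOException {
    factory.addClass(Empty.class, "-empty");
  }

  private static final String[] USAGE = { "-empty" };
  private static final String[] HELP = {
      "Evaluates as true if the file is zero length." };

  public Empty() {
    setUsage(USAGE);
    setHelp(HELP);
  }

  @Override
  public Result apply(PathData item, int depth) throws IOException {
    // getFileStatus follows symlinks when the options ask for it
    return getFileStatus(item, depth).getLen() == 0 ? Result.PASS : Result.FAIL;
  }
}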
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Name.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.fs.shell.find; import java.io.IOException; import java.util.Deque; import org.apache.hadoop.fs.GlobPattern; import org.apache.hadoop.fs.shell.PathData; import org.apache.hadoop.util.StringUtils; /** * Implements the -name expression for the * {@link org.apache.hadoop.fs.shell.find.Find} command. */ final class Name extends BaseExpression { /** Registers this expression with the specified factory. */ public static void registerExpression(ExpressionFactory factory) throws IOException { factory.addClass(Name.class, "-name"); factory.addClass(Iname.class, "-iname"); } private static final String[] USAGE = { "-name pattern", "-iname pattern" }; private static final String[] HELP = { "Evaluates as true if the basename of the file matches the", "pattern using standard file system globbing.", "If -iname is used then the match is case insensitive." }; private GlobPattern globPattern; private boolean caseSensitive = true; /** Creates a case sensitive name expression. */ public Name() { this(true); } /** * Construct a Name {@link Expression} with a specified case sensitivity. * * @param caseSensitive if true the comparisons are case sensitive. */ private Name(boolean caseSensitive) { super(); setUsage(USAGE); setHelp(HELP); setCaseSensitive(caseSensitive); } private void setCaseSensitive(boolean caseSensitive) { this.caseSensitive = caseSensitive; } @Override public void addArguments(Deque<String> args) { addArguments(args, 1); } @Override public void prepare() throws IOException { String argPattern = getArgument(1); if (!caseSensitive) { argPattern = StringUtils.toLowerCase(argPattern); } globPattern = new GlobPattern(argPattern); } @Override public Result apply(PathData item, int depth) throws IOException { String name = getPath(item).getName(); if (!caseSensitive) { name = StringUtils.toLowerCase(name); } if (globPattern.matches(name)) { return Result.PASS; } else { return Result.FAIL; } } /** Case insensitive version of the -name expression. */ static class Iname extends FilterExpression { public Iname() { super(new Name(false)); } } }
3,116
29.558824
78
java
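A sketch of the case-insensitive match performed by -iname, using the same GlobPattern and StringUtils helpers as the expression above; the pattern and file names are example data.

import org.apache.hadoop.fs.GlobPattern;
import org.apache.hadoop.util.StringUtils;

public class INameMatchDemo {
  public static void main(String[] args) {
    // Both the pattern and the candidate name are lower-cased, as in Name.prepare/apply.
    GlobPattern pattern = new GlobPattern(StringUtils.toLowerCase("*.LOG"));
    System.out.println(pattern.matches(StringUtils.toLowerCase("server.log"))); // true
    System.out.println(pattern.matches(StringUtils.toLowerCase("README.md")));  // false
  }
}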
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.conf; import java.io.IOException; import java.io.Writer; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.http.HttpServer2; /** * A servlet to print out the running configuration data. */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Unstable public class ConfServlet extends HttpServlet { private static final long serialVersionUID = 1L; private static final String FORMAT_JSON = "json"; private static final String FORMAT_XML = "xml"; private static final String FORMAT_PARAM = "format"; /** * Return the Configuration of the daemon hosting this servlet. * This is populated when the HttpServer starts. */ private Configuration getConfFromContext() { Configuration conf = (Configuration)getServletContext().getAttribute( HttpServer2.CONF_CONTEXT_ATTRIBUTE); assert conf != null; return conf; } @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(), request, response)) { return; } String format = request.getParameter(FORMAT_PARAM); if (null == format) { format = FORMAT_XML; } if (FORMAT_XML.equals(format)) { response.setContentType("text/xml; charset=utf-8"); } else if (FORMAT_JSON.equals(format)) { response.setContentType("application/json; charset=utf-8"); } Writer out = response.getWriter(); try { writeResponse(getConfFromContext(), out, format); } catch (BadFormatException bfe) { response.sendError(HttpServletResponse.SC_BAD_REQUEST, bfe.getMessage()); } out.close(); } /** * Guts of the servlet - extracted for easy testing. */ static void writeResponse(Configuration conf, Writer out, String format) throws IOException, BadFormatException { if (FORMAT_JSON.equals(format)) { Configuration.dumpConfiguration(conf, out); } else if (FORMAT_XML.equals(format)) { conf.writeXml(out); } else { throw new BadFormatException("Bad format: " + format); } } public static class BadFormatException extends Exception { private static final long serialVersionUID = 1L; public BadFormatException(String msg) { super(msg); } } }
3,482
31.551402
79
java
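A hypothetical client-side sketch of reading what this servlet serves. Only the format parameter (xml or json) comes from the servlet code above; the host, port and the /conf mount point are assumptions about a particular deployment.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ConfFetchDemo {
  public static void main(String[] args) throws Exception {
    // Hypothetical daemon address; the servlet path depends on how HttpServer2 mounts it.
    URL url = new URL("http://namenode.example.com:9870/conf?format=json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);   // dump the daemon's live configuration
      }
    }
  }
}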
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configured.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.conf; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** Base class for things that may be configured with a {@link Configuration}. */ @InterfaceAudience.Public @InterfaceStability.Stable public class Configured implements Configurable { private Configuration conf; /** Construct a Configured. */ public Configured() { this(null); } /** Construct a Configured. */ public Configured(Configuration conf) { setConf(conf); } // inherit javadoc @Override public void setConf(Configuration conf) { this.conf = conf; } // inherit javadoc @Override public Configuration getConf() { return conf; } }
1,553
27.777778
81
java
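A minimal sketch of a class that inherits its Configuration plumbing from Configured; the class name is hypothetical, and the key read is a standard Hadoop configuration property used purely as an example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;

public class BufferSizeTool extends Configured {
  public BufferSizeTool(Configuration conf) {
    super(conf);                      // stored and exposed via getConf()
  }

  public int bufferSize() {
    // io.file.buffer.size with a fallback default of 4096 bytes
    return getConf().getInt("io.file.buffer.size", 4096);
  }

  public static void main(String[] args) {
    System.out.println(new BufferSizeTool(new Configuration()).bufferSize());
  }
}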
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationTaskStatus.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.conf; import com.google.common.base.Optional; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; import java.util.Map; @InterfaceAudience.Public @InterfaceStability.Stable public class ReconfigurationTaskStatus { long startTime; long endTime; final Map<ReconfigurationUtil.PropertyChange, Optional<String>> status; public ReconfigurationTaskStatus(long startTime, long endTime, Map<ReconfigurationUtil.PropertyChange, Optional<String>> status) { this.startTime = startTime; this.endTime = endTime; this.status = status; } /** * Return true if * - A reconfiguration task has finished or * - an active reconfiguration task is running */ public boolean hasTask() { return startTime > 0; } /** * Return true if the latest reconfiguration task has finished and there is * no another active task running. */ public boolean stopped() { return endTime > 0; } public long getStartTime() { return startTime; } public long getEndTime() { return endTime; } public final Map<PropertyChange, Optional<String>> getStatus() { return status; } }
2,103
28.633803
77
java
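A short sketch of how a caller might interpret a ReconfigurationTaskStatus (the helper class is hypothetical): hasTask() says whether a task was ever started, stopped() whether the most recent one has finished, and getStatus() carries a per-property error message once the task is done.

import com.google.common.base.Optional;
import java.util.Map;
import org.apache.hadoop.conf.ReconfigurationTaskStatus;
import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;

class ReconfigStatusPrinter {
  static void print(ReconfigurationTaskStatus st) {
    if (!st.hasTask()) {
      System.out.println("no reconfiguration task has been started");
    } else if (!st.stopped()) {
      System.out.println("task started at " + st.getStartTime() + ", still running");
    } else {
      System.out.println("task ran from " + st.getStartTime() + " to " + st.getEndTime());
      for (Map.Entry<PropertyChange, Optional<String>> e : st.getStatus().entrySet()) {
        // An absent message means the change was applied successfully.
        String outcome = e.getValue().isPresent() ? "failed: " + e.getValue().get() : "ok";
        System.out.println("  " + e.getKey().prop + " -> " + outcome);
      }
    }
  }
}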
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.conf; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import com.google.common.collect.Maps; import org.apache.commons.logging.*; import org.apache.hadoop.util.Time; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.Map; /** * Utility base class for implementing the Reconfigurable interface. * * Subclasses should override reconfigurePropertyImpl to change individual * properties and getReconfigurableProperties to get all properties that * can be changed at run time. */ public abstract class ReconfigurableBase extends Configured implements Reconfigurable { private static final Log LOG = LogFactory.getLog(ReconfigurableBase.class); // Used for testing purposes. private ReconfigurationUtil reconfigurationUtil = new ReconfigurationUtil(); /** Background thread to reload configuration. */ private Thread reconfigThread = null; private volatile boolean shouldRun = true; private Object reconfigLock = new Object(); /** * The timestamp when the <code>reconfigThread</code> starts. */ private long startTime = 0; /** * The timestamp when the <code>reconfigThread</code> finishes. */ private long endTime = 0; /** * A map of <changed property, error message>. If an error message is present, * it contains the message about the error that occurred when applying the particular * change. Otherwise, it indicates that the change has been successfully applied. */ private Map<PropertyChange, Optional<String>> status = null; /** * Construct a ReconfigurableBase. */ public ReconfigurableBase() { super(new Configuration()); } /** * Construct a ReconfigurableBase with the {@link Configuration} * conf. */ public ReconfigurableBase(Configuration conf) { super((conf == null) ? new Configuration() : conf); } @VisibleForTesting public void setReconfigurationUtil(ReconfigurationUtil ru) { reconfigurationUtil = Preconditions.checkNotNull(ru); } /** * Create a new configuration. */ protected abstract Configuration getNewConf(); @VisibleForTesting public Collection<PropertyChange> getChangedProperties( Configuration newConf, Configuration oldConf) { return reconfigurationUtil.parseChangedProperties(newConf, oldConf); } /** * A background thread to apply configuration changes. 
*/ private static class ReconfigurationThread extends Thread { private ReconfigurableBase parent; ReconfigurationThread(ReconfigurableBase base) { this.parent = base; } // See {@link ReconfigurationServlet#applyChanges} public void run() { LOG.info("Starting reconfiguration task."); Configuration oldConf = this.parent.getConf(); Configuration newConf = this.parent.getNewConf(); Collection<PropertyChange> changes = this.parent.getChangedProperties(newConf, oldConf); Map<PropertyChange, Optional<String>> results = Maps.newHashMap(); for (PropertyChange change : changes) { String errorMessage = null; if (!this.parent.isPropertyReconfigurable(change.prop)) { LOG.info(String.format( "Property %s is not configurable: old value: %s, new value: %s", change.prop, change.oldVal, change.newVal)); continue; } LOG.info("Change property: " + change.prop + " from \"" + ((change.oldVal == null) ? "<default>" : change.oldVal) + "\" to \"" + ((change.newVal == null) ? "<default>" : change.newVal) + "\"."); try { this.parent.reconfigurePropertyImpl(change.prop, change.newVal); } catch (ReconfigurationException e) { errorMessage = e.getCause().getMessage(); } results.put(change, Optional.fromNullable(errorMessage)); } synchronized (this.parent.reconfigLock) { this.parent.endTime = Time.now(); this.parent.status = Collections.unmodifiableMap(results); this.parent.reconfigThread = null; } } } /** * Start a reconfiguration task to reload configuration in background. */ public void startReconfigurationTask() throws IOException { synchronized (reconfigLock) { if (!shouldRun) { String errorMessage = "The server is stopped."; LOG.warn(errorMessage); throw new IOException(errorMessage); } if (reconfigThread != null) { String errorMessage = "Another reconfiguration task is running."; LOG.warn(errorMessage); throw new IOException(errorMessage); } reconfigThread = new ReconfigurationThread(this); reconfigThread.setDaemon(true); reconfigThread.setName("Reconfiguration Task"); reconfigThread.start(); startTime = Time.now(); } } public ReconfigurationTaskStatus getReconfigurationTaskStatus() { synchronized (reconfigLock) { if (reconfigThread != null) { return new ReconfigurationTaskStatus(startTime, 0, null); } return new ReconfigurationTaskStatus(startTime, endTime, status); } } public void shutdownReconfigurationTask() { Thread tempThread; synchronized (reconfigLock) { shouldRun = false; if (reconfigThread == null) { return; } tempThread = reconfigThread; reconfigThread = null; } try { tempThread.join(); } catch (InterruptedException e) { } } /** * {@inheritDoc} * * This method makes the change to this object's {@link Configuration} * and calls reconfigurePropertyImpl to update internal data structures. * This method cannot be overridden, subclasses should instead override * reconfigurePropertyImpl. */ @Override public final String reconfigureProperty(String property, String newVal) throws ReconfigurationException { if (isPropertyReconfigurable(property)) { LOG.info("changing property " + property + " to " + newVal); String oldVal; synchronized(getConf()) { oldVal = getConf().get(property); reconfigurePropertyImpl(property, newVal); if (newVal != null) { getConf().set(property, newVal); } else { getConf().unset(property); } } return oldVal; } else { throw new ReconfigurationException(property, newVal, getConf().get(property)); } } /** * {@inheritDoc} * * Subclasses must override this. 
*/ @Override public abstract Collection<String> getReconfigurableProperties(); /** * {@inheritDoc} * * Subclasses may wish to override this with a more efficient implementation. */ @Override public boolean isPropertyReconfigurable(String property) { return getReconfigurableProperties().contains(property); } /** * Change a configuration property. * * Subclasses must override this. This method applies the change to * all internal data structures derived from the configuration property * that is being changed. If this object owns other Reconfigurable objects, * reconfigureProperty should be called recursively on them to make sure that * the configuration of these objects is updated. */ protected abstract void reconfigurePropertyImpl(String property, String newVal) throws ReconfigurationException; }
8,398
31.428571
83
java
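A rough sketch of the subclass contract ReconfigurableBase expects (class and property names are invented for the example): supply a fresh Configuration from getNewConf(), list the reconfigurable keys, and apply each change in reconfigurePropertyImpl(), throwing ReconfigurationException with a cause on failure so the background thread can record an error message.

import java.util.Arrays;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurableBase;
import org.apache.hadoop.conf.ReconfigurationException;

public class ExampleReconfigurableServer extends ReconfigurableBase {

  // Hypothetical key this server allows to be changed at run time.
  private static final String HANDLER_COUNT_KEY = "example.handler.count";
  private volatile int handlerCount = 10;

  public ExampleReconfigurableServer(Configuration conf) {
    super(conf);
  }

  @Override
  protected Configuration getNewConf() {
    // Re-read the configuration resources; the reconfiguration thread diffs
    // this against getConf() to find the changed properties.
    return new Configuration();
  }

  @Override
  public Collection<String> getReconfigurableProperties() {
    return Arrays.asList(HANDLER_COUNT_KEY);
  }

  @Override
  protected void reconfigurePropertyImpl(String property, String newVal)
      throws ReconfigurationException {
    if (HANDLER_COUNT_KEY.equals(property)) {
      try {
        handlerCount = (newVal == null) ? 10 : Integer.parseInt(newVal);
      } catch (NumberFormatException e) {
        throw new ReconfigurationException(property, newVal,
            getConf().get(property), e);
      }
    }
  }
}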
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.conf; /** * Exception indicating that configuration property cannot be changed * at run time. */ public class ReconfigurationException extends Exception { private static final long serialVersionUID = 1L; private String property; private String newVal; private String oldVal; /** * Construct the exception message. */ private static String constructMessage(String property, String newVal, String oldVal) { String message = "Could not change property " + property; if (oldVal != null) { message += " from \'" + oldVal; } if (newVal != null) { message += "\' to \'" + newVal + "\'"; } return message; } /** * Create a new instance of {@link ReconfigurationException}. */ public ReconfigurationException() { super("Could not change configuration."); this.property = null; this.newVal = null; this.oldVal = null; } /** * Create a new instance of {@link ReconfigurationException}. */ public ReconfigurationException(String property, String newVal, String oldVal, Throwable cause) { super(constructMessage(property, newVal, oldVal), cause); this.property = property; this.newVal = newVal; this.oldVal = oldVal; } /** * Create a new instance of {@link ReconfigurationException}. */ public ReconfigurationException(String property, String newVal, String oldVal) { super(constructMessage(property, newVal, oldVal)); this.property = property; this.newVal = newVal; this.oldVal = oldVal; } /** * Get property that cannot be changed. */ public String getProperty() { return property; } /** * Get value to which property was supposed to be changed. */ public String getNewValue() { return newVal; } /** * Get old value of property that cannot be changed. */ public String getOldValue() { return oldVal; } }
2,879
26.428571
75
java
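For reference, a tiny demonstration of what the exception exposes (the property name and values are illustrative):

import org.apache.hadoop.conf.ReconfigurationException;

class ReconfigurationExceptionDemo {
  public static void main(String[] args) {
    // Constructor order is (property, newVal, oldVal).
    ReconfigurationException e =
        new ReconfigurationException("example.handler.count", "20", "10");
    System.out.println(e.getMessage());   // built by constructMessage()
    System.out.println(e.getProperty() + ": " + e.getOldValue()
        + " -> " + e.getNewValue());
  }
}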
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Reconfigurable.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.conf; import java.util.Collection; /** * Something whose {@link Configuration} can be changed at run time. */ public interface Reconfigurable extends Configurable { /** * Change a configuration property on this object to the value specified. * * Change a configuration property on this object to the value specified * and return the previous value that the configuration property was set to * (or null if it was not previously set). If newVal is null, set the property * to its default value. * * If the property cannot be changed, throw a * {@link ReconfigurationException}. */ public String reconfigureProperty(String property, String newVal) throws ReconfigurationException; /** * Return whether a given property is changeable at run time. * * If isPropertyReconfigurable returns true for a property, * then reconfigureProperty should not throw an exception when changing * this property. */ public boolean isPropertyReconfigurable(String property); /** * Return all the properties that can be changed at run time. */ public Collection<String> getReconfigurableProperties(); }
1,987
33.275862
80
java
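From the caller's side, the interface is typically used by checking isPropertyReconfigurable before applying a change; a hedged sketch (the helper is hypothetical, reconf can be any Reconfigurable implementation):

import org.apache.hadoop.conf.Reconfigurable;
import org.apache.hadoop.conf.ReconfigurationException;

class ReconfigureOneProperty {
  static void apply(Reconfigurable reconf, String key, String newVal) {
    if (!reconf.isPropertyReconfigurable(key)) {
      System.out.println(key + " cannot be changed at run time; reconfigurable keys: "
          + reconf.getReconfigurableProperties());
      return;
    }
    try {
      String oldVal = reconf.reconfigureProperty(key, newVal);
      System.out.println("changed " + key + " from " + oldVal + " to " + newVal);
    } catch (ReconfigurationException e) {
      System.out.println("change rejected: " + e.getMessage());
    }
  }
}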
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.conf; import org.apache.commons.logging.*; import org.apache.commons.lang.StringEscapeUtils; import java.util.Collection; import java.util.Enumeration; import java.io.IOException; import java.io.PrintWriter; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.util.StringUtils; /** * A servlet for changing a node's configuration. * * Reloads the configuration file, verifies whether changes are * possible and asks the admin to approve the change. * */ public class ReconfigurationServlet extends HttpServlet { private static final long serialVersionUID = 1L; private static final Log LOG = LogFactory.getLog(ReconfigurationServlet.class); // the prefix used to find the attribute holding the reconfigurable // for a given request // // we get the attribute prefix + servlet path public static final String CONF_SERVLET_RECONFIGURABLE_PREFIX = "conf.servlet.reconfigurable."; @Override public void init() throws ServletException { super.init(); } private Reconfigurable getReconfigurable(HttpServletRequest req) { LOG.info("servlet path: " + req.getServletPath()); LOG.info("getting attribute: " + CONF_SERVLET_RECONFIGURABLE_PREFIX + req.getServletPath()); return (Reconfigurable) this.getServletContext().getAttribute(CONF_SERVLET_RECONFIGURABLE_PREFIX + req.getServletPath()); } private void printHeader(PrintWriter out, String nodeName) { out.print("<html><head>"); out.printf("<title>%s Reconfiguration Utility</title>%n", StringEscapeUtils.escapeHtml(nodeName)); out.print("</head><body>\n"); out.printf("<h1>%s Reconfiguration Utility</h1>%n", StringEscapeUtils.escapeHtml(nodeName)); } private void printFooter(PrintWriter out) { out.print("</body></html>\n"); } /** * Print configuration options that can be changed. 
*/ private void printConf(PrintWriter out, Reconfigurable reconf) { Configuration oldConf = reconf.getConf(); Configuration newConf = new Configuration(); Collection<ReconfigurationUtil.PropertyChange> changes = ReconfigurationUtil.getChangedProperties(newConf, oldConf); boolean changeOK = true; out.println("<form action=\"\" method=\"post\">"); out.println("<table border=\"1\">"); out.println("<tr><th>Property</th><th>Old value</th>"); out.println("<th>New value </th><th></th></tr>"); for (ReconfigurationUtil.PropertyChange c: changes) { out.print("<tr><td>"); if (!reconf.isPropertyReconfigurable(c.prop)) { out.print("<font color=\"red\">" + StringEscapeUtils.escapeHtml(c.prop) + "</font>"); changeOK = false; } else { out.print(StringEscapeUtils.escapeHtml(c.prop)); out.print("<input type=\"hidden\" name=\"" + StringEscapeUtils.escapeHtml(c.prop) + "\" value=\"" + StringEscapeUtils.escapeHtml(c.newVal) + "\"/>"); } out.print("</td><td>" + (c.oldVal == null ? "<it>default</it>" : StringEscapeUtils.escapeHtml(c.oldVal)) + "</td><td>" + (c.newVal == null ? "<it>default</it>" : StringEscapeUtils.escapeHtml(c.newVal)) + "</td>"); out.print("</tr>\n"); } out.println("</table>"); if (!changeOK) { out.println("<p><font color=\"red\">WARNING: properties marked red" + " will not be changed until the next restart.</font></p>"); } out.println("<input type=\"submit\" value=\"Apply\" />"); out.println("</form>"); } @SuppressWarnings("unchecked") private Enumeration<String> getParams(HttpServletRequest req) { return req.getParameterNames(); } /** * Apply configuration changes after the admin has approved them. */ private void applyChanges(PrintWriter out, Reconfigurable reconf, HttpServletRequest req) throws ReconfigurationException { Configuration oldConf = reconf.getConf(); Configuration newConf = new Configuration(); Enumeration<String> params = getParams(req); synchronized(oldConf) { while (params.hasMoreElements()) { String rawParam = params.nextElement(); String param = StringEscapeUtils.unescapeHtml(rawParam); String value = StringEscapeUtils.unescapeHtml(req.getParameter(rawParam)); if (value != null) { if (value.equals(newConf.getRaw(param)) || value.equals("default") || value.equals("null") || value.isEmpty()) { if ((value.equals("default") || value.equals("null") || value.isEmpty()) && oldConf.getRaw(param) != null) { out.println("<p>Changed \"" + StringEscapeUtils.escapeHtml(param) + "\" from \"" + StringEscapeUtils.escapeHtml(oldConf.getRaw(param)) + "\" to default</p>"); reconf.reconfigureProperty(param, null); } else if (!value.equals("default") && !value.equals("null") && !value.isEmpty() && (oldConf.getRaw(param) == null || !oldConf.getRaw(param).equals(value))) { // change from default or value to different value if (oldConf.getRaw(param) == null) { out.println("<p>Changed \"" + StringEscapeUtils.escapeHtml(param) + "\" from default to \"" + StringEscapeUtils.escapeHtml(value) + "\"</p>"); } else { out.println("<p>Changed \"" + StringEscapeUtils.escapeHtml(param) + "\" from \"" + StringEscapeUtils.escapeHtml(oldConf. 
getRaw(param)) + "\" to \"" + StringEscapeUtils.escapeHtml(value) + "\"</p>"); } reconf.reconfigureProperty(param, value); } else { LOG.info("property " + param + " unchanged"); } } else { // parameter value != newConf value out.println("<p>\"" + StringEscapeUtils.escapeHtml(param) + "\" not changed because value has changed from \"" + StringEscapeUtils.escapeHtml(value) + "\" to \"" + StringEscapeUtils.escapeHtml(newConf.getRaw(param)) + "\" since approval</p>"); } } } } } @Override protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { LOG.info("GET"); resp.setContentType("text/html"); PrintWriter out = resp.getWriter(); Reconfigurable reconf = getReconfigurable(req); String nodeName = reconf.getClass().getCanonicalName(); printHeader(out, nodeName); printConf(out, reconf); printFooter(out); } @Override protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException { LOG.info("POST"); resp.setContentType("text/html"); PrintWriter out = resp.getWriter(); Reconfigurable reconf = getReconfigurable(req); String nodeName = reconf.getClass().getCanonicalName(); printHeader(out, nodeName); try { applyChanges(out, reconf, req); } catch (ReconfigurationException e) { resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, StringUtils.stringifyException(e)); return; } out.println("<p><a href=\"" + req.getServletPath() + "\">back</a></p>"); printFooter(out); } }
8,898
36.390756
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.conf; import com.google.common.annotations.VisibleForTesting; import java.io.BufferedInputStream; import java.io.DataInput; import java.io.DataOutput; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Reader; import java.io.Writer; import java.lang.ref.WeakReference; import java.net.InetSocketAddress; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.ListIterator; import java.util.Map; import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.StringTokenizer; import java.util.WeakHashMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerException; import javax.xml.transform.TransformerFactory; import javax.xml.transform.dom.DOMSource; import javax.xml.transform.stream.StreamResult; import com.google.common.base.Charsets; import org.apache.commons.collections.map.UnmodifiableMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.alias.CredentialProvider; import org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry; import org.apache.hadoop.security.alias.CredentialProviderFactory; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringInterner; import org.apache.hadoop.util.StringUtils; import org.codehaus.jackson.JsonFactory; import org.codehaus.jackson.JsonGenerator; import org.w3c.dom.DOMException; import org.w3c.dom.Document; import 
org.w3c.dom.Element; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import org.w3c.dom.Text; import org.xml.sax.SAXException; import com.google.common.base.Preconditions; /** * Provides access to configuration parameters. * * <h4 id="Resources">Resources</h4> * * <p>Configurations are specified by resources. A resource contains a set of * name/value pairs as XML data. Each resource is named by either a * <code>String</code> or by a {@link Path}. If named by a <code>String</code>, * then the classpath is examined for a file with that name. If named by a * <code>Path</code>, then the local filesystem is examined directly, without * referring to the classpath. * * <p>Unless explicitly turned off, Hadoop by default specifies two * resources, loaded in-order from the classpath: <ol> * <li><tt> * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml"> * core-default.xml</a></tt>: Read-only defaults for hadoop.</li> * <li><tt>core-site.xml</tt>: Site-specific configuration for a given hadoop * installation.</li> * </ol> * Applications may add additional resources, which are loaded * subsequent to these resources in the order they are added. * * <h4 id="FinalParams">Final Parameters</h4> * * <p>Configuration parameters may be declared <i>final</i>. * Once a resource declares a value final, no subsequently-loaded * resource can alter that value. * For example, one might define a final parameter with: * <tt><pre> * &lt;property&gt; * &lt;name&gt;dfs.hosts.include&lt;/name&gt; * &lt;value&gt;/etc/hadoop/conf/hosts.include&lt;/value&gt; * <b>&lt;final&gt;true&lt;/final&gt;</b> * &lt;/property&gt;</pre></tt> * * Administrators typically define parameters as final in * <tt>core-site.xml</tt> for values that user applications may not alter. * * <h4 id="VariableExpansion">Variable Expansion</h4> * * <p>Value strings are first processed for <i>variable expansion</i>. The * available properties are:<ol> * <li>Other properties defined in this Configuration; and, if a name is * undefined here,</li> * <li>Properties in {@link System#getProperties()}.</li> * </ol> * * <p>For example, if a configuration resource contains the following property * definitions: * <tt><pre> * &lt;property&gt; * &lt;name&gt;basedir&lt;/name&gt; * &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt; * &lt;/property&gt; * * &lt;property&gt; * &lt;name&gt;tempdir&lt;/name&gt; * &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt; * &lt;/property&gt;</pre></tt> * * When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt> * will be resolved to another property in this Configuration, while * <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value * of the System property with that name. * By default, warnings will be given to any deprecated configuration * parameters and these are suppressible by configuring * <tt>log4j.logger.org.apache.hadoop.conf.Configuration.deprecation</tt> in * log4j.properties file. 
*/ @InterfaceAudience.Public @InterfaceStability.Stable public class Configuration implements Iterable<Map.Entry<String,String>>, Writable { private static final Log LOG = LogFactory.getLog(Configuration.class); private static final Log LOG_DEPRECATION = LogFactory.getLog("org.apache.hadoop.conf.Configuration.deprecation"); private boolean quietmode = true; private static final String DEFAULT_STRING_CHECK = "testingforemptydefaultvalue"; private boolean allowNullValueProperties = false; private static class Resource { private final Object resource; private final String name; public Resource(Object resource) { this(resource, resource.toString()); } public Resource(Object resource, String name) { this.resource = resource; this.name = name; } public String getName(){ return name; } public Object getResource() { return resource; } @Override public String toString() { return name; } } /** * List of configuration resources. */ private ArrayList<Resource> resources = new ArrayList<Resource>(); /** * The value reported as the setting resource when a key is set * by code rather than a file resource by dumpConfiguration. */ static final String UNKNOWN_RESOURCE = "Unknown"; /** * List of configuration parameters marked <b>final</b>. */ private Set<String> finalParameters = Collections.newSetFromMap( new ConcurrentHashMap<String, Boolean>()); private boolean loadDefaults = true; /** * Configuration objects */ private static final WeakHashMap<Configuration,Object> REGISTRY = new WeakHashMap<Configuration,Object>(); /** * List of default Resources. Resources are loaded in the order of the list * entries */ private static final CopyOnWriteArrayList<String> defaultResources = new CopyOnWriteArrayList<String>(); private static final Map<ClassLoader, Map<String, WeakReference<Class<?>>>> CACHE_CLASSES = new WeakHashMap<ClassLoader, Map<String, WeakReference<Class<?>>>>(); /** * Sentinel value to store negative cache results in {@link #CACHE_CLASSES}. */ private static final Class<?> NEGATIVE_CACHE_SENTINEL = NegativeCacheSentinel.class; /** * Stores the mapping of key to the resource which modifies or loads * the key most recently */ private Map<String, String[]> updatingResource; /** * Class to keep the information about the keys which replace the deprecated * ones. * * This class stores the new keys which replace the deprecated keys and also * gives a provision to have a custom message for each of the deprecated key * that is being replaced. It also provides method to get the appropriate * warning message which can be logged whenever the deprecated key is used. */ private static class DeprecatedKeyInfo { private final String[] newKeys; private final String customMessage; private final AtomicBoolean accessed = new AtomicBoolean(false); DeprecatedKeyInfo(String[] newKeys, String customMessage) { this.newKeys = newKeys; this.customMessage = customMessage; } /** * Method to provide the warning message. It gives the custom message if * non-null, and default message otherwise. * @param key the associated deprecated key. * @return message that is to be logged when a deprecated key is used. */ private final String getWarningMessage(String key) { String warningMessage; if(customMessage == null) { StringBuilder message = new StringBuilder(key); String deprecatedKeySuffix = " is deprecated. 
Instead, use "; message.append(deprecatedKeySuffix); for (int i = 0; i < newKeys.length; i++) { message.append(newKeys[i]); if(i != newKeys.length-1) { message.append(", "); } } warningMessage = message.toString(); } else { warningMessage = customMessage; } return warningMessage; } boolean getAndSetAccessed() { return accessed.getAndSet(true); } public void clearAccessed() { accessed.set(false); } } /** * A pending addition to the global set of deprecated keys. */ public static class DeprecationDelta { private final String key; private final String[] newKeys; private final String customMessage; DeprecationDelta(String key, String[] newKeys, String customMessage) { Preconditions.checkNotNull(key); Preconditions.checkNotNull(newKeys); Preconditions.checkArgument(newKeys.length > 0); this.key = key; this.newKeys = newKeys; this.customMessage = customMessage; } public DeprecationDelta(String key, String newKey, String customMessage) { this(key, new String[] { newKey }, customMessage); } public DeprecationDelta(String key, String newKey) { this(key, new String[] { newKey }, null); } public String getKey() { return key; } public String[] getNewKeys() { return newKeys; } public String getCustomMessage() { return customMessage; } } /** * The set of all keys which are deprecated. * * DeprecationContext objects are immutable. */ private static class DeprecationContext { /** * Stores the deprecated keys, the new keys which replace the deprecated keys * and custom message(if any provided). */ private final Map<String, DeprecatedKeyInfo> deprecatedKeyMap; /** * Stores a mapping from superseding keys to the keys which they deprecate. */ private final Map<String, String> reverseDeprecatedKeyMap; /** * Create a new DeprecationContext by copying a previous DeprecationContext * and adding some deltas. * * @param other The previous deprecation context to copy, or null to start * from nothing. * @param deltas The deltas to apply. 
*/ @SuppressWarnings("unchecked") DeprecationContext(DeprecationContext other, DeprecationDelta[] deltas) { HashMap<String, DeprecatedKeyInfo> newDeprecatedKeyMap = new HashMap<String, DeprecatedKeyInfo>(); HashMap<String, String> newReverseDeprecatedKeyMap = new HashMap<String, String>(); if (other != null) { for (Entry<String, DeprecatedKeyInfo> entry : other.deprecatedKeyMap.entrySet()) { newDeprecatedKeyMap.put(entry.getKey(), entry.getValue()); } for (Entry<String, String> entry : other.reverseDeprecatedKeyMap.entrySet()) { newReverseDeprecatedKeyMap.put(entry.getKey(), entry.getValue()); } } for (DeprecationDelta delta : deltas) { if (!newDeprecatedKeyMap.containsKey(delta.getKey())) { DeprecatedKeyInfo newKeyInfo = new DeprecatedKeyInfo(delta.getNewKeys(), delta.getCustomMessage()); newDeprecatedKeyMap.put(delta.key, newKeyInfo); for (String newKey : delta.getNewKeys()) { newReverseDeprecatedKeyMap.put(newKey, delta.key); } } } this.deprecatedKeyMap = UnmodifiableMap.decorate(newDeprecatedKeyMap); this.reverseDeprecatedKeyMap = UnmodifiableMap.decorate(newReverseDeprecatedKeyMap); } Map<String, DeprecatedKeyInfo> getDeprecatedKeyMap() { return deprecatedKeyMap; } Map<String, String> getReverseDeprecatedKeyMap() { return reverseDeprecatedKeyMap; } } private static DeprecationDelta[] defaultDeprecations = new DeprecationDelta[] { new DeprecationDelta("topology.script.file.name", CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY), new DeprecationDelta("topology.script.number.args", CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY), new DeprecationDelta("hadoop.configured.node.mapping", CommonConfigurationKeys.NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY), new DeprecationDelta("topology.node.switch.mapping.impl", CommonConfigurationKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY), new DeprecationDelta("dfs.df.interval", CommonConfigurationKeys.FS_DF_INTERVAL_KEY), new DeprecationDelta("hadoop.native.lib", CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY), new DeprecationDelta("fs.default.name", CommonConfigurationKeys.FS_DEFAULT_NAME_KEY), new DeprecationDelta("dfs.umaskmode", CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY), new DeprecationDelta("dfs.nfs.exports.allowed.hosts", CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY) }; /** * The global DeprecationContext. */ private static AtomicReference<DeprecationContext> deprecationContext = new AtomicReference<DeprecationContext>( new DeprecationContext(null, defaultDeprecations)); /** * Adds a set of deprecated keys to the global deprecations. * * This method is lockless. It works by means of creating a new * DeprecationContext based on the old one, and then atomically swapping in * the new context. If someone else updated the context in between us reading * the old context and swapping in the new one, we try again until we win the * race. * * @param deltas The deprecations to add. */ public static void addDeprecations(DeprecationDelta[] deltas) { DeprecationContext prev, next; do { prev = deprecationContext.get(); next = new DeprecationContext(prev, deltas); } while (!deprecationContext.compareAndSet(prev, next)); } /** * Adds the deprecated key to the global deprecation map. * It does not override any existing entries in the deprecation map. 
* This is to be used only by the developers in order to add deprecation of * keys, and attempts to call this method after loading resources once, * would lead to <tt>UnsupportedOperationException</tt> * * If a key is deprecated in favor of multiple keys, they are all treated as * aliases of each other, and setting any one of them resets all the others * to the new value. * * If you have multiple deprecation entries to add, it is more efficient to * use #addDeprecations(DeprecationDelta[] deltas) instead. * * @param key * @param newKeys * @param customMessage * @deprecated use {@link #addDeprecation(String key, String newKey, String customMessage)} instead */ @Deprecated public static void addDeprecation(String key, String[] newKeys, String customMessage) { addDeprecations(new DeprecationDelta[] { new DeprecationDelta(key, newKeys, customMessage) }); } /** * Adds the deprecated key to the global deprecation map. * It does not override any existing entries in the deprecation map. * This is to be used only by the developers in order to add deprecation of * keys, and attempts to call this method after loading resources once, * would lead to <tt>UnsupportedOperationException</tt> * * If you have multiple deprecation entries to add, it is more efficient to * use #addDeprecations(DeprecationDelta[] deltas) instead. * * @param key * @param newKey * @param customMessage */ public static void addDeprecation(String key, String newKey, String customMessage) { addDeprecation(key, new String[] {newKey}, customMessage); } /** * Adds the deprecated key to the global deprecation map when no custom * message is provided. * It does not override any existing entries in the deprecation map. * This is to be used only by the developers in order to add deprecation of * keys, and attempts to call this method after loading resources once, * would lead to <tt>UnsupportedOperationException</tt> * * If a key is deprecated in favor of multiple keys, they are all treated as * aliases of each other, and setting any one of them resets all the others * to the new value. * * If you have multiple deprecation entries to add, it is more efficient to * use #addDeprecations(DeprecationDelta[] deltas) instead. * * @param key Key that is to be deprecated * @param newKeys list of keys that take up the values of deprecated key * @deprecated use {@link #addDeprecation(String key, String newKey)} instead */ @Deprecated public static void addDeprecation(String key, String[] newKeys) { addDeprecation(key, newKeys, null); } /** * Adds the deprecated key to the global deprecation map when no custom * message is provided. * It does not override any existing entries in the deprecation map. * This is to be used only by the developers in order to add deprecation of * keys, and attempts to call this method after loading resources once, * would lead to <tt>UnsupportedOperationException</tt> * * If you have multiple deprecation entries to add, it is more efficient to * use #addDeprecations(DeprecationDelta[] deltas) instead. * * @param key Key that is to be deprecated * @param newKey key that takes up the value of deprecated key */ public static void addDeprecation(String key, String newKey) { addDeprecation(key, new String[] {newKey}, null); } /** * checks whether the given <code>key</code> is deprecated. * * @param key the parameter which is to be checked for deprecation * @return <code>true</code> if the key is deprecated and * <code>false</code> otherwise. 
*/ public static boolean isDeprecated(String key) { return deprecationContext.get().getDeprecatedKeyMap().containsKey(key); } /** * Sets all deprecated properties that are not currently set but have a * corresponding new property that is set. Useful for iterating the * properties when all deprecated properties for currently set properties * need to be present. */ public void setDeprecatedProperties() { DeprecationContext deprecations = deprecationContext.get(); Properties props = getProps(); Properties overlay = getOverlay(); for (Map.Entry<String, DeprecatedKeyInfo> entry : deprecations.getDeprecatedKeyMap().entrySet()) { String depKey = entry.getKey(); if (!overlay.contains(depKey)) { for (String newKey : entry.getValue().newKeys) { String val = overlay.getProperty(newKey); if (val != null) { props.setProperty(depKey, val); overlay.setProperty(depKey, val); break; } } } } } /** * Checks for the presence of the property <code>name</code> in the * deprecation map. Returns the first of the list of new keys if present * in the deprecation map or the <code>name</code> itself. If the property * is not presently set but the property map contains an entry for the * deprecated key, the value of the deprecated key is set as the value for * the provided property name. * * @param name the property name * @return the first property in the list of properties mapping * the <code>name</code> or the <code>name</code> itself. */ private String[] handleDeprecation(DeprecationContext deprecations, String name) { if (null != name) { name = name.trim(); } ArrayList<String > names = new ArrayList<String>(); if (isDeprecated(name)) { DeprecatedKeyInfo keyInfo = deprecations.getDeprecatedKeyMap().get(name); warnOnceIfDeprecated(deprecations, name); for (String newKey : keyInfo.newKeys) { if(newKey != null) { names.add(newKey); } } } if(names.size() == 0) { names.add(name); } for(String n : names) { String deprecatedKey = deprecations.getReverseDeprecatedKeyMap().get(n); if (deprecatedKey != null && !getOverlay().containsKey(n) && getOverlay().containsKey(deprecatedKey)) { getProps().setProperty(n, getOverlay().getProperty(deprecatedKey)); getOverlay().setProperty(n, getOverlay().getProperty(deprecatedKey)); } } return names.toArray(new String[names.size()]); } private void handleDeprecation() { LOG.debug("Handling deprecation for all properties in config..."); DeprecationContext deprecations = deprecationContext.get(); Set<Object> keys = new HashSet<Object>(); keys.addAll(getProps().keySet()); for (Object item: keys) { LOG.debug("Handling deprecation for " + (String)item); handleDeprecation(deprecations, (String)item); } } static{ //print deprecation warning if hadoop-site.xml is found in classpath ClassLoader cL = Thread.currentThread().getContextClassLoader(); if (cL == null) { cL = Configuration.class.getClassLoader(); } if(cL.getResource("hadoop-site.xml")!=null) { LOG.warn("DEPRECATED: hadoop-site.xml found in the classpath. " + "Usage of hadoop-site.xml is deprecated. Instead use core-site.xml, " + "mapred-site.xml and hdfs-site.xml to override properties of " + "core-default.xml, mapred-default.xml and hdfs-default.xml " + "respectively"); } addDefaultResource("core-default.xml"); addDefaultResource("core-site.xml"); } private Properties properties; private Properties overlay; private ClassLoader classLoader; { classLoader = Thread.currentThread().getContextClassLoader(); if (classLoader == null) { classLoader = Configuration.class.getClassLoader(); } } /** A new configuration. 
*/ public Configuration() { this(true); } /** A new configuration where the behavior of reading from the default * resources can be turned off. * * If the parameter {@code loadDefaults} is false, the new instance * will not load resources from the default files. * @param loadDefaults specifies whether to load from the default files */ public Configuration(boolean loadDefaults) { this.loadDefaults = loadDefaults; updatingResource = new ConcurrentHashMap<String, String[]>(); synchronized(Configuration.class) { REGISTRY.put(this, null); } } /** * A new configuration with the same settings cloned from another. * * @param other the configuration from which to clone settings. */ @SuppressWarnings("unchecked") public Configuration(Configuration other) { this.resources = (ArrayList<Resource>) other.resources.clone(); synchronized(other) { if (other.properties != null) { this.properties = (Properties)other.properties.clone(); } if (other.overlay!=null) { this.overlay = (Properties)other.overlay.clone(); } this.updatingResource = new ConcurrentHashMap<String, String[]>( other.updatingResource); this.finalParameters = Collections.newSetFromMap( new ConcurrentHashMap<String, Boolean>()); this.finalParameters.addAll(other.finalParameters); } synchronized(Configuration.class) { REGISTRY.put(this, null); } this.classLoader = other.classLoader; this.loadDefaults = other.loadDefaults; setQuietMode(other.getQuietMode()); } /** * Add a default resource. Resources are loaded in the order of the resources * added. * @param name file name. File should be present in the classpath. */ public static synchronized void addDefaultResource(String name) { if(!defaultResources.contains(name)) { defaultResources.add(name); for(Configuration conf : REGISTRY.keySet()) { if(conf.loadDefaults) { conf.reloadConfiguration(); } } } } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param name resource to be added, the classpath is examined for a file * with that name. */ public void addResource(String name) { addResourceObject(new Resource(name)); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param url url of the resource to be added, the local filesystem is * examined directly to find the resource, without referring to * the classpath. */ public void addResource(URL url) { addResourceObject(new Resource(url)); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param file file-path of resource to be added, the local filesystem is * examined directly to find the resource, without referring to * the classpath. */ public void addResource(Path file) { addResourceObject(new Resource(file)); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * WARNING: The contents of the InputStream will be cached, by this method. * So use this sparingly because it does increase the memory consumption. * * @param in InputStream to deserialize the object from. In will be read from * when a get or set is called next. After it is read the stream will be * closed. 
*/ public void addResource(InputStream in) { addResourceObject(new Resource(in)); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param in InputStream to deserialize the object from. * @param name the name of the resource because InputStream.toString is not * very descriptive some times. */ public void addResource(InputStream in, String name) { addResourceObject(new Resource(in, name)); } /** * Add a configuration resource. * * The properties of this resource will override properties of previously * added resources, unless they were marked <a href="#Final">final</a>. * * @param conf Configuration object from which to load properties */ public void addResource(Configuration conf) { addResourceObject(new Resource(conf.getProps())); } /** * Reload configuration from previously added resources. * * This method will clear all the configuration read from the added * resources, and final parameters. This will make the resources to * be read again before accessing the values. Values that are added * via set methods will overlay values read from the resources. */ public synchronized void reloadConfiguration() { properties = null; // trigger reload finalParameters.clear(); // clear site-limits } private synchronized void addResourceObject(Resource resource) { resources.add(resource); // add to resources reloadConfiguration(); } private static final int MAX_SUBST = 20; private static final int SUB_START_IDX = 0; private static final int SUB_END_IDX = SUB_START_IDX + 1; /** * This is a manual implementation of the following regex * "\\$\\{[^\\}\\$\u0020]+\\}". It can be 15x more efficient than * a regex matcher as demonstrated by HADOOP-11506. This is noticeable with * Hadoop apps building on the assumption Configuration#get is an O(1) * hash table lookup, especially when the eval is a long string. * * @param eval a string that may contain variables requiring expansion. * @return a 2-element int array res such that * eval.substring(res[0], res[1]) is "var" for the left-most occurrence of * ${var} in eval. If no variable is found -1, -1 is returned. */ private static int[] findSubVariable(String eval) { int[] result = {-1, -1}; int matchStart; int leftBrace; // scanning for a brace first because it's less frequent than $ // that can occur in nested class names // match_loop: for (matchStart = 1, leftBrace = eval.indexOf('{', matchStart); // minimum left brace position (follows '$') leftBrace > 0 // right brace of a smallest valid expression "${c}" && leftBrace + "{c".length() < eval.length(); leftBrace = eval.indexOf('{', matchStart)) { int matchedLen = 0; if (eval.charAt(leftBrace - 1) == '$') { int subStart = leftBrace + 1; // after '{' for (int i = subStart; i < eval.length(); i++) { switch (eval.charAt(i)) { case '}': if (matchedLen > 0) { // match result[SUB_START_IDX] = subStart; result[SUB_END_IDX] = subStart + matchedLen; break match_loop; } // fall through to skip 1 char case ' ': case '$': matchStart = i + 1; continue match_loop; default: matchedLen++; } } // scanned from "${" to the end of eval, and no reset via ' ', '$': // no match! 
break match_loop; } else { // not a start of a variable // matchStart = leftBrace + 1; } } return result; } /** * Attempts to repeatedly expand the value {@code expr} by replacing the * left-most substring of the form "${var}" in the following precedence order * <ol> * <li>by the value of the Java system property "var" if defined</li> * <li>by the value of the configuration key "var" if defined</li> * </ol> * * If var is unbounded the current state of expansion "prefix${var}suffix" is * returned. * * @param expr the literal value of a config key * @return null if expr is null, otherwise the value resulting from expanding * expr using the algorithm above. * @throws IllegalArgumentException when more than * {@link Configuration#MAX_SUBST} replacements are required */ private String substituteVars(String expr) { if (expr == null) { return null; } String eval = expr; for (int s = 0; s < MAX_SUBST; s++) { final int[] varBounds = findSubVariable(eval); if (varBounds[SUB_START_IDX] == -1) { return eval; } final String var = eval.substring(varBounds[SUB_START_IDX], varBounds[SUB_END_IDX]); String val = null; try { val = System.getProperty(var); } catch(SecurityException se) { LOG.warn("Unexpected SecurityException in Configuration", se); } if (val == null) { val = getRaw(var); } if (val == null) { return eval; // return literal ${var}: var is unbound } final int dollar = varBounds[SUB_START_IDX] - "${".length(); final int afterRightBrace = varBounds[SUB_END_IDX] + "}".length(); // substitute eval = eval.substring(0, dollar) + val + eval.substring(afterRightBrace); } throw new IllegalStateException("Variable substitution depth too large: " + MAX_SUBST + " " + expr); } /** * Get the value of the <code>name</code> property, <code>null</code> if * no such property exists. If the key is deprecated, it returns the value of * the first key which replaces the deprecated key and is not null. * * Values are processed for <a href="#VariableExpansion">variable expansion</a> * before being returned. * * @param name the property name, will be trimmed before get value. * @return the value of the <code>name</code> or its replacing property, * or null if no such property exists. */ public String get(String name) { String[] names = handleDeprecation(deprecationContext.get(), name); String result = null; for(String n : names) { result = substituteVars(getProps().getProperty(n)); } return result; } /** * Set Configuration to allow keys without values during setup. Intended * for use during testing. * * @param val If true, will allow Configuration to store keys without values */ @VisibleForTesting public void setAllowNullValueProperties( boolean val ) { this.allowNullValueProperties = val; } /** * Return existence of the <code>name</code> property, but only for * names which have no valid value, usually non-existent or commented * out in XML. * * @param name the property name * @return true if the property <code>name</code> exists without value */ @VisibleForTesting public boolean onlyKeyExists(String name) { String[] names = handleDeprecation(deprecationContext.get(), name); for(String n : names) { if ( getProps().getProperty(n,DEFAULT_STRING_CHECK) .equals(DEFAULT_STRING_CHECK) ) { return true; } } return false; } /** * Get the value of the <code>name</code> property as a trimmed <code>String</code>, * <code>null</code> if no such property exists. 
* If the key is deprecated, it returns the value of * the first key which replaces the deprecated key and is not null * * Values are processed for <a href="#VariableExpansion">variable expansion</a> * before being returned. * * @param name the property name. * @return the value of the <code>name</code> or its replacing property, * or null if no such property exists. */ public String getTrimmed(String name) { String value = get(name); if (null == value) { return null; } else { return value.trim(); } } /** * Get the value of the <code>name</code> property as a trimmed <code>String</code>, * <code>defaultValue</code> if no such property exists. * See @{Configuration#getTrimmed} for more details. * * @param name the property name. * @param defaultValue the property default value. * @return the value of the <code>name</code> or defaultValue * if it is not set. */ public String getTrimmed(String name, String defaultValue) { String ret = getTrimmed(name); return ret == null ? defaultValue : ret; } /** * Get the value of the <code>name</code> property, without doing * <a href="#VariableExpansion">variable expansion</a>.If the key is * deprecated, it returns the value of the first key which replaces * the deprecated key and is not null. * * @param name the property name. * @return the value of the <code>name</code> property or * its replacing property and null if no such property exists. */ public String getRaw(String name) { String[] names = handleDeprecation(deprecationContext.get(), name); String result = null; for(String n : names) { result = getProps().getProperty(n); } return result; } /** * Returns alternative names (non-deprecated keys or previously-set deprecated keys) * for a given non-deprecated key. * If the given key is deprecated, return null. * * @param name property name. * @return alternative names. */ private String[] getAlternativeNames(String name) { String altNames[] = null; DeprecatedKeyInfo keyInfo = null; DeprecationContext cur = deprecationContext.get(); String depKey = cur.getReverseDeprecatedKeyMap().get(name); if(depKey != null) { keyInfo = cur.getDeprecatedKeyMap().get(depKey); if(keyInfo.newKeys.length > 0) { if(getProps().containsKey(depKey)) { //if deprecated key is previously set explicitly List<String> list = new ArrayList<String>(); list.addAll(Arrays.asList(keyInfo.newKeys)); list.add(depKey); altNames = list.toArray(new String[list.size()]); } else { altNames = keyInfo.newKeys; } } } return altNames; } /** * Set the <code>value</code> of the <code>name</code> property. If * <code>name</code> is deprecated or there is a deprecated name associated to it, * it sets the value to both names. Name will be trimmed before put into * configuration. * * @param name property name. * @param value property value. */ public void set(String name, String value) { set(name, value, null); } /** * Set the <code>value</code> of the <code>name</code> property. If * <code>name</code> is deprecated, it also sets the <code>value</code> to * the keys that replace the deprecated key. Name will be trimmed before put * into configuration. * * @param name property name. * @param value property value. * @param source the place that this configuration value came from * (For debugging). * @throws IllegalArgumentException when the value or name is null. 
*/ public void set(String name, String value, String source) { Preconditions.checkArgument( name != null, "Property name must not be null"); Preconditions.checkArgument( value != null, "The value of property " + name + " must not be null"); name = name.trim(); DeprecationContext deprecations = deprecationContext.get(); if (deprecations.getDeprecatedKeyMap().isEmpty()) { getProps(); } getOverlay().setProperty(name, value); getProps().setProperty(name, value); String newSource = (source == null ? "programatically" : source); if (!isDeprecated(name)) { updatingResource.put(name, new String[] {newSource}); String[] altNames = getAlternativeNames(name); if(altNames != null) { for(String n: altNames) { if(!n.equals(name)) { getOverlay().setProperty(n, value); getProps().setProperty(n, value); updatingResource.put(n, new String[] {newSource}); } } } } else { String[] names = handleDeprecation(deprecationContext.get(), name); String altSource = "because " + name + " is deprecated"; for(String n : names) { getOverlay().setProperty(n, value); getProps().setProperty(n, value); updatingResource.put(n, new String[] {altSource}); } } } private void warnOnceIfDeprecated(DeprecationContext deprecations, String name) { DeprecatedKeyInfo keyInfo = deprecations.getDeprecatedKeyMap().get(name); if (keyInfo != null && !keyInfo.getAndSetAccessed()) { LOG_DEPRECATION.info(keyInfo.getWarningMessage(name)); } } /** * Unset a previously set property. */ public synchronized void unset(String name) { String[] names = null; if (!isDeprecated(name)) { names = getAlternativeNames(name); if(names == null) { names = new String[]{name}; } } else { names = handleDeprecation(deprecationContext.get(), name); } for(String n: names) { getOverlay().remove(n); getProps().remove(n); } } /** * Sets a property if it is currently unset. * @param name the property name * @param value the new value */ public synchronized void setIfUnset(String name, String value) { if (get(name) == null) { set(name, value); } } private synchronized Properties getOverlay() { if (overlay==null){ overlay=new Properties(); } return overlay; } /** * Get the value of the <code>name</code>. If the key is deprecated, * it returns the value of the first key which replaces the deprecated key * and is not null. * If no such property exists, * then <code>defaultValue</code> is returned. * * @param name property name, will be trimmed before get value. * @param defaultValue default value. * @return property value, or <code>defaultValue</code> if the property * doesn't exist. */ public String get(String name, String defaultValue) { String[] names = handleDeprecation(deprecationContext.get(), name); String result = null; for(String n : names) { result = substituteVars(getProps().getProperty(n, defaultValue)); } return result; } /** * Get the value of the <code>name</code> property as an <code>int</code>. * * If no such property exists, the provided default value is returned, * or if the specified value is not a valid <code>int</code>, * then an error is thrown. * * @param name property name. * @param defaultValue default value. * @throws NumberFormatException when the value is invalid * @return property value as an <code>int</code>, * or <code>defaultValue</code>. 
*/ public int getInt(String name, int defaultValue) { String valueString = getTrimmed(name); if (valueString == null) return defaultValue; String hexString = getHexDigits(valueString); if (hexString != null) { return Integer.parseInt(hexString, 16); } return Integer.parseInt(valueString); } /** * Get the value of the <code>name</code> property as a set of comma-delimited * <code>int</code> values. * * If no such property exists, an empty array is returned. * * @param name property name * @return property value interpreted as an array of comma-delimited * <code>int</code> values */ public int[] getInts(String name) { String[] strings = getTrimmedStrings(name); int[] ints = new int[strings.length]; for (int i = 0; i < strings.length; i++) { ints[i] = Integer.parseInt(strings[i]); } return ints; } /** * Set the value of the <code>name</code> property to an <code>int</code>. * * @param name property name. * @param value <code>int</code> value of the property. */ public void setInt(String name, int value) { set(name, Integer.toString(value)); } /** * Get the value of the <code>name</code> property as a <code>long</code>. * If no such property exists, the provided default value is returned, * or if the specified value is not a valid <code>long</code>, * then an error is thrown. * * @param name property name. * @param defaultValue default value. * @throws NumberFormatException when the value is invalid * @return property value as a <code>long</code>, * or <code>defaultValue</code>. */ public long getLong(String name, long defaultValue) { String valueString = getTrimmed(name); if (valueString == null) return defaultValue; String hexString = getHexDigits(valueString); if (hexString != null) { return Long.parseLong(hexString, 16); } return Long.parseLong(valueString); } /** * Get the value of the <code>name</code> property as a <code>long</code> or * human readable format. If no such property exists, the provided default * value is returned, or if the specified value is not a valid * <code>long</code> or human readable format, then an error is thrown. You * can use the following suffix (case insensitive): k(kilo), m(mega), g(giga), * t(tera), p(peta), e(exa) * * @param name property name. * @param defaultValue default value. * @throws NumberFormatException when the value is invalid * @return property value as a <code>long</code>, * or <code>defaultValue</code>. */ public long getLongBytes(String name, long defaultValue) { String valueString = getTrimmed(name); if (valueString == null) return defaultValue; return StringUtils.TraditionalBinaryPrefix.string2long(valueString); } private String getHexDigits(String value) { boolean negative = false; String str = value; String hexString = null; if (value.startsWith("-")) { negative = true; str = value.substring(1); } if (str.startsWith("0x") || str.startsWith("0X")) { hexString = str.substring(2); if (negative) { hexString = "-" + hexString; } return hexString; } return null; } /** * Set the value of the <code>name</code> property to a <code>long</code>. * * @param name property name. * @param value <code>long</code> value of the property. */ public void setLong(String name, long value) { set(name, Long.toString(value)); } /** * Get the value of the <code>name</code> property as a <code>float</code>. * If no such property exists, the provided default value is returned, * or if the specified value is not a valid <code>float</code>, * then an error is thrown. * * @param name property name. * @param defaultValue default value. 
* @throws NumberFormatException when the value is invalid * @return property value as a <code>float</code>, * or <code>defaultValue</code>. */ public float getFloat(String name, float defaultValue) { String valueString = getTrimmed(name); if (valueString == null) return defaultValue; return Float.parseFloat(valueString); } /** * Set the value of the <code>name</code> property to a <code>float</code>. * * @param name property name. * @param value property value. */ public void setFloat(String name, float value) { set(name,Float.toString(value)); } /** * Get the value of the <code>name</code> property as a <code>double</code>. * If no such property exists, the provided default value is returned, * or if the specified value is not a valid <code>double</code>, * then an error is thrown. * * @param name property name. * @param defaultValue default value. * @throws NumberFormatException when the value is invalid * @return property value as a <code>double</code>, * or <code>defaultValue</code>. */ public double getDouble(String name, double defaultValue) { String valueString = getTrimmed(name); if (valueString == null) return defaultValue; return Double.parseDouble(valueString); } /** * Set the value of the <code>name</code> property to a <code>double</code>. * * @param name property name. * @param value property value. */ public void setDouble(String name, double value) { set(name,Double.toString(value)); } /** * Get the value of the <code>name</code> property as a <code>boolean</code>. * If no such property is specified, or if the specified value is not a valid * <code>boolean</code>, then <code>defaultValue</code> is returned. * * @param name property name. * @param defaultValue default value. * @return property value as a <code>boolean</code>, * or <code>defaultValue</code>. */ public boolean getBoolean(String name, boolean defaultValue) { String valueString = getTrimmed(name); if (null == valueString || valueString.isEmpty()) { return defaultValue; } if (StringUtils.equalsIgnoreCase("true", valueString)) return true; else if (StringUtils.equalsIgnoreCase("false", valueString)) return false; else return defaultValue; } /** * Set the value of the <code>name</code> property to a <code>boolean</code>. * * @param name property name. * @param value <code>boolean</code> value of the property. */ public void setBoolean(String name, boolean value) { set(name, Boolean.toString(value)); } /** * Set the given property, if it is currently unset. * @param name property name * @param value new value */ public void setBooleanIfUnset(String name, boolean value) { setIfUnset(name, Boolean.toString(value)); } /** * Set the value of the <code>name</code> property to the given type. This * is equivalent to <code>set(&lt;name&gt;, value.toString())</code>. * @param name property name * @param value new value */ public <T extends Enum<T>> void setEnum(String name, T value) { set(name, value.toString()); } /** * Return value matching this enumerated type. * Note that the returned value is trimmed by this method. * @param name Property name * @param defaultValue Value returned if no mapping exists * @throws IllegalArgumentException If mapping is illegal for the type * provided */ public <T extends Enum<T>> T getEnum(String name, T defaultValue) { final String val = getTrimmed(name); return null == val ? 
defaultValue : Enum.valueOf(defaultValue.getDeclaringClass(), val); } enum ParsedTimeDuration { NS { TimeUnit unit() { return TimeUnit.NANOSECONDS; } String suffix() { return "ns"; } }, US { TimeUnit unit() { return TimeUnit.MICROSECONDS; } String suffix() { return "us"; } }, MS { TimeUnit unit() { return TimeUnit.MILLISECONDS; } String suffix() { return "ms"; } }, S { TimeUnit unit() { return TimeUnit.SECONDS; } String suffix() { return "s"; } }, M { TimeUnit unit() { return TimeUnit.MINUTES; } String suffix() { return "m"; } }, H { TimeUnit unit() { return TimeUnit.HOURS; } String suffix() { return "h"; } }, D { TimeUnit unit() { return TimeUnit.DAYS; } String suffix() { return "d"; } }; abstract TimeUnit unit(); abstract String suffix(); static ParsedTimeDuration unitFor(String s) { for (ParsedTimeDuration ptd : values()) { // iteration order is in decl order, so SECONDS matched last if (s.endsWith(ptd.suffix())) { return ptd; } } return null; } static ParsedTimeDuration unitFor(TimeUnit unit) { for (ParsedTimeDuration ptd : values()) { if (ptd.unit() == unit) { return ptd; } } return null; } } /** * Set the value of <code>name</code> to the given time duration. This * is equivalent to <code>set(&lt;name&gt;, value + &lt;time suffix&gt;)</code>. * @param name Property name * @param value Time duration * @param unit Unit of time */ public void setTimeDuration(String name, long value, TimeUnit unit) { set(name, value + ParsedTimeDuration.unitFor(unit).suffix()); } /** * Return time duration in the given time unit. Valid units are encoded in * properties as suffixes: nanoseconds (ns), microseconds (us), milliseconds * (ms), seconds (s), minutes (m), hours (h), and days (d). * @param name Property name * @param defaultValue Value returned if no mapping exists. * @param unit Unit to convert the stored property, if it exists. * @throws NumberFormatException If the property stripped of its unit is not * a number */ public long getTimeDuration(String name, long defaultValue, TimeUnit unit) { String vStr = get(name); if (null == vStr) { return defaultValue; } vStr = vStr.trim(); ParsedTimeDuration vUnit = ParsedTimeDuration.unitFor(vStr); if (null == vUnit) { LOG.warn("No unit for " + name + "(" + vStr + ") assuming " + unit); vUnit = ParsedTimeDuration.unitFor(unit); } else { vStr = vStr.substring(0, vStr.lastIndexOf(vUnit.suffix())); } return unit.convert(Long.parseLong(vStr), vUnit.unit()); } /** * Get the value of the <code>name</code> property as a <code>Pattern</code>. * If no such property is specified, or if the specified value is not a valid * <code>Pattern</code>, then <code>DefaultValue</code> is returned. * Note that the returned value is NOT trimmed by this method. * * @param name property name * @param defaultValue default value * @return property value as a compiled Pattern, or defaultValue */ public Pattern getPattern(String name, Pattern defaultValue) { String valString = get(name); if (null == valString || valString.isEmpty()) { return defaultValue; } try { return Pattern.compile(valString); } catch (PatternSyntaxException pse) { LOG.warn("Regular expression '" + valString + "' for property '" + name + "' not valid. Using default", pse); return defaultValue; } } /** * Set the given property to <code>Pattern</code>. * If the pattern is passed as null, sets the empty pattern which results in * further calls to getPattern(...) returning the default value. 
* * @param name property name * @param pattern new value */ public void setPattern(String name, Pattern pattern) { assert pattern != null : "Pattern cannot be null"; set(name, pattern.pattern()); } /** * Gets information about why a property was set. Typically this is the * path to the resource objects (file, URL, etc.) the property came from, but * it can also indicate that it was set programatically, or because of the * command line. * * @param name - The property name to get the source of. * @return null - If the property or its source wasn't found. Otherwise, * returns a list of the sources of the resource. The older sources are * the first ones in the list. So for example if a configuration is set from * the command line, and then written out to a file that is read back in the * first entry would indicate that it was set from the command line, while * the second one would indicate the file that the new configuration was read * in from. */ @InterfaceStability.Unstable public synchronized String[] getPropertySources(String name) { if (properties == null) { // If properties is null, it means a resource was newly added // but the props were cleared so as to load it upon future // requests. So lets force a load by asking a properties list. getProps(); } // Return a null right away if our properties still // haven't loaded or the resource mapping isn't defined if (properties == null || updatingResource == null) { return null; } else { String[] source = updatingResource.get(name); if(source == null) { return null; } else { return Arrays.copyOf(source, source.length); } } } /** * A class that represents a set of positive integer ranges. It parses * strings of the form: "2-3,5,7-" where ranges are separated by comma and * the lower/upper bounds are separated by dash. Either the lower or upper * bound may be omitted meaning all values up to or over. So the string * above means 2, 3, 5, and 7, 8, 9, ... */ public static class IntegerRanges implements Iterable<Integer>{ private static class Range { int start; int end; } private static class RangeNumberIterator implements Iterator<Integer> { Iterator<Range> internal; int at; int end; public RangeNumberIterator(List<Range> ranges) { if (ranges != null) { internal = ranges.iterator(); } at = -1; end = -2; } @Override public boolean hasNext() { if (at <= end) { return true; } else if (internal != null){ return internal.hasNext(); } return false; } @Override public Integer next() { if (at <= end) { at++; return at - 1; } else if (internal != null){ Range found = internal.next(); if (found != null) { at = found.start; end = found.end; at++; return at - 1; } } return null; } @Override public void remove() { throw new UnsupportedOperationException(); } }; List<Range> ranges = new ArrayList<Range>(); public IntegerRanges() { } public IntegerRanges(String newValue) { StringTokenizer itr = new StringTokenizer(newValue, ","); while (itr.hasMoreTokens()) { String rng = itr.nextToken().trim(); String[] parts = rng.split("-", 3); if (parts.length < 1 || parts.length > 2) { throw new IllegalArgumentException("integer range badly formed: " + rng); } Range r = new Range(); r.start = convertToInt(parts[0], 0); if (parts.length == 2) { r.end = convertToInt(parts[1], Integer.MAX_VALUE); } else { r.end = r.start; } if (r.start > r.end) { throw new IllegalArgumentException("IntegerRange from " + r.start + " to " + r.end + " is invalid"); } ranges.add(r); } } /** * Convert a string to an int treating empty strings as the default value. 
* @param value the string value * @param defaultValue the value for if the string is empty * @return the desired integer */ private static int convertToInt(String value, int defaultValue) { String trim = value.trim(); if (trim.length() == 0) { return defaultValue; } return Integer.parseInt(trim); } /** * Is the given value in the set of ranges * @param value the value to check * @return is the value in the ranges? */ public boolean isIncluded(int value) { for(Range r: ranges) { if (r.start <= value && value <= r.end) { return true; } } return false; } /** * @return true if there are no values in this range, else false. */ public boolean isEmpty() { return ranges == null || ranges.isEmpty(); } @Override public String toString() { StringBuilder result = new StringBuilder(); boolean first = true; for(Range r: ranges) { if (first) { first = false; } else { result.append(','); } result.append(r.start); result.append('-'); result.append(r.end); } return result.toString(); } @Override public Iterator<Integer> iterator() { return new RangeNumberIterator(ranges); } } /** * Parse the given attribute as a set of integer ranges * @param name the attribute name * @param defaultValue the default value if it is not set * @return a new set of ranges from the configured value */ public IntegerRanges getRange(String name, String defaultValue) { return new IntegerRanges(get(name, defaultValue)); } /** * Get the comma delimited values of the <code>name</code> property as * a collection of <code>String</code>s. * If no such property is specified then empty collection is returned. * <p> * This is an optimized version of {@link #getStrings(String)} * * @param name property name. * @return property value as a collection of <code>String</code>s. */ public Collection<String> getStringCollection(String name) { String valueString = get(name); return StringUtils.getStringCollection(valueString); } /** * Get the comma delimited values of the <code>name</code> property as * an array of <code>String</code>s. * If no such property is specified then <code>null</code> is returned. * * @param name property name. * @return property value as an array of <code>String</code>s, * or <code>null</code>. */ public String[] getStrings(String name) { String valueString = get(name); return StringUtils.getStrings(valueString); } /** * Get the comma delimited values of the <code>name</code> property as * an array of <code>String</code>s. * If no such property is specified then default value is returned. * * @param name property name. * @param defaultValue The default value * @return property value as an array of <code>String</code>s, * or default value. */ public String[] getStrings(String name, String... defaultValue) { String valueString = get(name); if (valueString == null) { return defaultValue; } else { return StringUtils.getStrings(valueString); } } /** * Get the comma delimited values of the <code>name</code> property as * a collection of <code>String</code>s, trimmed of the leading and trailing whitespace. * If no such property is specified then empty <code>Collection</code> is returned. * * @param name property name. 
* @return property value as a collection of <code>String</code>s, or empty <code>Collection</code> */ public Collection<String> getTrimmedStringCollection(String name) { String valueString = get(name); if (null == valueString) { Collection<String> empty = new ArrayList<String>(); return empty; } return StringUtils.getTrimmedStringCollection(valueString); } /** * Get the comma delimited values of the <code>name</code> property as * an array of <code>String</code>s, trimmed of the leading and trailing whitespace. * If no such property is specified then an empty array is returned. * * @param name property name. * @return property value as an array of trimmed <code>String</code>s, * or empty array. */ public String[] getTrimmedStrings(String name) { String valueString = get(name); return StringUtils.getTrimmedStrings(valueString); } /** * Get the comma delimited values of the <code>name</code> property as * an array of <code>String</code>s, trimmed of the leading and trailing whitespace. * If no such property is specified then default value is returned. * * @param name property name. * @param defaultValue The default value * @return property value as an array of trimmed <code>String</code>s, * or default value. */ public String[] getTrimmedStrings(String name, String... defaultValue) { String valueString = get(name); if (null == valueString) { return defaultValue; } else { return StringUtils.getTrimmedStrings(valueString); } } /** * Set the array of string values for the <code>name</code> property as * as comma delimited values. * * @param name property name. * @param values The values */ public void setStrings(String name, String... values) { set(name, StringUtils.arrayToString(values)); } /** * Get the value for a known password configuration element. * In order to enable the elimination of clear text passwords in config, * this method attempts to resolve the property name as an alias through * the CredentialProvider API and conditionally fallsback to config. * @param name property name * @return password */ public char[] getPassword(String name) throws IOException { char[] pass = null; pass = getPasswordFromCredentialProviders(name); if (pass == null) { pass = getPasswordFromConfig(name); } return pass; } /** * Try and resolve the provided element name as a credential provider * alias. * @param name alias of the provisioned credential * @return password or null if not found * @throws IOException */ protected char[] getPasswordFromCredentialProviders(String name) throws IOException { char[] pass = null; try { List<CredentialProvider> providers = CredentialProviderFactory.getProviders(this); if (providers != null) { for (CredentialProvider provider : providers) { try { CredentialEntry entry = provider.getCredentialEntry(name); if (entry != null) { pass = entry.getCredential(); break; } } catch (IOException ioe) { throw new IOException("Can't get key " + name + " from key provider" + "of type: " + provider.getClass().getName() + ".", ioe); } } } } catch (IOException ioe) { throw new IOException("Configuration problem with provider path.", ioe); } return pass; } /** * Fallback to clear text passwords in configuration. 
* @param name * @return clear text password or null */ protected char[] getPasswordFromConfig(String name) { char[] pass = null; if (getBoolean(CredentialProvider.CLEAR_TEXT_FALLBACK, true)) { String passStr = get(name); if (passStr != null) { pass = passStr.toCharArray(); } } return pass; } /** * Get the socket address for <code>hostProperty</code> as a * <code>InetSocketAddress</code>. If <code>hostProperty</code> is * <code>null</code>, <code>addressProperty</code> will be used. This * is useful for cases where we want to differentiate between host * bind address and address clients should use to establish connection. * * @param hostProperty bind host property name. * @param addressProperty address property name. * @param defaultAddressValue the default value * @param defaultPort the default port * @return InetSocketAddress */ public InetSocketAddress getSocketAddr( String hostProperty, String addressProperty, String defaultAddressValue, int defaultPort) { InetSocketAddress bindAddr = getSocketAddr( addressProperty, defaultAddressValue, defaultPort); final String host = get(hostProperty); if (host == null || host.isEmpty()) { return bindAddr; } return NetUtils.createSocketAddr( host, bindAddr.getPort(), hostProperty); } /** * Get the socket address for <code>name</code> property as a * <code>InetSocketAddress</code>. * @param name property name. * @param defaultAddress the default value * @param defaultPort the default port * @return InetSocketAddress */ public InetSocketAddress getSocketAddr( String name, String defaultAddress, int defaultPort) { final String address = getTrimmed(name, defaultAddress); return NetUtils.createSocketAddr(address, defaultPort, name); } /** * Set the socket address for the <code>name</code> property as * a <code>host:port</code>. */ public void setSocketAddr(String name, InetSocketAddress addr) { set(name, NetUtils.getHostPortString(addr)); } /** * Set the socket address a client can use to connect for the * <code>name</code> property as a <code>host:port</code>. The wildcard * address is replaced with the local host's address. If the host and address * properties are configured the host component of the address will be combined * with the port component of the addr to generate the address. This is to allow * optional control over which host name is used in multi-home bind-host * cases where a host can have multiple names * @param hostProperty the bind-host configuration name * @param addressProperty the service address configuration name * @param defaultAddressValue the service default address configuration value * @param addr InetSocketAddress of the service listener * @return InetSocketAddress for clients to connect */ public InetSocketAddress updateConnectAddr( String hostProperty, String addressProperty, String defaultAddressValue, InetSocketAddress addr) { final String host = get(hostProperty); final String connectHostPort = getTrimmed(addressProperty, defaultAddressValue); if (host == null || host.isEmpty() || connectHostPort == null || connectHostPort.isEmpty()) { //not our case, fall back to original logic return updateConnectAddr(addressProperty, addr); } final String connectHost = connectHostPort.split(":")[0]; // Create connect address using client address hostname and server port. return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost( connectHost, addr.getPort())); } /** * Set the socket address a client can use to connect for the * <code>name</code> property as a <code>host:port</code>. 
The wildcard * address is replaced with the local host's address. * @param name property name. * @param addr InetSocketAddress of a listener to store in the given property * @return InetSocketAddress for clients to connect */ public InetSocketAddress updateConnectAddr(String name, InetSocketAddress addr) { final InetSocketAddress connectAddr = NetUtils.getConnectAddress(addr); setSocketAddr(name, connectAddr); return connectAddr; } /** * Load a class by name. * * @param name the class name. * @return the class object. * @throws ClassNotFoundException if the class is not found. */ public Class<?> getClassByName(String name) throws ClassNotFoundException { Class<?> ret = getClassByNameOrNull(name); if (ret == null) { throw new ClassNotFoundException("Class " + name + " not found"); } return ret; } /** * Load a class by name, returning null rather than throwing an exception * if it couldn't be loaded. This is to avoid the overhead of creating * an exception. * * @param name the class name * @return the class object, or null if it could not be found. */ public Class<?> getClassByNameOrNull(String name) { Map<String, WeakReference<Class<?>>> map; synchronized (CACHE_CLASSES) { map = CACHE_CLASSES.get(classLoader); if (map == null) { map = Collections.synchronizedMap( new WeakHashMap<String, WeakReference<Class<?>>>()); CACHE_CLASSES.put(classLoader, map); } } Class<?> clazz = null; WeakReference<Class<?>> ref = map.get(name); if (ref != null) { clazz = ref.get(); } if (clazz == null) { try { clazz = Class.forName(name, true, classLoader); } catch (ClassNotFoundException e) { // Leave a marker that the class isn't found map.put(name, new WeakReference<Class<?>>(NEGATIVE_CACHE_SENTINEL)); return null; } // two putters can race here, but they'll put the same class map.put(name, new WeakReference<Class<?>>(clazz)); return clazz; } else if (clazz == NEGATIVE_CACHE_SENTINEL) { return null; // not found } else { // cache hit return clazz; } } /** * Get the value of the <code>name</code> property * as an array of <code>Class</code>. * The value of the property specifies a list of comma separated class names. * If no such property is specified, then <code>defaultValue</code> is * returned. * * @param name the property name. * @param defaultValue default value. * @return property value as a <code>Class[]</code>, * or <code>defaultValue</code>. */ public Class<?>[] getClasses(String name, Class<?> ... defaultValue) { String[] classnames = getTrimmedStrings(name); if (classnames == null) return defaultValue; try { Class<?>[] classes = new Class<?>[classnames.length]; for(int i = 0; i < classnames.length; i++) { classes[i] = getClassByName(classnames[i]); } return classes; } catch (ClassNotFoundException e) { throw new RuntimeException(e); } } /** * Get the value of the <code>name</code> property as a <code>Class</code>. * If no such property is specified, then <code>defaultValue</code> is * returned. * * @param name the class name. * @param defaultValue default value. * @return property value as a <code>Class</code>, * or <code>defaultValue</code>. */ public Class<?> getClass(String name, Class<?> defaultValue) { String valueString = getTrimmed(name); if (valueString == null) return defaultValue; try { return getClassByName(valueString); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } } /** * Get the value of the <code>name</code> property as a <code>Class</code> * implementing the interface specified by <code>xface</code>. 
* * If no such property is specified, then <code>defaultValue</code> is * returned. * * An exception is thrown if the returned class does not implement the named * interface. * * @param name the class name. * @param defaultValue default value. * @param xface the interface implemented by the named class. * @return property value as a <code>Class</code>, * or <code>defaultValue</code>. */ public <U> Class<? extends U> getClass(String name, Class<? extends U> defaultValue, Class<U> xface) { try { Class<?> theClass = getClass(name, defaultValue); if (theClass != null && !xface.isAssignableFrom(theClass)) throw new RuntimeException(theClass+" not "+xface.getName()); else if (theClass != null) return theClass.asSubclass(xface); else return null; } catch (Exception e) { throw new RuntimeException(e); } } /** * Get the value of the <code>name</code> property as a <code>List</code> * of objects implementing the interface specified by <code>xface</code>. * * An exception is thrown if any of the classes does not exist, or if it does * not implement the named interface. * * @param name the property name. * @param xface the interface implemented by the classes named by * <code>name</code>. * @return a <code>List</code> of objects implementing <code>xface</code>. */ @SuppressWarnings("unchecked") public <U> List<U> getInstances(String name, Class<U> xface) { List<U> ret = new ArrayList<U>(); Class<?>[] classes = getClasses(name); for (Class<?> cl: classes) { if (!xface.isAssignableFrom(cl)) { throw new RuntimeException(cl + " does not implement " + xface); } ret.add((U)ReflectionUtils.newInstance(cl, this)); } return ret; } /** * Set the value of the <code>name</code> property to the name of a * <code>theClass</code> implementing the given interface <code>xface</code>. * * An exception is thrown if <code>theClass</code> does not implement the * interface <code>xface</code>. * * @param name property name. * @param theClass property value. * @param xface the interface implemented by the named class. */ public void setClass(String name, Class<?> theClass, Class<?> xface) { if (!xface.isAssignableFrom(theClass)) throw new RuntimeException(theClass+" not "+xface.getName()); set(name, theClass.getName()); } /** * Get a local file under a directory named by <i>dirsProp</i> with * the given <i>path</i>. If <i>dirsProp</i> contains multiple directories, * then one is chosen based on <i>path</i>'s hash code. If the selected * directory does not exist, an attempt is made to create it. * * @param dirsProp directory in which to locate the file. * @param path file-path. * @return local file under the directory with the given path. */ public Path getLocalPath(String dirsProp, String path) throws IOException { String[] dirs = getTrimmedStrings(dirsProp); int hashCode = path.hashCode(); FileSystem fs = FileSystem.getLocal(this); for (int i = 0; i < dirs.length; i++) { // try each local dir int index = (hashCode+i & Integer.MAX_VALUE) % dirs.length; Path file = new Path(dirs[index], path); Path dir = file.getParent(); if (fs.mkdirs(dir) || fs.exists(dir)) { return file; } } LOG.warn("Could not make " + path + " in local directories from " + dirsProp); for(int i=0; i < dirs.length; i++) { int index = (hashCode+i & Integer.MAX_VALUE) % dirs.length; LOG.warn(dirsProp + "[" + index + "]=" + dirs[index]); } throw new IOException("No valid local directories in property: "+dirsProp); } /** * Get a local file name under a directory named in <i>dirsProp</i> with * the given <i>path</i>. 
If <i>dirsProp</i> contains multiple directories, * then one is chosen based on <i>path</i>'s hash code. If the selected * directory does not exist, an attempt is made to create it. * * @param dirsProp directory in which to locate the file. * @param path file-path. * @return local file under the directory with the given path. */ public File getFile(String dirsProp, String path) throws IOException { String[] dirs = getTrimmedStrings(dirsProp); int hashCode = path.hashCode(); for (int i = 0; i < dirs.length; i++) { // try each local dir int index = (hashCode+i & Integer.MAX_VALUE) % dirs.length; File file = new File(dirs[index], path); File dir = file.getParentFile(); if (dir.exists() || dir.mkdirs()) { return file; } } throw new IOException("No valid local directories in property: "+dirsProp); } /** * Get the {@link URL} for the named resource. * * @param name resource name. * @return the url for the named resource. */ public URL getResource(String name) { return classLoader.getResource(name); } /** * Get an input stream attached to the configuration resource with the * given <code>name</code>. * * @param name configuration resource name. * @return an input stream attached to the resource. */ public InputStream getConfResourceAsInputStream(String name) { try { URL url= getResource(name); if (url == null) { LOG.info(name + " not found"); return null; } else { LOG.info("found resource " + name + " at " + url); } return url.openStream(); } catch (Exception e) { return null; } } /** * Get a {@link Reader} attached to the configuration resource with the * given <code>name</code>. * * @param name configuration resource name. * @return a reader attached to the resource. */ public Reader getConfResourceAsReader(String name) { try { URL url= getResource(name); if (url == null) { LOG.info(name + " not found"); return null; } else { LOG.info("found resource " + name + " at " + url); } return new InputStreamReader(url.openStream(), Charsets.UTF_8); } catch (Exception e) { return null; } } /** * Get the set of parameters marked final. * * @return final parameter set. */ public Set<String> getFinalParameters() { Set<String> setFinalParams = Collections.newSetFromMap( new ConcurrentHashMap<String, Boolean>()); setFinalParams.addAll(finalParameters); return setFinalParams; } protected synchronized Properties getProps() { if (properties == null) { properties = new Properties(); Map<String, String[]> backup = new ConcurrentHashMap<String, String[]>(updatingResource); loadResources(properties, resources, quietmode); if (overlay != null) { properties.putAll(overlay); for (Map.Entry<Object,Object> item: overlay.entrySet()) { String key = (String)item.getKey(); String[] source = backup.get(key); if(source != null) { updatingResource.put(key, source); } } } } return properties; } /** * Return the number of keys in the configuration. * * @return number of keys in the configuration. */ public int size() { return getProps().size(); } /** * Clears all keys from the configuration. */ public void clear() { getProps().clear(); getOverlay().clear(); } /** * Get an {@link Iterator} to go through the list of <code>String</code> * key-value pairs in the configuration. * * @return an iterator over the entries. */ @Override public Iterator<Map.Entry<String, String>> iterator() { // Get a copy of just the string to string pairs. After the old object // methods that allow non-strings to be put into configurations are removed, // we could replace properties with a Map<String,String> and get rid of this // code. 
Map<String,String> result = new HashMap<String,String>(); for(Map.Entry<Object,Object> item: getProps().entrySet()) { if (item.getKey() instanceof String && item.getValue() instanceof String) { result.put((String) item.getKey(), (String) item.getValue()); } } return result.entrySet().iterator(); } private Document parse(DocumentBuilder builder, URL url) throws IOException, SAXException { if (!quietmode) { if (LOG.isDebugEnabled()) { LOG.debug("parsing URL " + url); } } if (url == null) { return null; } return parse(builder, url.openStream(), url.toString()); } private Document parse(DocumentBuilder builder, InputStream is, String systemId) throws IOException, SAXException { if (!quietmode) { LOG.debug("parsing input stream " + is); } if (is == null) { return null; } try { return (systemId == null) ? builder.parse(is) : builder.parse(is, systemId); } finally { is.close(); } } private void loadResources(Properties properties, ArrayList<Resource> resources, boolean quiet) { if(loadDefaults) { for (String resource : defaultResources) { loadResource(properties, new Resource(resource), quiet); } //support the hadoop-site.xml as a deprecated case if(getResource("hadoop-site.xml")!=null) { loadResource(properties, new Resource("hadoop-site.xml"), quiet); } } for (int i = 0; i < resources.size(); i++) { Resource ret = loadResource(properties, resources.get(i), quiet); if (ret != null) { resources.set(i, ret); } } } private Resource loadResource(Properties properties, Resource wrapper, boolean quiet) { String name = UNKNOWN_RESOURCE; try { Object resource = wrapper.getResource(); name = wrapper.getName(); DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance(); //ignore all comments inside the xml file docBuilderFactory.setIgnoringComments(true); //allow includes in the xml file docBuilderFactory.setNamespaceAware(true); try { docBuilderFactory.setXIncludeAware(true); } catch (UnsupportedOperationException e) { LOG.error("Failed to set setXIncludeAware(true) for parser " + docBuilderFactory + ":" + e, e); } DocumentBuilder builder = docBuilderFactory.newDocumentBuilder(); Document doc = null; Element root = null; boolean returnCachedProperties = false; if (resource instanceof URL) { // an URL resource doc = parse(builder, (URL)resource); } else if (resource instanceof String) { // a CLASSPATH resource URL url = getResource((String)resource); doc = parse(builder, url); } else if (resource instanceof Path) { // a file resource // Can't use FileSystem API or we get an infinite loop // since FileSystem uses Configuration API. Use java.io.File instead. 
File file = new File(((Path)resource).toUri().getPath()) .getAbsoluteFile(); if (file.exists()) { if (!quiet) { LOG.debug("parsing File " + file); } doc = parse(builder, new BufferedInputStream( new FileInputStream(file)), ((Path)resource).toString()); } } else if (resource instanceof InputStream) { doc = parse(builder, (InputStream) resource, null); returnCachedProperties = true; } else if (resource instanceof Properties) { overlay(properties, (Properties)resource); } else if (resource instanceof Element) { root = (Element)resource; } if (root == null) { if (doc == null) { if (quiet) { return null; } throw new RuntimeException(resource + " not found"); } root = doc.getDocumentElement(); } Properties toAddTo = properties; if(returnCachedProperties) { toAddTo = new Properties(); } if (!"configuration".equals(root.getTagName())) LOG.fatal("bad conf file: top-level element not <configuration>"); NodeList props = root.getChildNodes(); DeprecationContext deprecations = deprecationContext.get(); for (int i = 0; i < props.getLength(); i++) { Node propNode = props.item(i); if (!(propNode instanceof Element)) continue; Element prop = (Element)propNode; if ("configuration".equals(prop.getTagName())) { loadResource(toAddTo, new Resource(prop, name), quiet); continue; } if (!"property".equals(prop.getTagName())) LOG.warn("bad conf file: element not <property>"); NodeList fields = prop.getChildNodes(); String attr = null; String value = null; boolean finalParameter = false; LinkedList<String> source = new LinkedList<String>(); for (int j = 0; j < fields.getLength(); j++) { Node fieldNode = fields.item(j); if (!(fieldNode instanceof Element)) continue; Element field = (Element)fieldNode; if ("name".equals(field.getTagName()) && field.hasChildNodes()) attr = StringInterner.weakIntern( ((Text)field.getFirstChild()).getData().trim()); if ("value".equals(field.getTagName()) && field.hasChildNodes()) value = StringInterner.weakIntern( ((Text)field.getFirstChild()).getData()); if ("final".equals(field.getTagName()) && field.hasChildNodes()) finalParameter = "true".equals(((Text)field.getFirstChild()).getData()); if ("source".equals(field.getTagName()) && field.hasChildNodes()) source.add(StringInterner.weakIntern( ((Text)field.getFirstChild()).getData())); } source.add(name); // Ignore this parameter if it has already been marked as 'final' if (attr != null) { if (deprecations.getDeprecatedKeyMap().containsKey(attr)) { DeprecatedKeyInfo keyInfo = deprecations.getDeprecatedKeyMap().get(attr); keyInfo.clearAccessed(); for (String key:keyInfo.newKeys) { // update new keys with deprecated key's value loadProperty(toAddTo, name, key, value, finalParameter, source.toArray(new String[source.size()])); } } else { loadProperty(toAddTo, name, attr, value, finalParameter, source.toArray(new String[source.size()])); } } } if (returnCachedProperties) { overlay(properties, toAddTo); return new Resource(toAddTo, name); } return null; } catch (IOException e) { LOG.fatal("error parsing conf " + name, e); throw new RuntimeException(e); } catch (DOMException e) { LOG.fatal("error parsing conf " + name, e); throw new RuntimeException(e); } catch (SAXException e) { LOG.fatal("error parsing conf " + name, e); throw new RuntimeException(e); } catch (ParserConfigurationException e) { LOG.fatal("error parsing conf " + name , e); throw new RuntimeException(e); } } private void overlay(Properties to, Properties from) { for (Entry<Object, Object> entry: from.entrySet()) { to.put(entry.getKey(), entry.getValue()); } } private void 
loadProperty(Properties properties, String name, String attr, String value, boolean finalParameter, String[] source) { if (value != null || allowNullValueProperties) { if (value == null) { value = DEFAULT_STRING_CHECK; } if (!finalParameters.contains(attr)) { properties.setProperty(attr, value); if(source != null) { updatingResource.put(attr, source); } } else if (!value.equals(properties.getProperty(attr))) { LOG.warn(name+":an attempt to override final parameter: "+attr +"; Ignoring."); } } if (finalParameter && attr != null) { finalParameters.add(attr); } } /** * Write out the non-default properties in this configuration to the given * {@link OutputStream} using UTF-8 encoding. * * @param out the output stream to write to. */ public void writeXml(OutputStream out) throws IOException { writeXml(new OutputStreamWriter(out, "UTF-8")); } /** * Write out the non-default properties in this configuration to the given * {@link Writer}. * * @param out the writer to write to. */ public void writeXml(Writer out) throws IOException { Document doc = asXmlDocument(); try { DOMSource source = new DOMSource(doc); StreamResult result = new StreamResult(out); TransformerFactory transFactory = TransformerFactory.newInstance(); Transformer transformer = transFactory.newTransformer(); // Important to not hold Configuration log while writing result, since // 'out' may be an HDFS stream which needs to lock this configuration // from another thread. transformer.transform(source, result); } catch (TransformerException te) { throw new IOException(te); } } /** * Return the XML DOM corresponding to this Configuration. */ private synchronized Document asXmlDocument() throws IOException { Document doc; try { doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument(); } catch (ParserConfigurationException pe) { throw new IOException(pe); } Element conf = doc.createElement("configuration"); doc.appendChild(conf); conf.appendChild(doc.createTextNode("\n")); handleDeprecation(); //ensure properties is set and deprecation is handled for (Enumeration<Object> e = properties.keys(); e.hasMoreElements();) { String name = (String)e.nextElement(); Object object = properties.get(name); String value = null; if (object instanceof String) { value = (String) object; }else { continue; } Element propNode = doc.createElement("property"); conf.appendChild(propNode); Element nameNode = doc.createElement("name"); nameNode.appendChild(doc.createTextNode(name)); propNode.appendChild(nameNode); Element valueNode = doc.createElement("value"); valueNode.appendChild(doc.createTextNode(value)); propNode.appendChild(valueNode); if (updatingResource != null) { String[] sources = updatingResource.get(name); if(sources != null) { for(String s : sources) { Element sourceNode = doc.createElement("source"); sourceNode.appendChild(doc.createTextNode(s)); propNode.appendChild(sourceNode); } } } conf.appendChild(doc.createTextNode("\n")); } return doc; } /** * Writes out all the parameters and their properties (final and resource) to * the given {@link Writer} * The format of the output would be * { "properties" : [ {key1,value1,key1.isFinal,key1.resource}, {key2,value2, * key2.isFinal,key2.resource}... ] } * It does not output the parameters of the configuration object which is * loaded from an input stream. 
* @param out the Writer to write to * @throws IOException */ public static void dumpConfiguration(Configuration config, Writer out) throws IOException { JsonFactory dumpFactory = new JsonFactory(); JsonGenerator dumpGenerator = dumpFactory.createJsonGenerator(out); dumpGenerator.writeStartObject(); dumpGenerator.writeFieldName("properties"); dumpGenerator.writeStartArray(); dumpGenerator.flush(); synchronized (config) { for (Map.Entry<Object,Object> item: config.getProps().entrySet()) { dumpGenerator.writeStartObject(); dumpGenerator.writeStringField("key", (String) item.getKey()); dumpGenerator.writeStringField("value", config.get((String) item.getKey())); dumpGenerator.writeBooleanField("isFinal", config.finalParameters.contains(item.getKey())); String[] resources = config.updatingResource.get(item.getKey()); String resource = UNKNOWN_RESOURCE; if(resources != null && resources.length > 0) { resource = resources[0]; } dumpGenerator.writeStringField("resource", resource); dumpGenerator.writeEndObject(); } } dumpGenerator.writeEndArray(); dumpGenerator.writeEndObject(); dumpGenerator.flush(); } /** * Get the {@link ClassLoader} for this job. * * @return the correct class loader. */ public ClassLoader getClassLoader() { return classLoader; } /** * Set the class loader that will be used to load the various objects. * * @param classLoader the new class loader. */ public void setClassLoader(ClassLoader classLoader) { this.classLoader = classLoader; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Configuration: "); if(loadDefaults) { toString(defaultResources, sb); if(resources.size()>0) { sb.append(", "); } } toString(resources, sb); return sb.toString(); } private <T> void toString(List<T> resources, StringBuilder sb) { ListIterator<T> i = resources.listIterator(); while (i.hasNext()) { if (i.nextIndex() != 0) { sb.append(", "); } sb.append(i.next()); } } /** * Set the quietness-mode. * * In the quiet-mode, error and informational messages might not be logged. * * @param quietmode <code>true</code> to set quiet-mode on, <code>false</code> * to turn it off. */ public synchronized void setQuietMode(boolean quietmode) { this.quietmode = quietmode; } synchronized boolean getQuietMode() { return this.quietmode; } /** For debugging. List non-default properties to the terminal and exit. 
*/ public static void main(String[] args) throws Exception { new Configuration().writeXml(System.out); } @Override public void readFields(DataInput in) throws IOException { clear(); int size = WritableUtils.readVInt(in); for(int i=0; i < size; ++i) { String key = org.apache.hadoop.io.Text.readString(in); String value = org.apache.hadoop.io.Text.readString(in); set(key, value); String sources[] = WritableUtils.readCompressedStringArray(in); if(sources != null) { updatingResource.put(key, sources); } } } //@Override @Override public void write(DataOutput out) throws IOException { Properties props = getProps(); WritableUtils.writeVInt(out, props.size()); for(Map.Entry<Object, Object> item: props.entrySet()) { org.apache.hadoop.io.Text.writeString(out, (String) item.getKey()); org.apache.hadoop.io.Text.writeString(out, (String) item.getValue()); WritableUtils.writeCompressedStringArray(out, updatingResource.get(item.getKey())); } } /** * get keys matching the the regex * @param regex * @return Map<String,String> with matching keys */ public Map<String,String> getValByRegex(String regex) { Pattern p = Pattern.compile(regex); Map<String,String> result = new HashMap<String,String>(); Matcher m; for(Map.Entry<Object,Object> item: getProps().entrySet()) { if (item.getKey() instanceof String && item.getValue() instanceof String) { m = p.matcher((String)item.getKey()); if(m.find()) { // match result.put((String) item.getKey(), substituteVars(getProps().getProperty((String) item.getKey()))); } } } return result; } /** * A unique class which is used as a sentinel value in the caching * for getClassByName. {@see Configuration#getClassByNameOrNull(String)} */ private static abstract class NegativeCacheSentinel {} public static void dumpDeprecatedKeys() { DeprecationContext deprecations = deprecationContext.get(); for (Map.Entry<String, DeprecatedKeyInfo> entry : deprecations.getDeprecatedKeyMap().entrySet()) { StringBuilder newKeys = new StringBuilder(); for (String newKey : entry.getValue().newKeys) { newKeys.append(newKey).append("\t"); } System.out.println(entry.getKey() + "\t" + newKeys.toString()); } } /** * Returns whether or not a deprecated name has been warned. If the name is not * deprecated then always return false */ public static boolean hasWarnedDeprecation(String name) { DeprecationContext deprecations = deprecationContext.get(); if(deprecations.getDeprecatedKeyMap().containsKey(name)) { if(deprecations.getDeprecatedKeyMap().get(name).accessed.get()) { return true; } } return false; } }
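/*
 * Editor's note: a minimal, hypothetical usage sketch for the Configuration
 * accessors defined in the file above. The class name and every property key
 * ("my.app.*") are invented for illustration; the methods themselves
 * (set, getInt, getBoolean, getTimeDuration, setStrings, getTrimmedStrings,
 * writeXml) are the ones shown in this file.
 */
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class ConfigurationUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // set() trims the key and stores the value in the in-memory overlay.
    conf.set("my.app.name", "demo");

    // Typed getters return the supplied default when the key is unset.
    int threads = conf.getInt("my.app.threads", 4);
    boolean verbose = conf.getBoolean("my.app.verbose", false);

    // Durations accept unit suffixes (ns, us, ms, s, m, h, d) and are
    // converted to the requested TimeUnit on read.
    conf.set("my.app.timeout", "30s");
    long timeoutMs = conf.getTimeDuration("my.app.timeout", 1000, TimeUnit.MILLISECONDS);

    // Comma-delimited values round-trip through setStrings/getTrimmedStrings.
    conf.setStrings("my.app.hosts", "node1", "node2");
    String[] hosts = conf.getTrimmedStrings("my.app.hosts");

    System.out.println(threads + " " + verbose + " " + timeoutMs + "ms " + hosts.length);

    // Dump the non-default properties as XML, as main() above does.
    conf.writeXml(System.out);
  }
}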
100,529
33.031821
102
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configurable.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.conf;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/** Something that may be configured with a {@link Configuration}. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface Configurable {

  /** Set the configuration to be used by this object. */
  void setConf(Configuration conf);

  /** Return the configuration used by this object. */
  Configuration getConf();
}
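/*
 * Editor's note: a minimal, hypothetical sketch of a class that fulfils the
 * Configurable contract above. The class name ConfiguredGreeter and the
 * property "greeter.message" are invented for illustration and are not part
 * of Hadoop.
 */
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;

public class ConfiguredGreeter implements Configurable {
  private Configuration conf;

  @Override
  public void setConf(Configuration conf) {
    // Store the injected configuration; helpers such as
    // ReflectionUtils.newInstance(...) call setConf on Configurable objects
    // right after constructing them (see getInstances in Configuration above).
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }

  public String greeting() {
    // Read an illustrative property, falling back to a default.
    return conf.get("greeter.message", "hello");
  }
}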
1,296
36.057143
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationUtil.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.conf;

import java.util.Map;
import java.util.Collection;
import java.util.HashMap;

public class ReconfigurationUtil {

  public static class PropertyChange {
    public String prop;
    public String oldVal;
    public String newVal;

    public PropertyChange(String prop, String newVal, String oldVal) {
      this.prop = prop;
      this.newVal = newVal;
      this.oldVal = oldVal;
    }
  }

  public static Collection<PropertyChange>
      getChangedProperties(Configuration newConf, Configuration oldConf) {
    Map<String, PropertyChange> changes = new HashMap<String, PropertyChange>();

    // iterate over old configuration
    for (Map.Entry<String, String> oldEntry: oldConf) {
      String prop = oldEntry.getKey();
      String oldVal = oldEntry.getValue();
      String newVal = newConf.getRaw(prop);

      if (newVal == null || !newVal.equals(oldVal)) {
        changes.put(prop, new PropertyChange(prop, newVal, oldVal));
      }
    }

    // now iterate over new configuration
    // (to look for properties not present in old conf)
    for (Map.Entry<String, String> newEntry: newConf) {
      String prop = newEntry.getKey();
      String newVal = newEntry.getValue();
      if (oldConf.get(prop) == null) {
        changes.put(prop, new PropertyChange(prop, newVal, null));
      }
    }

    return changes.values();
  }

  public Collection<PropertyChange> parseChangedProperties(
      Configuration newConf, Configuration oldConf) {
    return getChangedProperties(newConf, oldConf);
  }
}
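/*
 * Editor's note: a short, hypothetical sketch of diffing two Configuration
 * objects with getChangedProperties above. The property keys ("demo.*") are
 * invented for illustration.
 */
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurationUtil;
import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;

public class ReconfigurationDiffSketch {
  public static void main(String[] args) {
    // 'false' skips loading the default resources, keeping the diff small.
    Configuration oldConf = new Configuration(false);
    Configuration newConf = new Configuration(false);

    oldConf.set("demo.level", "1");
    newConf.set("demo.level", "2");      // value changed
    newConf.set("demo.extra", "added");  // key only present in the new conf

    Collection<PropertyChange> changes =
        ReconfigurationUtil.getChangedProperties(newConf, oldConf);
    for (PropertyChange c : changes) {
      System.out.println(c.prop + ": " + c.oldVal + " -> " + c.newVal);
    }
  }
}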
2,363
32.295775
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.io.OutputStream; import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.net.InetSocketAddress; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import javax.net.SocketFactory; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataOutputOutputStream; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.Client.ConnectionId; import org.apache.hadoop.ipc.RPC.RpcInvoker; import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.ProtoUtil; import org.apache.hadoop.util.Time; import org.apache.htrace.Trace; import org.apache.htrace.TraceScope; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.BlockingService; import com.google.protobuf.CodedOutputStream; import com.google.protobuf.Descriptors.MethodDescriptor; import com.google.protobuf.GeneratedMessage; import com.google.protobuf.Message; import com.google.protobuf.ServiceException; import com.google.protobuf.TextFormat; /** * RPC Engine for for protobuf based RPCs. 
*/ @InterfaceStability.Evolving public class ProtobufRpcEngine implements RpcEngine { public static final Log LOG = LogFactory.getLog(ProtobufRpcEngine.class); static { // Register the rpcRequest deserializer for WritableRpcEngine org.apache.hadoop.ipc.Server.registerProtocolEngine( RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcRequestWrapper.class, new Server.ProtoBufRpcInvoker()); } private static final ClientCache CLIENTS = new ClientCache(); public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int rpcTimeout) throws IOException { return getProxy(protocol, clientVersion, addr, ticket, conf, factory, rpcTimeout, null); } @Override public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy ) throws IOException { return getProxy(protocol, clientVersion, addr, ticket, conf, factory, rpcTimeout, connectionRetryPolicy, null); } @Override @SuppressWarnings("unchecked") public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy, AtomicBoolean fallbackToSimpleAuth) throws IOException { final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory, rpcTimeout, connectionRetryPolicy, fallbackToSimpleAuth); return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance( protocol.getClassLoader(), new Class[]{protocol}, invoker), false); } @Override public ProtocolProxy<ProtocolMetaInfoPB> getProtocolMetaInfoProxy( ConnectionId connId, Configuration conf, SocketFactory factory) throws IOException { Class<ProtocolMetaInfoPB> protocol = ProtocolMetaInfoPB.class; return new ProtocolProxy<ProtocolMetaInfoPB>(protocol, (ProtocolMetaInfoPB) Proxy.newProxyInstance(protocol.getClassLoader(), new Class[] { protocol }, new Invoker(protocol, connId, conf, factory)), false); } private static class Invoker implements RpcInvocationHandler { private final Map<String, Message> returnTypes = new ConcurrentHashMap<String, Message>(); private boolean isClosed = false; private final Client.ConnectionId remoteId; private final Client client; private final long clientProtocolVersion; private final String protocolName; private AtomicBoolean fallbackToSimpleAuth; private Invoker(Class<?> protocol, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy, AtomicBoolean fallbackToSimpleAuth) throws IOException { this(protocol, Client.ConnectionId.getConnectionId( addr, protocol, ticket, rpcTimeout, connectionRetryPolicy, conf), conf, factory); this.fallbackToSimpleAuth = fallbackToSimpleAuth; } /** * This constructor takes a connectionId, instead of creating a new one. 
*/ private Invoker(Class<?> protocol, Client.ConnectionId connId, Configuration conf, SocketFactory factory) { this.remoteId = connId; this.client = CLIENTS.getClient(conf, factory, RpcResponseWrapper.class); this.protocolName = RPC.getProtocolName(protocol); this.clientProtocolVersion = RPC .getProtocolVersion(protocol); } private RequestHeaderProto constructRpcRequestHeader(Method method) { RequestHeaderProto.Builder builder = RequestHeaderProto .newBuilder(); builder.setMethodName(method.getName()); // For protobuf, {@code protocol} used when creating client side proxy is // the interface extending BlockingInterface, which has the annotations // such as ProtocolName etc. // // Using Method.getDeclaringClass(), as in WritableEngine to get at // the protocol interface will return BlockingInterface, from where // the annotation ProtocolName and Version cannot be // obtained. // // Hence we simply use the protocol class used to create the proxy. // For PB this may limit the use of mixins on client side. builder.setDeclaringClassProtocolName(protocolName); builder.setClientProtocolVersion(clientProtocolVersion); return builder.build(); } /** * This is the client side invoker of RPC method. It only throws * ServiceException, since the invocation proxy expects only * ServiceException to be thrown by the method in case protobuf service. * * ServiceException has the following causes: * <ol> * <li>Exceptions encountered on the client side in this method are * set as cause in ServiceException as is.</li> * <li>Exceptions from the server are wrapped in RemoteException and are * set as cause in ServiceException</li> * </ol> * * Note that the client calling protobuf RPC methods, must handle * ServiceException by getting the cause from the ServiceException. If the * cause is RemoteException, then unwrap it to get the exception thrown by * the server. */ @Override public Object invoke(Object proxy, Method method, Object[] args) throws ServiceException { long startTime = 0; if (LOG.isDebugEnabled()) { startTime = Time.now(); } if (args.length != 2) { // RpcController + Message throw new ServiceException("Too many parameters for request. Method: [" + method.getName() + "]" + ", Expected: 2, Actual: " + args.length); } if (args[1] == null) { throw new ServiceException("null param while calling Method: [" + method.getName() + "]"); } TraceScope traceScope = null; // if Tracing is on then start a new span for this rpc. // guard it in the if statement to make sure there isn't // any extra string manipulation. 
if (Trace.isTracing()) { traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method)); } RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method); if (LOG.isTraceEnabled()) { LOG.trace(Thread.currentThread().getId() + ": Call -> " + remoteId + ": " + method.getName() + " {" + TextFormat.shortDebugString((Message) args[1]) + "}"); } Message theRequest = (Message) args[1]; final RpcResponseWrapper val; try { val = (RpcResponseWrapper) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER, new RpcRequestWrapper(rpcRequestHeader, theRequest), remoteId, fallbackToSimpleAuth); } catch (Throwable e) { if (LOG.isTraceEnabled()) { LOG.trace(Thread.currentThread().getId() + ": Exception <- " + remoteId + ": " + method.getName() + " {" + e + "}"); } if (Trace.isTracing()) { traceScope.getSpan().addTimelineAnnotation( "Call got exception: " + e.toString()); } throw new ServiceException(e); } finally { if (traceScope != null) traceScope.close(); } if (LOG.isDebugEnabled()) { long callTime = Time.now() - startTime; LOG.debug("Call: " + method.getName() + " took " + callTime + "ms"); } Message prototype = null; try { prototype = getReturnProtoType(method); } catch (Exception e) { throw new ServiceException(e); } Message returnMessage; try { returnMessage = prototype.newBuilderForType() .mergeFrom(val.theResponseRead).build(); if (LOG.isTraceEnabled()) { LOG.trace(Thread.currentThread().getId() + ": Response <- " + remoteId + ": " + method.getName() + " {" + TextFormat.shortDebugString(returnMessage) + "}"); } } catch (Throwable e) { throw new ServiceException(e); } return returnMessage; } @Override public void close() throws IOException { if (!isClosed) { isClosed = true; CLIENTS.stopClient(client); } } private Message getReturnProtoType(Method method) throws Exception { if (returnTypes.containsKey(method.getName())) { return returnTypes.get(method.getName()); } Class<?> returnType = method.getReturnType(); Method newInstMethod = returnType.getMethod("getDefaultInstance"); newInstMethod.setAccessible(true); Message prototype = (Message) newInstMethod.invoke(null, (Object[]) null); returnTypes.put(method.getName(), prototype); return prototype; } @Override //RpcInvocationHandler public ConnectionId getConnectionId() { return remoteId; } } interface RpcWrapper extends Writable { int getLength(); } /** * Wrapper for Protocol Buffer Requests * * Note while this wrapper is writable, the request on the wire is in * Protobuf. Several methods on {@link org.apache.hadoop.ipc.Server and RPC} * use type Writable as a wrapper to work across multiple RpcEngine kinds. 
*/ private static abstract class RpcMessageWithHeader<T extends GeneratedMessage> implements RpcWrapper { T requestHeader; Message theRequest; // for clientSide, the request is here byte[] theRequestRead; // for server side, the request is here public RpcMessageWithHeader() { } public RpcMessageWithHeader(T requestHeader, Message theRequest) { this.requestHeader = requestHeader; this.theRequest = theRequest; } @Override public void write(DataOutput out) throws IOException { OutputStream os = DataOutputOutputStream.constructOutputStream(out); ((Message)requestHeader).writeDelimitedTo(os); theRequest.writeDelimitedTo(os); } @Override public void readFields(DataInput in) throws IOException { requestHeader = parseHeaderFrom(readVarintBytes(in)); theRequestRead = readMessageRequest(in); } abstract T parseHeaderFrom(byte[] bytes) throws IOException; byte[] readMessageRequest(DataInput in) throws IOException { return readVarintBytes(in); } private static byte[] readVarintBytes(DataInput in) throws IOException { final int length = ProtoUtil.readRawVarint32(in); final byte[] bytes = new byte[length]; in.readFully(bytes); return bytes; } public T getMessageHeader() { return requestHeader; } public byte[] getMessageBytes() { return theRequestRead; } @Override public int getLength() { int headerLen = requestHeader.getSerializedSize(); int reqLen; if (theRequest != null) { reqLen = theRequest.getSerializedSize(); } else if (theRequestRead != null ) { reqLen = theRequestRead.length; } else { throw new IllegalArgumentException( "getLength on uninitialized RpcWrapper"); } return CodedOutputStream.computeRawVarint32Size(headerLen) + headerLen + CodedOutputStream.computeRawVarint32Size(reqLen) + reqLen; } } private static class RpcRequestWrapper extends RpcMessageWithHeader<RequestHeaderProto> { @SuppressWarnings("unused") public RpcRequestWrapper() {} public RpcRequestWrapper( RequestHeaderProto requestHeader, Message theRequest) { super(requestHeader, theRequest); } @Override RequestHeaderProto parseHeaderFrom(byte[] bytes) throws IOException { return RequestHeaderProto.parseFrom(bytes); } @Override public String toString() { return requestHeader.getDeclaringClassProtocolName() + "." + requestHeader.getMethodName(); } } @InterfaceAudience.LimitedPrivate({"RPC"}) public static class RpcRequestMessageWrapper extends RpcMessageWithHeader<RpcRequestHeaderProto> { public RpcRequestMessageWrapper() {} public RpcRequestMessageWrapper( RpcRequestHeaderProto requestHeader, Message theRequest) { super(requestHeader, theRequest); } @Override RpcRequestHeaderProto parseHeaderFrom(byte[] bytes) throws IOException { return RpcRequestHeaderProto.parseFrom(bytes); } } @InterfaceAudience.LimitedPrivate({"RPC"}) public static class RpcResponseMessageWrapper extends RpcMessageWithHeader<RpcResponseHeaderProto> { public RpcResponseMessageWrapper() {} public RpcResponseMessageWrapper( RpcResponseHeaderProto responseHeader, Message theRequest) { super(responseHeader, theRequest); } @Override byte[] readMessageRequest(DataInput in) throws IOException { // error message contain no message body switch (requestHeader.getStatus()) { case ERROR: case FATAL: return null; default: return super.readMessageRequest(in); } } @Override RpcResponseHeaderProto parseHeaderFrom(byte[] bytes) throws IOException { return RpcResponseHeaderProto.parseFrom(bytes); } } /** * Wrapper for Protocol Buffer Responses * * Note while this wrapper is writable, the request on the wire is in * Protobuf. 
Several methods on {@link org.apache.hadoop.ipc.Server and RPC} * use type Writable as a wrapper to work across multiple RpcEngine kinds. */ @InterfaceAudience.LimitedPrivate({"RPC"}) // temporarily exposed public static class RpcResponseWrapper implements RpcWrapper { Message theResponse; // for senderSide, the response is here byte[] theResponseRead; // for receiver side, the response is here public RpcResponseWrapper() { } public RpcResponseWrapper(Message message) { this.theResponse = message; } @Override public void write(DataOutput out) throws IOException { OutputStream os = DataOutputOutputStream.constructOutputStream(out); theResponse.writeDelimitedTo(os); } @Override public void readFields(DataInput in) throws IOException { int length = ProtoUtil.readRawVarint32(in); theResponseRead = new byte[length]; in.readFully(theResponseRead); } @Override public int getLength() { int resLen; if (theResponse != null) { resLen = theResponse.getSerializedSize(); } else if (theResponseRead != null ) { resLen = theResponseRead.length; } else { throw new IllegalArgumentException( "getLength on uninitialized RpcWrapper"); } return CodedOutputStream.computeRawVarint32Size(resLen) + resLen; } } @VisibleForTesting @InterfaceAudience.Private @InterfaceStability.Unstable static Client getClient(Configuration conf) { return CLIENTS.getClient(conf, SocketFactory.getDefault(), RpcResponseWrapper.class); } @Override public RPC.Server getServer(Class<?> protocol, Object protocolImpl, String bindAddress, int port, int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose, Configuration conf, SecretManager<? extends TokenIdentifier> secretManager, String portRangeConfig) throws IOException { return new Server(protocol, protocolImpl, conf, bindAddress, port, numHandlers, numReaders, queueSizePerHandler, verbose, secretManager, portRangeConfig); } public static class Server extends RPC.Server { /** * Construct an RPC server. * * @param protocolClass the class of protocol * @param protocolImpl the protocolImpl whose methods will be called * @param conf the configuration to use * @param bindAddress the address to bind on to listen for connection * @param port the port to listen for connections on * @param numHandlers the number of method handler threads to run * @param verbose whether each call should be logged * @param portRangeConfig A config parameter that can be used to restrict * the range of ports used when port is 0 (an ephemeral port) */ public Server(Class<?> protocolClass, Object protocolImpl, Configuration conf, String bindAddress, int port, int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose, SecretManager<? 
extends TokenIdentifier> secretManager, String portRangeConfig) throws IOException { super(bindAddress, port, null, numHandlers, numReaders, queueSizePerHandler, conf, classNameBase(protocolImpl .getClass().getName()), secretManager, portRangeConfig); this.verbose = verbose; registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass, protocolImpl); } /** * Protobuf invoker for {@link RpcInvoker} */ static class ProtoBufRpcInvoker implements RpcInvoker { private static ProtoClassProtoImpl getProtocolImpl(RPC.Server server, String protoName, long clientVersion) throws RpcServerException { ProtoNameVer pv = new ProtoNameVer(protoName, clientVersion); ProtoClassProtoImpl impl = server.getProtocolImplMap(RPC.RpcKind.RPC_PROTOCOL_BUFFER).get(pv); if (impl == null) { // no match for Protocol AND Version VerProtocolImpl highest = server.getHighestSupportedProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protoName); if (highest == null) { throw new RpcNoSuchProtocolException( "Unknown protocol: " + protoName); } // protocol supported but not the version that client wants throw new RPC.VersionMismatch(protoName, clientVersion, highest.version); } return impl; } @Override /** * This is a server side method, which is invoked over RPC. On success * the returned response has the protobuf response payload. On failure, the * exception name and the stack trace are returned in the response. * See {@link HadoopRpcResponseProto} * * In this method there are three types of exceptions possible and they are * returned in the response as follows. * <ol> * <li> Exceptions encountered in this method that are returned * as {@link RpcServerException} </li> * <li> Exceptions thrown by the service are wrapped in ServiceException. * In that case this method returns in the response the exception thrown by the * service.</li> * <li> Other exceptions thrown by the service. 
They are returned * as is.</li> * </ol> */ public Writable call(RPC.Server server, String protocol, Writable writableRequest, long receiveTime) throws Exception { RpcRequestWrapper request = (RpcRequestWrapper) writableRequest; RequestHeaderProto rpcRequest = request.requestHeader; String methodName = rpcRequest.getMethodName(); String protoName = rpcRequest.getDeclaringClassProtocolName(); long clientVersion = rpcRequest.getClientProtocolVersion(); if (server.verbose) LOG.info("Call: protocol=" + protocol + ", method=" + methodName); ProtoClassProtoImpl protocolImpl = getProtocolImpl(server, protoName, clientVersion); BlockingService service = (BlockingService) protocolImpl.protocolImpl; MethodDescriptor methodDescriptor = service.getDescriptorForType() .findMethodByName(methodName); if (methodDescriptor == null) { String msg = "Unknown method " + methodName + " called on " + protocol + " protocol."; LOG.warn(msg); throw new RpcNoSuchMethodException(msg); } Message prototype = service.getRequestPrototype(methodDescriptor); Message param = prototype.newBuilderForType() .mergeFrom(request.theRequestRead).build(); Message result; long startTime = Time.now(); int qTime = (int) (startTime - receiveTime); Exception exception = null; try { server.rpcDetailedMetrics.init(protocolImpl.protocolClass); result = service.callBlockingMethod(methodDescriptor, null, param); } catch (ServiceException e) { exception = (Exception) e.getCause(); throw (Exception) e.getCause(); } catch (Exception e) { exception = e; throw e; } finally { int processingTime = (int) (Time.now() - startTime); if (LOG.isDebugEnabled()) { String msg = "Served: " + methodName + " queueTime= " + qTime + " processingTime= " + processingTime; if (exception != null) { msg += " exception= " + exception.getClass().getSimpleName(); } LOG.debug(msg); } String detailedMetricsName = (exception == null) ? methodName : exception.getClass().getSimpleName(); server.rpcMetrics.addRpcQueueTime(qTime); server.rpcMetrics.addRpcProcessingTime(processingTime); server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName, processingTime); } return new RpcResponseWrapper(result); } } } }
24,399
36.770898
82
java
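As an illustration of the Invoker.invoke() contract documented in the ProtobufRpcEngine record above: callers of protobuf stubs must unwrap ServiceException themselves. A minimal client-side sketch, assuming a hypothetical generated blocking stub named "stub" with an echo() method and EchoRequestProto/EchoResponseProto message types (none of which appear in the file above), might look like this:

    // Hypothetical translator-side sketch; stub, echo(), EchoRequestProto and
    // EchoResponseProto are illustrative names, not part of the file above.
    try {
      EchoResponseProto resp = stub.echo(null, EchoRequestProto.getDefaultInstance());
      return resp;
    } catch (ServiceException se) {
      // Per the invoke() javadoc, the real failure is carried as the cause;
      // ProtobufHelper.getRemoteException(se) unwraps an IOException cause.
      throw ProtobufHelper.getRemoteException(se);
    }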
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import java.io.IOException; import java.net.InetSocketAddress; import java.util.concurrent.atomic.AtomicBoolean; import javax.net.SocketFactory; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.ipc.Client.ConnectionId; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.TokenIdentifier; /** An RPC implementation. */ @InterfaceStability.Evolving public interface RpcEngine { /** Construct a client-side proxy object. * @param <T>*/ <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy) throws IOException; /** Construct a client-side proxy object. */ <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion, InetSocketAddress addr, UserGroupInformation ticket, Configuration conf, SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy, AtomicBoolean fallbackToSimpleAuth) throws IOException; /** * Construct a server for a protocol implementation instance. * * @param protocol the class of protocol to use * @param instance the instance of protocol whose methods will be called * @param conf the configuration to use * @param bindAddress the address to bind on to listen for connection * @param port the port to listen for connections on * @param numHandlers the number of method handler threads to run * @param numReaders the number of reader threads to run * @param queueSizePerHandler the size of the queue per hander thread * @param verbose whether each call should be logged * @param secretManager The secret manager to use to validate incoming requests. * @param portRangeConfig A config parameter that can be used to restrict * the range of ports used when port is 0 (an ephemeral port) * @return The Server instance * @throws IOException on any error */ RPC.Server getServer(Class<?> protocol, Object instance, String bindAddress, int port, int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose, Configuration conf, SecretManager<? extends TokenIdentifier> secretManager, String portRangeConfig ) throws IOException; /** * Returns a proxy for ProtocolMetaInfoPB, which uses the given connection * id. * @param connId, ConnectionId to be used for the proxy. * @param conf, Configuration. * @param factory, Socket factory. * @return Proxy object. 
* @throws IOException */ ProtocolProxy<ProtocolMetaInfoPB> getProtocolMetaInfoProxy( ConnectionId connId, Configuration conf, SocketFactory factory) throws IOException; }
4,029
41.87234
82
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import java.lang.reflect.Constructor; import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; /** * Abstracts queue operations for different blocking queues. */ public class CallQueueManager<E> { public static final Log LOG = LogFactory.getLog(CallQueueManager.class); // Number of checkpoints for empty queue. private static final int CHECKPOINT_NUM = 20; // Interval to check empty queue. private static final long CHECKPOINT_INTERVAL_MS = 10; @SuppressWarnings("unchecked") static <E> Class<? extends BlockingQueue<E>> convertQueueClass( Class<?> queueClass, Class<E> elementClass) { return (Class<? extends BlockingQueue<E>>)queueClass; } private final boolean clientBackOffEnabled; // Atomic refs point to active callQueue // We have two so we can better control swapping private final AtomicReference<BlockingQueue<E>> putRef; private final AtomicReference<BlockingQueue<E>> takeRef; public CallQueueManager(Class<? extends BlockingQueue<E>> backingClass, boolean clientBackOffEnabled, int maxQueueSize, String namespace, Configuration conf) { BlockingQueue<E> bq = createCallQueueInstance(backingClass, maxQueueSize, namespace, conf); this.clientBackOffEnabled = clientBackOffEnabled; this.putRef = new AtomicReference<BlockingQueue<E>>(bq); this.takeRef = new AtomicReference<BlockingQueue<E>>(bq); LOG.info("Using callQueue " + backingClass); } private <T extends BlockingQueue<E>> T createCallQueueInstance( Class<T> theClass, int maxLen, String ns, Configuration conf) { // Used for custom, configurable callqueues try { Constructor<T> ctor = theClass.getDeclaredConstructor(int.class, String.class, Configuration.class); return ctor.newInstance(maxLen, ns, conf); } catch (RuntimeException e) { throw e; } catch (Exception e) { } // Used for LinkedBlockingQueue, ArrayBlockingQueue, etc try { Constructor<T> ctor = theClass.getDeclaredConstructor(int.class); return ctor.newInstance(maxLen); } catch (RuntimeException e) { throw e; } catch (Exception e) { } // Last attempt try { Constructor<T> ctor = theClass.getDeclaredConstructor(); return ctor.newInstance(); } catch (RuntimeException e) { throw e; } catch (Exception e) { } // Nothing worked throw new RuntimeException(theClass.getName() + " could not be constructed."); } boolean isClientBackoffEnabled() { return clientBackOffEnabled; } /** * Insert e into the backing queue or block until we can. * If we block and the queue changes on us, we will insert while the * queue is drained. 
*/ public void put(E e) throws InterruptedException { putRef.get().put(e); } /** * Insert e into the backing queue. * Return true if e is queued. * Return false if the queue is full. */ public boolean offer(E e) throws InterruptedException { return putRef.get().offer(e); } /** * Retrieve an E from the backing queue or block until we can. * Guaranteed to return an element from the current queue. */ public E take() throws InterruptedException { E e = null; while (e == null) { e = takeRef.get().poll(1000L, TimeUnit.MILLISECONDS); } return e; } public int size() { return takeRef.get().size(); } /** * Replaces active queue with the newly requested one and transfers * all calls to the newQ before returning. */ public synchronized void swapQueue( Class<? extends BlockingQueue<E>> queueClassToUse, int maxSize, String ns, Configuration conf) { BlockingQueue<E> newQ = createCallQueueInstance(queueClassToUse, maxSize, ns, conf); // Our current queue becomes the old queue BlockingQueue<E> oldQ = putRef.get(); // Swap putRef first: allow blocked puts() to be unblocked putRef.set(newQ); // Wait for handlers to drain the oldQ while (!queueIsReallyEmpty(oldQ)) {} // Swap takeRef to handle new calls takeRef.set(newQ); LOG.info("Old Queue: " + stringRepr(oldQ) + ", " + "Replacement: " + stringRepr(newQ)); } /** * Checks if queue is empty by checking at CHECKPOINT_NUM points with * CHECKPOINT_INTERVAL_MS interval. * This doesn't mean the queue might not fill up at some point later, but * it should decrease the probability that we lose a call this way. */ private boolean queueIsReallyEmpty(BlockingQueue<?> q) { for (int i = 0; i < CHECKPOINT_NUM; i++) { try { Thread.sleep(CHECKPOINT_INTERVAL_MS); } catch (InterruptedException ie) { return false; } if (!q.isEmpty()) { return false; } } return true; } private String stringRepr(Object o) { return o.getClass().getName() + '@' + Integer.toHexString(o.hashCode()); } }
5,951
30.492063
84
java
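As an illustration of the reflective constructor lookup in the CallQueueManager record above, a standard java.util.concurrent queue can be plugged in directly. A minimal usage sketch, assuming code living in the org.apache.hadoop.ipc package and example values for the namespace and capacity:

    // Hypothetical usage sketch; "ipc.8020" and the capacity of 100 are examples only.
    Configuration conf = new Configuration();
    Class<? extends BlockingQueue<String>> qClass =
        CallQueueManager.convertQueueClass(LinkedBlockingQueue.class, String.class);
    CallQueueManager<String> callQueue =
        new CallQueueManager<String>(qClass, false, 100, "ipc.8020", conf);
    callQueue.put("call-1");         // blocks if the backing queue is full
    String next = callQueue.take();  // blocks until an element is available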
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInfoPB.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService; /** * Protocol to get versions and signatures for supported protocols from the * server. * * Note: This extends the protocolbuffer service based interface to * add annotations. */ @ProtocolInfo( protocolName = "org.apache.hadoop.ipc.ProtocolMetaInfoPB", protocolVersion = 1) public interface ProtocolMetaInfoPB extends ProtocolInfoService.BlockingInterface { }
1,299
36.142857
77
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"}) @InterfaceStability.Evolving package org.apache.hadoop.ipc; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability;
1,051
44.73913
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ipc;

/**
 * Indicates an exception in the RPC client.
 */
public class RpcClientException extends RpcException {
  private static final long serialVersionUID = 1L;

  /**
   * Constructs an exception with the specified detail message.
   *
   * @param message the detail message.
   */
  RpcClientException(final String message) {
    super(message);
  }

  /**
   * Constructs an exception with the specified detail message and cause.
   *
   * @param message the detail message.
   * @param cause the cause (which can be retrieved later by the
   *        {@link #getCause()} method). (A <tt>null</tt> value is permitted,
   *        and indicates that the cause is nonexistent or unknown.)
   */
  RpcClientException(final String message, final Throwable cause) {
    super(message, cause);
  }
}
1,652
33.4375
79
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolTranslator.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import org.apache.hadoop.classification.InterfaceAudience; /** * An interface implemented by client-side protocol translators to get the * underlying proxy object the translator is operating on. */ @InterfaceAudience.Private public interface ProtocolTranslator { /** * Return the proxy object underlying this protocol translator. * @return the proxy object underlying this protocol translator. */ public Object getUnderlyingProxyObject(); }
1,300
35.138889
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import java.lang.ref.WeakReference; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Timer; import java.util.TimerTask; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.metrics2.util.MBeans; import org.codehaus.jackson.map.ObjectMapper; import com.google.common.annotations.VisibleForTesting; /** * The decay RPC scheduler counts incoming requests in a map, then * decays the counts at a fixed time interval. The scheduler is optimized * for large periods (on the order of seconds), as it offloads work to the * decay sweep. */ public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean { /** * Period controls how many milliseconds between each decay sweep. */ public static final String IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY = "faircallqueue.decay-scheduler.period-ms"; public static final long IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_DEFAULT = 5000L; /** * Decay factor controls how much each count is suppressed by on each sweep. * Valid numbers are > 0 and < 1. Decay factor works in tandem with period * to control how long the scheduler remembers an identity. */ public static final String IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY = "faircallqueue.decay-scheduler.decay-factor"; public static final double IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT = 0.5; /** * Thresholds are specified as integer percentages, and specify which usage * range each queue will be allocated to. For instance, specifying the list * 10, 40, 80 * implies 4 queues, with * - q3 from 80% up * - q2 from 40 up to 80 * - q1 from 10 up to 40 * - q0 otherwise. */ public static final String IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY = "faircallqueue.decay-scheduler.thresholds"; // Specifies the identity to use when the IdentityProvider cannot handle // a schedulable. 
public static final String DECAYSCHEDULER_UNKNOWN_IDENTITY = "IdentityProvider.Unknown"; public static final Log LOG = LogFactory.getLog(DecayRpcScheduler.class); // Track the number of calls for each schedulable identity private final ConcurrentHashMap<Object, AtomicLong> callCounts = new ConcurrentHashMap<Object, AtomicLong>(); // Should be the sum of all AtomicLongs in callCounts private final AtomicLong totalCalls = new AtomicLong(); // Pre-computed scheduling decisions during the decay sweep are // atomically swapped in as a read-only map private final AtomicReference<Map<Object, Integer>> scheduleCacheRef = new AtomicReference<Map<Object, Integer>>(); // Tune the behavior of the scheduler private final long decayPeriodMillis; // How long between each tick private final double decayFactor; // nextCount = currentCount / decayFactor private final int numQueues; // affects scheduling decisions, from 0 to numQueues - 1 private final double[] thresholds; private final IdentityProvider identityProvider; /** * This TimerTask will call decayCurrentCounts until * the scheduler has been garbage collected. */ public static class DecayTask extends TimerTask { private WeakReference<DecayRpcScheduler> schedulerRef; private Timer timer; public DecayTask(DecayRpcScheduler scheduler, Timer timer) { this.schedulerRef = new WeakReference<DecayRpcScheduler>(scheduler); this.timer = timer; } @Override public void run() { DecayRpcScheduler sched = schedulerRef.get(); if (sched != null) { sched.decayCurrentCounts(); } else { // Our scheduler was garbage collected since it is no longer in use, // so we should terminate the timer as well timer.cancel(); timer.purge(); } } } /** * Create a decay scheduler. * @param numQueues number of queues to schedule for * @param ns config prefix, so that we can configure multiple schedulers * in a single instance. * @param conf configuration to use. */ public DecayRpcScheduler(int numQueues, String ns, Configuration conf) { if (numQueues < 1) { throw new IllegalArgumentException("number of queues must be > 0"); } this.numQueues = numQueues; this.decayFactor = parseDecayFactor(ns, conf); this.decayPeriodMillis = parseDecayPeriodMillis(ns, conf); this.identityProvider = this.parseIdentityProvider(ns, conf); this.thresholds = parseThresholds(ns, conf, numQueues); // Setup delay timer Timer timer = new Timer(); DecayTask task = new DecayTask(this, timer); timer.scheduleAtFixedRate(task, 0, this.decayPeriodMillis); MetricsProxy prox = MetricsProxy.getInstance(ns); prox.setDelegate(this); } // Load configs private IdentityProvider parseIdentityProvider(String ns, Configuration conf) { List<IdentityProvider> providers = conf.getInstances( ns + "." + CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY, IdentityProvider.class); if (providers.size() < 1) { LOG.info("IdentityProvider not specified, " + "defaulting to UserIdentityProvider"); return new UserIdentityProvider(); } return providers.get(0); // use the first } private static double parseDecayFactor(String ns, Configuration conf) { double factor = conf.getDouble(ns + "." + IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY, IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT ); if (factor <= 0 || factor >= 1) { throw new IllegalArgumentException("Decay Factor " + "must be between 0 and 1"); } return factor; } private static long parseDecayPeriodMillis(String ns, Configuration conf) { long period = conf.getLong(ns + "." 
+ IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_DEFAULT ); if (period <= 0) { throw new IllegalArgumentException("Period millis must be >= 0"); } return period; } private static double[] parseThresholds(String ns, Configuration conf, int numQueues) { int[] percentages = conf.getInts(ns + "." + IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY); if (percentages.length == 0) { return getDefaultThresholds(numQueues); } else if (percentages.length != numQueues-1) { throw new IllegalArgumentException("Number of thresholds should be " + (numQueues-1) + ". Was: " + percentages.length); } // Convert integer percentages to decimals double[] decimals = new double[percentages.length]; for (int i = 0; i < percentages.length; i++) { decimals[i] = percentages[i] / 100.0; } return decimals; } /** * Generate default thresholds if user did not specify. Strategy is * to halve each time, since queue usage tends to be exponential. * So if numQueues is 4, we would generate: double[]{0.125, 0.25, 0.5} * which specifies the boundaries between each queue's usage. * @param numQueues number of queues to compute for * @return array of boundaries of length numQueues - 1 */ private static double[] getDefaultThresholds(int numQueues) { double[] ret = new double[numQueues - 1]; double div = Math.pow(2, numQueues - 1); for (int i = 0; i < ret.length; i++) { ret[i] = Math.pow(2, i)/div; } return ret; } /** * Decay the stored counts for each user and clean as necessary. * This method should be called periodically in order to keep * counts current. */ private void decayCurrentCounts() { long total = 0; Iterator<Map.Entry<Object, AtomicLong>> it = callCounts.entrySet().iterator(); while (it.hasNext()) { Map.Entry<Object, AtomicLong> entry = it.next(); AtomicLong count = entry.getValue(); // Compute the next value by reducing it by the decayFactor long currentValue = count.get(); long nextValue = (long)(currentValue * decayFactor); total += nextValue; count.set(nextValue); if (nextValue == 0) { // We will clean up unused keys here. An interesting optimization might // be to have an upper bound on keyspace in callCounts and only // clean once we pass it. it.remove(); } } // Update the total so that we remain in sync totalCalls.set(total); // Now refresh the cache of scheduling decisions recomputeScheduleCache(); } /** * Update the scheduleCache to match current conditions in callCounts. */ private void recomputeScheduleCache() { Map<Object, Integer> nextCache = new HashMap<Object, Integer>(); for (Map.Entry<Object, AtomicLong> entry : callCounts.entrySet()) { Object id = entry.getKey(); AtomicLong value = entry.getValue(); long snapshot = value.get(); int computedLevel = computePriorityLevel(snapshot); nextCache.put(id, computedLevel); } // Swap in to activate scheduleCacheRef.set(Collections.unmodifiableMap(nextCache)); } /** * Get the number of occurrences and increment atomically. * @param identity the identity of the user to increment * @return the value before incrementation */ private long getAndIncrement(Object identity) throws InterruptedException { // We will increment the count, or create it if no such count exists AtomicLong count = this.callCounts.get(identity); if (count == null) { // Create the count since no such count exists. 
count = new AtomicLong(0); // Put it in, or get the AtomicInteger that was put in by another thread AtomicLong otherCount = callCounts.putIfAbsent(identity, count); if (otherCount != null) { count = otherCount; } } // Update the total totalCalls.getAndIncrement(); // At this point value is guaranteed to be not null. It may however have // been clobbered from callCounts. Nonetheless, we return what // we have. return count.getAndIncrement(); } /** * Given the number of occurrences, compute a scheduling decision. * @param occurrences how many occurrences * @return scheduling decision from 0 to numQueues - 1 */ private int computePriorityLevel(long occurrences) { long totalCallSnapshot = totalCalls.get(); double proportion = 0; if (totalCallSnapshot > 0) { proportion = (double) occurrences / totalCallSnapshot; } // Start with low priority queues, since they will be most common for(int i = (numQueues - 1); i > 0; i--) { if (proportion >= this.thresholds[i - 1]) { return i; // We've found our queue number } } // If we get this far, we're at queue 0 return 0; } /** * Returns the priority level for a given identity by first trying the cache, * then computing it. * @param identity an object responding to toString and hashCode * @return integer scheduling decision from 0 to numQueues - 1 */ private int cachedOrComputedPriorityLevel(Object identity) { try { long occurrences = this.getAndIncrement(identity); // Try the cache Map<Object, Integer> scheduleCache = scheduleCacheRef.get(); if (scheduleCache != null) { Integer priority = scheduleCache.get(identity); if (priority != null) { return priority; } } // Cache was no good, compute it return computePriorityLevel(occurrences); } catch (InterruptedException ie) { LOG.warn("Caught InterruptedException, returning low priority queue"); return numQueues - 1; } } /** * Compute the appropriate priority for a schedulable based on past requests. * @param obj the schedulable obj to query and remember * @return the queue index which we recommend scheduling in */ @Override public int getPriorityLevel(Schedulable obj) { // First get the identity String identity = this.identityProvider.makeIdentity(obj); if (identity == null) { // Identity provider did not handle this identity = DECAYSCHEDULER_UNKNOWN_IDENTITY; } return cachedOrComputedPriorityLevel(identity); } // For testing @VisibleForTesting public double getDecayFactor() { return decayFactor; } @VisibleForTesting public long getDecayPeriodMillis() { return decayPeriodMillis; } @VisibleForTesting public double[] getThresholds() { return thresholds; } @VisibleForTesting public void forceDecay() { decayCurrentCounts(); } @VisibleForTesting public Map<Object, Long> getCallCountSnapshot() { HashMap<Object, Long> snapshot = new HashMap<Object, Long>(); for (Map.Entry<Object, AtomicLong> entry : callCounts.entrySet()) { snapshot.put(entry.getKey(), entry.getValue().get()); } return Collections.unmodifiableMap(snapshot); } @VisibleForTesting public long getTotalCallSnapshot() { return totalCalls.get(); } /** * MetricsProxy is a singleton because we may init multiple schedulers and we * want to clean up resources when a new scheduler replaces the old one. 
*/ private static final class MetricsProxy implements DecayRpcSchedulerMXBean { // One singleton per namespace private static final HashMap<String, MetricsProxy> INSTANCES = new HashMap<String, MetricsProxy>(); // Weakref for delegate, so we don't retain it forever if it can be GC'd private WeakReference<DecayRpcScheduler> delegate; private MetricsProxy(String namespace) { MBeans.register(namespace, "DecayRpcScheduler", this); } public static synchronized MetricsProxy getInstance(String namespace) { MetricsProxy mp = INSTANCES.get(namespace); if (mp == null) { // We must create one mp = new MetricsProxy(namespace); INSTANCES.put(namespace, mp); } return mp; } public void setDelegate(DecayRpcScheduler obj) { this.delegate = new WeakReference<DecayRpcScheduler>(obj); } @Override public String getSchedulingDecisionSummary() { DecayRpcScheduler scheduler = delegate.get(); if (scheduler == null) { return "No Active Scheduler"; } else { return scheduler.getSchedulingDecisionSummary(); } } @Override public String getCallVolumeSummary() { DecayRpcScheduler scheduler = delegate.get(); if (scheduler == null) { return "No Active Scheduler"; } else { return scheduler.getCallVolumeSummary(); } } @Override public int getUniqueIdentityCount() { DecayRpcScheduler scheduler = delegate.get(); if (scheduler == null) { return -1; } else { return scheduler.getUniqueIdentityCount(); } } @Override public long getTotalCallVolume() { DecayRpcScheduler scheduler = delegate.get(); if (scheduler == null) { return -1; } else { return scheduler.getTotalCallVolume(); } } } public int getUniqueIdentityCount() { return callCounts.size(); } public long getTotalCallVolume() { return totalCalls.get(); } public String getSchedulingDecisionSummary() { Map<Object, Integer> decisions = scheduleCacheRef.get(); if (decisions == null) { return "{}"; } else { try { ObjectMapper om = new ObjectMapper(); return om.writeValueAsString(decisions); } catch (Exception e) { return "Error: " + e.getMessage(); } } } public String getCallVolumeSummary() { try { ObjectMapper om = new ObjectMapper(); return om.writeValueAsString(callCounts); } catch (Exception e) { return "Error: " + e.getMessage(); } } }
16,877
31.271511
87
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import java.io.IOException; import java.lang.reflect.Constructor; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto; import org.xml.sax.Attributes; public class RemoteException extends IOException { /** this value should not be defined in RpcHeader.proto so that protobuf will return a null */ private static final int UNSPECIFIED_ERROR = -1; /** For java.io.Serializable */ private static final long serialVersionUID = 1L; private final int errorCode; private final String className; /** * @param className wrapped exception, may be null * @param msg may be null */ public RemoteException(String className, String msg) { this(className, msg, null); } /** * @param className wrapped exception, may be null * @param msg may be null * @param erCode may be null */ public RemoteException(String className, String msg, RpcErrorCodeProto erCode) { super(msg); this.className = className; if (erCode != null) errorCode = erCode.getNumber(); else errorCode = UNSPECIFIED_ERROR; } /** * @return the class name for the wrapped exception; may be null if none was given. */ public String getClassName() { return className; } /** * @return may be null if the code was newer than our protobuf definitions or none was given. */ public RpcErrorCodeProto getErrorCode() { return RpcErrorCodeProto.valueOf(errorCode); } /** * If this remote exception wraps up one of the lookupTypes * then return this exception. * <p> * Unwraps any IOException. * * @param lookupTypes the desired exception class. may be null. * @return IOException, which is either the lookupClass exception or this. */ public IOException unwrapRemoteException(Class<?>... lookupTypes) { if(lookupTypes == null) return this; for(Class<?> lookupClass : lookupTypes) { if(!lookupClass.getName().equals(getClassName())) continue; try { return instantiateException(lookupClass.asSubclass(IOException.class)); } catch(Exception e) { // cannot instantiate lookupClass, just return this return this; } } // wrapped up exception is not in lookupTypes, just return this return this; } /** * Instantiate and return the exception wrapped up by this remote exception. * * <p> This unwraps any <code>Throwable</code> that has a constructor taking * a <code>String</code> as a parameter. * Otherwise it returns this. * * @return <code>Throwable */ public IOException unwrapRemoteException() { try { Class<?> realClass = Class.forName(getClassName()); return instantiateException(realClass.asSubclass(IOException.class)); } catch(Exception e) { // cannot instantiate the original exception, just return this } return this; } private IOException instantiateException(Class<? extends IOException> cls) throws Exception { Constructor<? 
extends IOException> cn = cls.getConstructor(String.class); cn.setAccessible(true); IOException ex = cn.newInstance(this.getMessage()); ex.initCause(this); return ex; } /** * Create RemoteException from attributes * @param attrs may not be null */ public static RemoteException valueOf(Attributes attrs) { return new RemoteException(attrs.getValue("class"), attrs.getValue("message")); } @Override public String toString() { return getClass().getName() + "(" + className + "): " + getMessage(); } }
4,430
30.65
96
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import com.google.protobuf.ServiceException; /** * Helper methods for protobuf related RPC implementation */ @InterfaceAudience.Private public class ProtobufHelper { private ProtobufHelper() { // Hidden constructor for class with only static helper methods } /** * Return the IOException thrown by the remote server wrapped in * ServiceException as cause. * @param se ServiceException that wraps IO exception thrown by the server * @return Exception wrapped in ServiceException or * a new IOException that wraps the unexpected ServiceException. */ public static IOException getRemoteException(ServiceException se) { Throwable e = se.getCause(); if (e == null) { return new IOException(se); } return e instanceof IOException ? (IOException) e : new IOException(se); } }
1,764
34.3
76
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Schedulable.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import java.nio.ByteBuffer; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.io.Writable; /** * Interface which allows extracting information necessary to * create schedulable identity strings. */ @InterfaceAudience.Private public interface Schedulable { public UserGroupInformation getUserGroupInformation(); }
1,250
34.742857
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolProxy.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import java.io.IOException; import java.lang.reflect.Method; import java.util.HashSet; /** * a class wraps around a server's proxy, * containing a list of its supported methods. * * A list of methods with a value of null indicates that the client and server * have the same protocol. */ public class ProtocolProxy<T> { private Class<T> protocol; private T proxy; private HashSet<Integer> serverMethods = null; final private boolean supportServerMethodCheck; private boolean serverMethodsFetched = false; /** * Constructor * * @param protocol protocol class * @param proxy its proxy * @param supportServerMethodCheck If false proxy will never fetch server * methods and isMethodSupported will always return true. If true, * server methods will be fetched for the first call to * isMethodSupported. */ public ProtocolProxy(Class<T> protocol, T proxy, boolean supportServerMethodCheck) { this.protocol = protocol; this.proxy = proxy; this.supportServerMethodCheck = supportServerMethodCheck; } private void fetchServerMethods(Method method) throws IOException { long clientVersion; clientVersion = RPC.getProtocolVersion(method.getDeclaringClass()); int clientMethodsHash = ProtocolSignature.getFingerprint(method .getDeclaringClass().getMethods()); ProtocolSignature serverInfo = ((VersionedProtocol) proxy) .getProtocolSignature(RPC.getProtocolName(protocol), clientVersion, clientMethodsHash); long serverVersion = serverInfo.getVersion(); if (serverVersion != clientVersion) { throw new RPC.VersionMismatch(protocol.getName(), clientVersion, serverVersion); } int[] serverMethodsCodes = serverInfo.getMethods(); if (serverMethodsCodes != null) { serverMethods = new HashSet<Integer>(serverMethodsCodes.length); for (int m : serverMethodsCodes) { this.serverMethods.add(Integer.valueOf(m)); } } serverMethodsFetched = true; } /* * Get the proxy */ public T getProxy() { return proxy; } /** * Check if a method is supported by the server or not * * @param methodName a method's name in String format * @param parameterTypes a method's parameter types * @return true if the method is supported by the server */ public synchronized boolean isMethodSupported(String methodName, Class<?>... parameterTypes) throws IOException { if (!supportServerMethodCheck) { return true; } Method method; try { method = protocol.getDeclaredMethod(methodName, parameterTypes); } catch (SecurityException e) { throw new IOException(e); } catch (NoSuchMethodException e) { throw new IOException(e); } if (!serverMethodsFetched) { fetchServerMethods(method); } if (serverMethods == null) { // client & server have the same protocol return true; } return serverMethods.contains( Integer.valueOf(ProtocolSignature.getFingerprint(method))); } }
3,951
32.777778
78
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcSchedulerMXBean.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ipc;

/**
 * Provides metrics for Decay scheduler.
 */
public interface DecayRpcSchedulerMXBean {
  // Get an overview of the requests in history.
  String getSchedulingDecisionSummary();
  String getCallVolumeSummary();
  int getUniqueIdentityCount();
  long getTotalCallVolume();
}
1,117
36.266667
75
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcNoSuchProtocolException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ipc;

import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;

/**
 * No such protocol (i.e. interface) for an Rpc Call
 *
 */
public class RpcNoSuchProtocolException extends RpcServerException {
  private static final long serialVersionUID = 1L;

  public RpcNoSuchProtocolException(final String message) {
    super(message);
  }

  /**
   * get the rpc status corresponding to this exception
   */
  public RpcStatusProto getRpcStatusProto() {
    return RpcStatusProto.ERROR;
  }

  /**
   * get the detailed rpc status corresponding to this exception
   */
  public RpcErrorCodeProto getRpcErrorCodeProto() {
    return RpcErrorCodeProto.ERROR_NO_SUCH_PROTOCOL;
  }
}
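A hedged sketch of where an exception like this is typically raised: a server-side lookup that cannot find the requested protocol. The registry map and helper method are illustrative stand-ins, not the real Hadoop server internals.

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.ipc.RpcNoSuchProtocolException;

/** Hypothetical server-side lookup showing where this exception fits. */
public class ProtocolRegistrySketch {
  /** The registry map is illustrative; the real server keeps its own tables. */
  static Object getProtocolImpl(Map<String, Object> registry, String protocolName)
      throws IOException {
    Object impl = registry.get(protocolName);
    if (impl == null) {
      // Reported back to the client as ERROR_NO_SUCH_PROTOCOL.
      throw new RpcNoSuchProtocolException("Unknown protocol: " + protocolName);
    }
    return impl;
  }
}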
1,641
33.93617
95
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetriableException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ipc;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceStability;

/**
 * Exception thrown by a server typically to indicate that the server is
 * temporarily in a state where the request cannot be processed (such as
 * still starting up). The client may retry the request. If the service is
 * up, the server may be able to process a retried request.
 */
@InterfaceStability.Evolving
public class RetriableException extends IOException {
  private static final long serialVersionUID = 1915561725516487301L;

  public RetriableException(Exception e) {
    super(e);
  }

  public RetriableException(String msg) {
    super(msg);
  }
}
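A minimal retry-loop sketch for the client side, under the assumption that the exception reaches the caller directly; in practice it may arrive wrapped in a RemoteException and need unwrapping first, and the retry count and backoff below are arbitrary choices rather than Hadoop defaults.

import java.util.concurrent.Callable;
import org.apache.hadoop.ipc.RetriableException;

public class RetriableCallExample {
  /** Retry count and backoff are arbitrary illustrative choices. */
  static <T> T callWithRetries(Callable<T> call, int maxRetries) throws Exception {
    for (int attempt = 0; ; attempt++) {
      try {
        return call.call();
      } catch (RetriableException e) {
        // The server cannot serve the request right now (e.g. still starting
        // up); back off and try again unless attempts are exhausted.
        if (attempt >= maxRetries) {
          throw e;
        }
        Thread.sleep(1000L * (attempt + 1));
      }
    }
  }
}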
1,489
34.47619
80
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshHandler.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ipc;

import org.apache.hadoop.classification.InterfaceStability;

/**
 * Used to register custom methods to refresh at runtime.
 */
@InterfaceStability.Unstable
public interface RefreshHandler {
  /**
   * Implement this method to accept refresh requests from the administrator.
   * @param identifier is the identifier you registered earlier
   * @param args contains a list of string args from the administrator
   * @throws Exception as a shorthand for a RefreshResponse(-1, message)
   * @return a RefreshResponse
   */
  RefreshResponse handleRefresh(String identifier, String[] args);
}
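A possible handler implementation, assuming a hypothetical in-memory blacklist to reload; the RefreshResponse(-1, message) failure convention follows the javadoc above, and the registration call shown in the trailing comment is how handlers are typically wired up, to be checked against the Hadoop version in use.

import org.apache.hadoop.ipc.RefreshHandler;
import org.apache.hadoop.ipc.RefreshResponse;

/** Illustrative handler that reloads a hypothetical in-memory blacklist. */
public class BlacklistRefreshHandler implements RefreshHandler {
  @Override
  public RefreshResponse handleRefresh(String identifier, String[] args) {
    try {
      reloadBlacklist(args); // hypothetical helper, stands in for real reload logic
      return new RefreshResponse(0, "blacklist reloaded");
    } catch (Exception e) {
      // A non-zero return code reports the failure back to the admin tool.
      return new RefreshResponse(-1, e.getMessage());
    }
  }

  private void reloadBlacklist(String[] args) {
    // placeholder for the actual reload
  }
}

// Typical wiring at service start-up (verify against your Hadoop version):
// RefreshRegistry.defaultRegistry().register("reloadBlacklist",
//     new BlacklistRefreshHandler());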
1,427
38.666667
77
java
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.ipc; import static org.apache.hadoop.ipc.RpcConstants.*; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.EOFException; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InterruptedIOException; import java.io.OutputStream; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Socket; import java.net.SocketTimeoutException; import java.net.UnknownHostException; import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.Hashtable; import java.util.Iterator; import java.util.Map.Entry; import java.util.Random; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import javax.net.SocketFactory; import javax.security.sasl.Sasl; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryPolicy.RetryAction; import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcRequestMessageWrapper; import org.apache.hadoop.ipc.RPC.RpcKind; import org.apache.hadoop.ipc.Server.AuthProtocol; import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto.OperationProto; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto; import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto; import 
org.apache.hadoop.net.ConnectTimeoutException; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.SaslRpcClient; import org.apache.hadoop.security.SaslRpcServer.AuthMethod; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ProtoUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.htrace.Trace; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.protobuf.CodedOutputStream; /** A client for an IPC service. IPC calls take a single {@link Writable} as a * parameter, and return a {@link Writable} as their value. A service runs on * a port and is defined by a parameter class and a value class. * * @see Server */ public class Client { public static final Log LOG = LogFactory.getLog(Client.class); /** A counter for generating call IDs. */ private static final AtomicInteger callIdCounter = new AtomicInteger(); private static final ThreadLocal<Integer> callId = new ThreadLocal<Integer>(); private static final ThreadLocal<Integer> retryCount = new ThreadLocal<Integer>(); /** Set call id and retry count for the next call. */ public static void setCallIdAndRetryCount(int cid, int rc) { Preconditions.checkArgument(cid != RpcConstants.INVALID_CALL_ID); Preconditions.checkState(callId.get() == null); Preconditions.checkArgument(rc != RpcConstants.INVALID_RETRY_COUNT); callId.set(cid); retryCount.set(rc); } private final Cache<ConnectionId, Connection> connections = CacheBuilder.newBuilder().build(); private Class<? extends Writable> valueClass; // class of call values private AtomicBoolean running = new AtomicBoolean(true); // if client runs final private Configuration conf; private SocketFactory socketFactory; // how to create sockets private int refCount = 1; private final int connectionTimeout; private final boolean fallbackAllowed; private final byte[] clientId; final static int CONNECTION_CONTEXT_CALL_ID = -3; /** * Executor on which IPC calls' parameters are sent. * Deferring the sending of parameters to a separate * thread isolates them from thread interruptions in the * calling code. */ private final ExecutorService sendParamsExecutor; private final static ClientExecutorServiceFactory clientExcecutorFactory = new ClientExecutorServiceFactory(); private static class ClientExecutorServiceFactory { private int executorRefCount = 0; private ExecutorService clientExecutor = null; /** * Get Executor on which IPC calls' parameters are sent. * If the internal reference counter is zero, this method * creates the instance of Executor. If not, this method * just returns the reference of clientExecutor. * * @return An ExecutorService instance */ synchronized ExecutorService refAndGetInstance() { if (executorRefCount == 0) { clientExecutor = Executors.newCachedThreadPool( new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat("IPC Parameter Sending Thread #%d") .build()); } executorRefCount++; return clientExecutor; } /** * Cleanup Executor on which IPC calls' parameters are sent. * If reference counter is zero, this method discards the * instance of the Executor. If not, this method * just decrements the internal reference counter. * * @return An ExecutorService instance if it exists. * Null is returned if not. 
*/ synchronized ExecutorService unrefAndCleanup() { executorRefCount--; assert(executorRefCount >= 0); if (executorRefCount == 0) { clientExecutor.shutdown(); try { if (!clientExecutor.awaitTermination(1, TimeUnit.MINUTES)) { clientExecutor.shutdownNow(); } } catch (InterruptedException e) { LOG.error("Interrupted while waiting for clientExecutor" + "to stop", e); clientExecutor.shutdownNow(); } clientExecutor = null; } return clientExecutor; } }; /** * set the ping interval value in configuration * * @param conf Configuration * @param pingInterval the ping interval */ final public static void setPingInterval(Configuration conf, int pingInterval) { conf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, pingInterval); } /** * Get the ping interval from configuration; * If not set in the configuration, return the default value. * * @param conf Configuration * @return the ping interval */ final public static int getPingInterval(Configuration conf) { return conf.getInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY, CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT); } /** * The time after which a RPC will timeout. * If ping is not enabled (via ipc.client.ping), then the timeout value is the * same as the pingInterval. * If ping is enabled, then there is no timeout value. * * @param conf Configuration * @return the timeout period in milliseconds. -1 if no timeout value is set */ final public static int getTimeout(Configuration conf) { if (!conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT)) { return getPingInterval(conf); } return -1; } /** * set the connection timeout value in configuration * * @param conf Configuration * @param timeout the socket connect timeout value */ public static final void setConnectTimeout(Configuration conf, int timeout) { conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY, timeout); } /** * Increment this client's reference count * */ synchronized void incCount() { refCount++; } /** * Decrement this client's reference count * */ synchronized void decCount() { refCount--; } /** * Return if this client has no reference * * @return true if this client has no reference; false otherwise */ synchronized boolean isZeroReference() { return refCount==0; } /** Check the rpc response header. 
*/ void checkResponse(RpcResponseHeaderProto header) throws IOException { if (header == null) { throw new EOFException("Response is null."); } if (header.hasClientId()) { // check client IDs final byte[] id = header.getClientId().toByteArray(); if (!Arrays.equals(id, RpcConstants.DUMMY_CLIENT_ID)) { if (!Arrays.equals(id, clientId)) { throw new IOException("Client IDs not matched: local ID=" + StringUtils.byteToHexString(clientId) + ", ID in response=" + StringUtils.byteToHexString(header.getClientId().toByteArray())); } } } } Call createCall(RPC.RpcKind rpcKind, Writable rpcRequest) { return new Call(rpcKind, rpcRequest); } /** * Class that represents an RPC call */ static class Call { final int id; // call id final int retry; // retry count final Writable rpcRequest; // the serialized rpc request Writable rpcResponse; // null if rpc has error IOException error; // exception, null if success final RPC.RpcKind rpcKind; // Rpc EngineKind boolean done; // true when call is done private Call(RPC.RpcKind rpcKind, Writable param) { this.rpcKind = rpcKind; this.rpcRequest = param; final Integer id = callId.get(); if (id == null) { this.id = nextCallId(); } else { callId.set(null); this.id = id; } final Integer rc = retryCount.get(); if (rc == null) { this.retry = 0; } else { this.retry = rc; } } /** Indicate when the call is complete and the * value or error are available. Notifies by default. */ protected synchronized void callComplete() { this.done = true; notify(); // notify caller } /** Set the exception when there is an error. * Notify the caller the call is done. * * @param error exception thrown by the call; either local or remote */ public synchronized void setException(IOException error) { this.error = error; callComplete(); } /** Set the return value when there is no error. * Notify the caller the call is done. * * @param rpcResponse return value of the rpc call. */ public synchronized void setRpcResponse(Writable rpcResponse) { this.rpcResponse = rpcResponse; callComplete(); } public synchronized Writable getRpcResponse() { return rpcResponse; } } /** Thread that reads responses and notifies callers. Each connection owns a * socket connected to a remote address. Calls are multiplexed through this * socket: responses may be delivered out of order. 
*/ private class Connection extends Thread { private InetSocketAddress server; // server ip:port private final ConnectionId remoteId; // connection id private AuthMethod authMethod; // authentication method private AuthProtocol authProtocol; private int serviceClass; private SaslRpcClient saslRpcClient; private Socket socket = null; // connected socket private DataInputStream in; private DataOutputStream out; private int rpcTimeout; private int maxIdleTime; //connections will be culled if it was idle for //maxIdleTime msecs private final RetryPolicy connectionRetryPolicy; private final int maxRetriesOnSasl; private int maxRetriesOnSocketTimeouts; private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm private final boolean tcpLowLatency; // if T then use low-delay QoS private boolean doPing; //do we need to send ping message private int pingInterval; // how often sends ping to the server in msecs private ByteArrayOutputStream pingRequest; // ping message // currently active calls private Hashtable<Integer, Call> calls = new Hashtable<Integer, Call>(); private AtomicLong lastActivity = new AtomicLong();// last I/O activity time private AtomicBoolean shouldCloseConnection = new AtomicBoolean(); // indicate if the connection is closed private IOException closeException; // close reason private final Object sendRpcRequestLock = new Object(); public Connection(ConnectionId remoteId, int serviceClass) throws IOException { this.remoteId = remoteId; this.server = remoteId.getAddress(); if (server.isUnresolved()) { throw NetUtils.wrapException(server.getHostName(), server.getPort(), null, 0, new UnknownHostException()); } this.rpcTimeout = remoteId.getRpcTimeout(); this.maxIdleTime = remoteId.getMaxIdleTime(); this.connectionRetryPolicy = remoteId.connectionRetryPolicy; this.maxRetriesOnSasl = remoteId.getMaxRetriesOnSasl(); this.maxRetriesOnSocketTimeouts = remoteId.getMaxRetriesOnSocketTimeouts(); this.tcpNoDelay = remoteId.getTcpNoDelay(); this.tcpLowLatency = remoteId.getTcpLowLatency(); this.doPing = remoteId.getDoPing(); if (doPing) { // construct a RPC header with the callId as the ping callId pingRequest = new ByteArrayOutputStream(); RpcRequestHeaderProto pingHeader = ProtoUtil .makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER, OperationProto.RPC_FINAL_PACKET, PING_CALL_ID, RpcConstants.INVALID_RETRY_COUNT, clientId); pingHeader.writeDelimitedTo(pingRequest); } this.pingInterval = remoteId.getPingInterval(); this.serviceClass = serviceClass; if (LOG.isDebugEnabled()) { LOG.debug("The ping interval is " + this.pingInterval + " ms."); } UserGroupInformation ticket = remoteId.getTicket(); // try SASL if security is enabled or if the ugi contains tokens. // this causes a SIMPLE client with tokens to attempt SASL boolean trySasl = UserGroupInformation.isSecurityEnabled() || (ticket != null && !ticket.getTokens().isEmpty()); this.authProtocol = trySasl ? AuthProtocol.SASL : AuthProtocol.NONE; this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " + server.toString() + " from " + ((ticket==null)?"an unknown user":ticket.getUserName())); this.setDaemon(true); } /** Update lastActivity with the current time. */ private void touch() { lastActivity.set(Time.now()); } /** * Add a call to this connection's call queue and notify * a listener; synchronized. * Returns false if called during shutdown. * @param call to add * @return true if the call was added. 
*/ private synchronized boolean addCall(Call call) { if (shouldCloseConnection.get()) return false; calls.put(call.id, call); notify(); return true; } /** This class sends a ping to the remote side when timeout on * reading. If no failure is detected, it retries until at least * a byte is read. */ private class PingInputStream extends FilterInputStream { /* constructor */ protected PingInputStream(InputStream in) { super(in); } /* Process timeout exception * if the connection is not going to be closed or * is not configured to have a RPC timeout, send a ping. * (if rpcTimeout is not set to be 0, then RPC should timeout. * otherwise, throw the timeout exception. */ private void handleTimeout(SocketTimeoutException e) throws IOException { if (shouldCloseConnection.get() || !running.get() || rpcTimeout > 0) { throw e; } else { sendPing(); } } /** Read a byte from the stream. * Send a ping if timeout on read. Retries if no failure is detected * until a byte is read. * @throws IOException for any IO problem other than socket timeout */ @Override public int read() throws IOException { do { try { return super.read(); } catch (SocketTimeoutException e) { handleTimeout(e); } } while (true); } /** Read bytes into a buffer starting from offset <code>off</code> * Send a ping if timeout on read. Retries if no failure is detected * until a byte is read. * * @return the total number of bytes read; -1 if the connection is closed. */ @Override public int read(byte[] buf, int off, int len) throws IOException { do { try { return super.read(buf, off, len); } catch (SocketTimeoutException e) { handleTimeout(e); } } while (true); } } private synchronized void disposeSasl() { if (saslRpcClient != null) { try { saslRpcClient.dispose(); saslRpcClient = null; } catch (IOException ignored) { } } } private synchronized boolean shouldAuthenticateOverKrb() throws IOException { UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); UserGroupInformation realUser = currentUser.getRealUser(); if (authMethod == AuthMethod.KERBEROS && loginUser != null && // Make sure user logged in using Kerberos either keytab or TGT loginUser.hasKerberosCredentials() && // relogin only in case it is the login user (e.g. JT) // or superuser (like oozie). (loginUser.equals(currentUser) || loginUser.equals(realUser))) { return true; } return false; } private synchronized AuthMethod setupSaslConnection(final InputStream in2, final OutputStream out2) throws IOException { // Do not use Client.conf here! We must use ConnectionId.conf, since the // Client object is cached and shared between all RPC clients, even those // for separate services. saslRpcClient = new SaslRpcClient(remoteId.getTicket(), remoteId.getProtocol(), remoteId.getAddress(), remoteId.conf); return saslRpcClient.saslConnect(in2, out2); } /** * Update the server address if the address corresponding to the host * name has changed. * * @return true if an addr change was detected. * @throws IOException when the hostname cannot be resolved. */ private synchronized boolean updateAddress() throws IOException { // Do a fresh lookup with the old host name. InetSocketAddress currentAddr = NetUtils.createSocketAddrForHost( server.getHostName(), server.getPort()); if (!server.equals(currentAddr)) { LOG.warn("Address change detected. 
Old: " + server.toString() + " New: " + currentAddr.toString()); server = currentAddr; return true; } return false; } private synchronized void setupConnection() throws IOException { short ioFailures = 0; short timeoutFailures = 0; while (true) { try { this.socket = socketFactory.createSocket(); this.socket.setTcpNoDelay(tcpNoDelay); this.socket.setKeepAlive(true); if (tcpLowLatency) { /* * This allows intermediate switches to shape IPC traffic * differently from Shuffle/HDFS DataStreamer traffic. * * IPTOS_RELIABILITY (0x04) | IPTOS_LOWDELAY (0x10) * * Prefer to optimize connect() speed & response latency over net * throughput. */ this.socket.setTrafficClass(0x04 | 0x10); this.socket.setPerformancePreferences(1, 2, 0); } /* * Bind the socket to the host specified in the principal name of the * client, to ensure Server matching address of the client connection * to host name in principal passed. */ UserGroupInformation ticket = remoteId.getTicket(); if (ticket != null && ticket.hasKerberosCredentials()) { KerberosInfo krbInfo = remoteId.getProtocol().getAnnotation(KerberosInfo.class); if (krbInfo != null && krbInfo.clientPrincipal() != null) { String host = SecurityUtil.getHostFromPrincipal(remoteId.getTicket().getUserName()); // If host name is a valid local address then bind socket to it InetAddress localAddr = NetUtils.getLocalInetAddress(host); if (localAddr != null) { this.socket.bind(new InetSocketAddress(localAddr, 0)); } } } NetUtils.connect(this.socket, server, connectionTimeout); if (rpcTimeout > 0) { pingInterval = rpcTimeout; // rpcTimeout overwrites pingInterval } this.socket.setSoTimeout(pingInterval); return; } catch (ConnectTimeoutException toe) { /* Check for an address change and update the local reference. * Reset the failure counter if the address was changed */ if (updateAddress()) { timeoutFailures = ioFailures = 0; } handleConnectionTimeout(timeoutFailures++, maxRetriesOnSocketTimeouts, toe); } catch (IOException ie) { if (updateAddress()) { timeoutFailures = ioFailures = 0; } handleConnectionFailure(ioFailures++, ie); } } } /** * If multiple clients with the same principal try to connect to the same * server at the same time, the server assumes a replay attack is in * progress. This is a feature of kerberos. In order to work around this, * what is done is that the client backs off randomly and tries to initiate * the connection again. The other problem is to do with ticket expiry. To * handle that, a relogin is attempted. 
*/ private synchronized void handleSaslConnectionFailure( final int currRetries, final int maxRetries, final Exception ex, final Random rand, final UserGroupInformation ugi) throws IOException, InterruptedException { ugi.doAs(new PrivilegedExceptionAction<Object>() { @Override public Object run() throws IOException, InterruptedException { final short MAX_BACKOFF = 5000; closeConnection(); disposeSasl(); if (shouldAuthenticateOverKrb()) { if (currRetries < maxRetries) { if(LOG.isDebugEnabled()) { LOG.debug("Exception encountered while connecting to " + "the server : " + ex); } // try re-login if (UserGroupInformation.isLoginKeytabBased()) { UserGroupInformation.getLoginUser().reloginFromKeytab(); } else if (UserGroupInformation.isLoginTicketBased()) { UserGroupInformation.getLoginUser().reloginFromTicketCache(); } // have granularity of milliseconds //we are sleeping with the Connection lock held but since this //connection instance is being used for connecting to the server //in question, it is okay Thread.sleep((rand.nextInt(MAX_BACKOFF) + 1)); return null; } else { String msg = "Couldn't setup connection for " + UserGroupInformation.getLoginUser().getUserName() + " to " + remoteId; LOG.warn(msg, ex); throw (IOException) new IOException(msg).initCause(ex); } } else { LOG.warn("Exception encountered while connecting to " + "the server : " + ex); } if (ex instanceof RemoteException) throw (RemoteException) ex; throw new IOException(ex); } }); } /** Connect to the server and set up the I/O streams. It then sends * a header to the server and starts * the connection thread that waits for responses. */ private synchronized void setupIOstreams( AtomicBoolean fallbackToSimpleAuth) { if (socket != null || shouldCloseConnection.get()) { return; } try { if (LOG.isDebugEnabled()) { LOG.debug("Connecting to "+server); } if (Trace.isTracing()) { Trace.addTimelineAnnotation("IPC client connecting to " + server); } short numRetries = 0; Random rand = null; while (true) { setupConnection(); InputStream inStream = NetUtils.getInputStream(socket); OutputStream outStream = NetUtils.getOutputStream(socket); writeConnectionHeader(outStream); if (authProtocol == AuthProtocol.SASL) { final InputStream in2 = inStream; final OutputStream out2 = outStream; UserGroupInformation ticket = remoteId.getTicket(); if (ticket.getRealUser() != null) { ticket = ticket.getRealUser(); } try { authMethod = ticket .doAs(new PrivilegedExceptionAction<AuthMethod>() { @Override public AuthMethod run() throws IOException, InterruptedException { return setupSaslConnection(in2, out2); } }); } catch (Exception ex) { authMethod = saslRpcClient.getAuthMethod(); if (rand == null) { rand = new Random(); } handleSaslConnectionFailure(numRetries++, maxRetriesOnSasl, ex, rand, ticket); continue; } if (authMethod != AuthMethod.SIMPLE) { // Sasl connect is successful. Let's set up Sasl i/o streams. 
inStream = saslRpcClient.getInputStream(inStream); outStream = saslRpcClient.getOutputStream(outStream); // for testing remoteId.saslQop = (String)saslRpcClient.getNegotiatedProperty(Sasl.QOP); LOG.debug("Negotiated QOP is :" + remoteId.saslQop); if (fallbackToSimpleAuth != null) { fallbackToSimpleAuth.set(false); } } else if (UserGroupInformation.isSecurityEnabled()) { if (!fallbackAllowed) { throw new IOException("Server asks us to fall back to SIMPLE " + "auth, but this client is configured to only allow secure " + "connections."); } if (fallbackToSimpleAuth != null) { fallbackToSimpleAuth.set(true); } } } if (doPing) { inStream = new PingInputStream(inStream); } this.in = new DataInputStream(new BufferedInputStream(inStream)); // SASL may have already buffered the stream if (!(outStream instanceof BufferedOutputStream)) { outStream = new BufferedOutputStream(outStream); } this.out = new DataOutputStream(outStream); writeConnectionContext(remoteId, authMethod); // update last activity time touch(); if (Trace.isTracing()) { Trace.addTimelineAnnotation("IPC client connected to " + server); } // start the receiver thread after the socket connection has been set // up start(); return; } } catch (Throwable t) { if (t instanceof IOException) { markClosed((IOException)t); } else { markClosed(new IOException("Couldn't set up IO streams", t)); } close(); } } private void closeConnection() { if (socket == null) { return; } // close the current connection try { socket.close(); } catch (IOException e) { LOG.warn("Not able to close a socket", e); } // set socket to null so that the next call to setupIOstreams // can start the process of connect all over again. socket = null; } /* Handle connection failures due to timeout on connect * * If the current number of retries is equal to the max number of retries, * stop retrying and throw the exception; Otherwise backoff 1 second and * try connecting again. * * This Method is only called from inside setupIOstreams(), which is * synchronized. Hence the sleep is synchronized; the locks will be retained. * * @param curRetries current number of retries * @param maxRetries max number of retries allowed * @param ioe failure reason * @throws IOException if max number of retries is reached */ private void handleConnectionTimeout( int curRetries, int maxRetries, IOException ioe) throws IOException { closeConnection(); // throw the exception if the maximum number of retries is reached if (curRetries >= maxRetries) { throw ioe; } LOG.info("Retrying connect to server: " + server + ". Already tried " + curRetries + " time(s); maxRetries=" + maxRetries); } private void handleConnectionFailure(int curRetries, IOException ioe ) throws IOException { closeConnection(); final RetryAction action; try { action = connectionRetryPolicy.shouldRetry(ioe, curRetries, 0, true); } catch(Exception e) { throw e instanceof IOException? (IOException)e: new IOException(e); } if (action.action == RetryAction.RetryDecision.FAIL) { if (action.reason != null) { LOG.warn("Failed to connect to server: " + server + ": " + action.reason, ioe); } throw ioe; } // Throw the exception if the thread is interrupted if (Thread.currentThread().isInterrupted()) { LOG.warn("Interrupted while trying for connection"); throw ioe; } try { Thread.sleep(action.delayMillis); } catch (InterruptedException e) { throw (IOException)new InterruptedIOException("Interrupted: action=" + action + ", retry policy=" + connectionRetryPolicy).initCause(e); } LOG.info("Retrying connect to server: " + server + ". 
Already tried " + curRetries + " time(s); retry policy is " + connectionRetryPolicy); } /** * Write the connection header - this is sent when connection is established * +----------------------------------+ * | "hrpc" 4 bytes | * +----------------------------------+ * | Version (1 byte) | * +----------------------------------+ * | Service Class (1 byte) | * +----------------------------------+ * | AuthProtocol (1 byte) | * +----------------------------------+ */ private void writeConnectionHeader(OutputStream outStream) throws IOException { DataOutputStream out = new DataOutputStream(new BufferedOutputStream(outStream)); // Write out the header, version and authentication method out.write(RpcConstants.HEADER.array()); out.write(RpcConstants.CURRENT_VERSION); out.write(serviceClass); out.write(authProtocol.callId); out.flush(); } /* Write the connection context header for each connection * Out is not synchronized because only the first thread does this. */ private void writeConnectionContext(ConnectionId remoteId, AuthMethod authMethod) throws IOException { // Write out the ConnectionHeader IpcConnectionContextProto message = ProtoUtil.makeIpcConnectionContext( RPC.getProtocolName(remoteId.getProtocol()), remoteId.getTicket(), authMethod); RpcRequestHeaderProto connectionContextHeader = ProtoUtil .makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER, OperationProto.RPC_FINAL_PACKET, CONNECTION_CONTEXT_CALL_ID, RpcConstants.INVALID_RETRY_COUNT, clientId); RpcRequestMessageWrapper request = new RpcRequestMessageWrapper(connectionContextHeader, message); // Write out the packet length out.writeInt(request.getLength()); request.write(out); } /* wait till someone signals us to start reading RPC response or * it is idle too long, it is marked as to be closed, * or the client is marked as not running. * * Return true if it is time to read a response; false otherwise. */ private synchronized boolean waitForWork() { if (calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { long timeout = maxIdleTime- (Time.now()-lastActivity.get()); if (timeout>0) { try { wait(timeout); } catch (InterruptedException e) {} } } if (!calls.isEmpty() && !shouldCloseConnection.get() && running.get()) { return true; } else if (shouldCloseConnection.get()) { return false; } else if (calls.isEmpty()) { // idle connection closed or stopped markClosed(null); return false; } else { // get stopped but there are still pending requests markClosed((IOException)new IOException().initCause( new InterruptedException())); return false; } } public InetSocketAddress getRemoteAddress() { return server; } /* Send a ping to the server if the time elapsed * since last I/O activity is equal to or greater than the ping interval */ private synchronized void sendPing() throws IOException { long curTime = Time.now(); if ( curTime - lastActivity.get() >= pingInterval) { lastActivity.set(curTime); synchronized (out) { out.writeInt(pingRequest.size()); pingRequest.writeTo(out); out.flush(); } } } @Override public void run() { if (LOG.isDebugEnabled()) LOG.debug(getName() + ": starting, having connections " + connections.size()); try { while (waitForWork()) {//wait here for work - read or close connection receiveRpcResponse(); } } catch (Throwable t) { // This truly is unexpected, since we catch IOException in receiveResponse // -- this is only to be really sure that we don't leave a client hanging // forever. 
LOG.warn("Unexpected error reading responses on connection " + this, t); markClosed(new IOException("Error reading responses", t)); } close(); if (LOG.isDebugEnabled()) LOG.debug(getName() + ": stopped, remaining connections " + connections.size()); } /** Initiates a rpc call by sending the rpc request to the remote server. * Note: this is not called from the Connection thread, but by other * threads. * @param call - the rpc request */ public void sendRpcRequest(final Call call) throws InterruptedException, IOException { if (shouldCloseConnection.get()) { return; } // Serialize the call to be sent. This is done from the actual // caller thread, rather than the sendParamsExecutor thread, // so that if the serialization throws an error, it is reported // properly. This also parallelizes the serialization. // // Format of a call on the wire: // 0) Length of rest below (1 + 2) // 1) RpcRequestHeader - is serialized Delimited hence contains length // 2) RpcRequest // // Items '1' and '2' are prepared here. final DataOutputBuffer d = new DataOutputBuffer(); RpcRequestHeaderProto header = ProtoUtil.makeRpcRequestHeader( call.rpcKind, OperationProto.RPC_FINAL_PACKET, call.id, call.retry, clientId); header.writeDelimitedTo(d); call.rpcRequest.write(d); synchronized (sendRpcRequestLock) { Future<?> senderFuture = sendParamsExecutor.submit(new Runnable() { @Override public void run() { try { synchronized (Connection.this.out) { if (shouldCloseConnection.get()) { return; } if (LOG.isDebugEnabled()) LOG.debug(getName() + " sending #" + call.id); byte[] data = d.getData(); int totalLength = d.getLength(); out.writeInt(totalLength); // Total Length out.write(data, 0, totalLength);// RpcRequestHeader + RpcRequest out.flush(); } } catch (IOException e) { // exception at this point would leave the connection in an // unrecoverable state (eg half a call left on the wire). // So, close the connection, killing any outstanding calls markClosed(e); } finally { //the buffer is just an in-memory buffer, but it is still polite to // close early IOUtils.closeStream(d); } } }); try { senderFuture.get(); } catch (ExecutionException e) { Throwable cause = e.getCause(); // cause should only be a RuntimeException as the Runnable above // catches IOException if (cause instanceof RuntimeException) { throw (RuntimeException) cause; } else { throw new RuntimeException("unexpected checked exception", cause); } } } } /* Receive a response. * Because only one receiver, so no synchronization on in. 
*/ private void receiveRpcResponse() { if (shouldCloseConnection.get()) { return; } touch(); try { int totalLen = in.readInt(); RpcResponseHeaderProto header = RpcResponseHeaderProto.parseDelimitedFrom(in); checkResponse(header); int headerLen = header.getSerializedSize(); headerLen += CodedOutputStream.computeRawVarint32Size(headerLen); int callId = header.getCallId(); if (LOG.isDebugEnabled()) LOG.debug(getName() + " got value #" + callId); Call call = calls.get(callId); RpcStatusProto status = header.getStatus(); if (status == RpcStatusProto.SUCCESS) { Writable value = ReflectionUtils.newInstance(valueClass, conf); value.readFields(in); // read value calls.remove(callId); call.setRpcResponse(value); // verify that length was correct // only for ProtobufEngine where len can be verified easily if (call.getRpcResponse() instanceof ProtobufRpcEngine.RpcWrapper) { ProtobufRpcEngine.RpcWrapper resWrapper = (ProtobufRpcEngine.RpcWrapper) call.getRpcResponse(); if (totalLen != headerLen + resWrapper.getLength()) { throw new RpcClientException( "RPC response length mismatch on rpc success"); } } } else { // Rpc Request failed // Verify that length was correct if (totalLen != headerLen) { throw new RpcClientException( "RPC response length mismatch on rpc error"); } final String exceptionClassName = header.hasExceptionClassName() ? header.getExceptionClassName() : "ServerDidNotSetExceptionClassName"; final String errorMsg = header.hasErrorMsg() ? header.getErrorMsg() : "ServerDidNotSetErrorMsg" ; final RpcErrorCodeProto erCode = (header.hasErrorDetail() ? header.getErrorDetail() : null); if (erCode == null) { LOG.warn("Detailed error code not set by server on rpc error"); } RemoteException re = new RemoteException(exceptionClassName, errorMsg, erCode); if (status == RpcStatusProto.ERROR) { calls.remove(callId); call.setException(re); } else if (status == RpcStatusProto.FATAL) { // Close the connection markClosed(re); } } } catch (IOException e) { markClosed(e); } } private synchronized void markClosed(IOException e) { if (shouldCloseConnection.compareAndSet(false, true)) { closeException = e; notifyAll(); } } /** Close the connection. */ private synchronized void close() { if (!shouldCloseConnection.get()) { LOG.error("The connection is not in the closed state"); return; } connections.invalidate(remoteId); // close the streams and therefore the socket IOUtils.closeStream(out); IOUtils.closeStream(in); disposeSasl(); // clean up all calls if (closeException == null) { if (!calls.isEmpty()) { LOG.warn( "A connection is closed for no cause and calls are not empty"); // clean up calls anyway closeException = new IOException("Unexpected closed connection"); cleanupCalls(); } } else { // log the info if (LOG.isDebugEnabled()) { LOG.debug("closing ipc connection to " + server + ": " + closeException.getMessage(),closeException); } // cleanup calls cleanupCalls(); } closeConnection(); if (LOG.isDebugEnabled()) LOG.debug(getName() + ": closed"); } /* Cleanup all calls and mark them as done */ private void cleanupCalls() { Iterator<Entry<Integer, Call>> itor = calls.entrySet().iterator() ; while (itor.hasNext()) { Call c = itor.next().getValue(); itor.remove(); c.setException(closeException); // local exception } } } /** Construct an IPC client whose values are of the given {@link Writable} * class. */ public Client(Class<? 
extends Writable> valueClass, Configuration conf, SocketFactory factory) { this.valueClass = valueClass; this.conf = conf; this.socketFactory = factory; this.connectionTimeout = conf.getInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY, CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT); this.fallbackAllowed = conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT); this.clientId = ClientId.getClientId(); this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance(); } /** * Construct an IPC client with the default SocketFactory * @param valueClass * @param conf */ public Client(Class<? extends Writable> valueClass, Configuration conf) { this(valueClass, conf, NetUtils.getDefaultSocketFactory(conf)); } /** Return the socket factory of this client * * @return this client's socket factory */ SocketFactory getSocketFactory() { return socketFactory; } /** Stop all threads related to this client. No further calls may be made * using this client. */ public void stop() { if (LOG.isDebugEnabled()) { LOG.debug("Stopping client"); } if (!running.compareAndSet(true, false)) { return; } // wake up all connections for (Connection conn : connections.asMap().values()) { conn.interrupt(); } // wait until all connections are closed while (connections.size() > 0) { try { Thread.sleep(100); } catch (InterruptedException e) { } } clientExcecutorFactory.unrefAndCleanup(); } /** * Same as {@link #call(RPC.RpcKind, Writable, ConnectionId)} * for RPC_BUILTIN */ public Writable call(Writable param, InetSocketAddress address) throws IOException { ConnectionId remoteId = ConnectionId.getConnectionId(address, null, null, 0, conf); return call(RpcKind.RPC_BUILTIN, param, remoteId); } /** * Same as {@link #call(RPC.RpcKind, Writable, InetSocketAddress, * Class, UserGroupInformation, int, Configuration)} * except that rpcKind is writable. */ public Writable call(Writable param, InetSocketAddress addr, Class<?> protocol, UserGroupInformation ticket, int rpcTimeout, Configuration conf) throws IOException { ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol, ticket, rpcTimeout, conf); return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId); } /** * Same as {@link #call(Writable, InetSocketAddress, * Class, UserGroupInformation, int, Configuration)} * except that specifying serviceClass. */ public Writable call(Writable param, InetSocketAddress addr, Class<?> protocol, UserGroupInformation ticket, int rpcTimeout, int serviceClass, Configuration conf) throws IOException { ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol, ticket, rpcTimeout, conf); return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId, serviceClass); } /** * Make a call, passing <code>param</code>, to the IPC server running at * <code>address</code> which is servicing the <code>protocol</code> protocol, * with the <code>ticket</code> credentials, <code>rpcTimeout</code> as * timeout and <code>conf</code> as conf for this connection, returning the * value. Throws exceptions if there are network problems or if the remote * code threw an exception. 
*/ public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress addr, Class<?> protocol, UserGroupInformation ticket, int rpcTimeout, Configuration conf) throws IOException { ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol, ticket, rpcTimeout, conf); return call(rpcKind, param, remoteId); } /** * Same as {link {@link #call(RPC.RpcKind, Writable, ConnectionId)} * except the rpcKind is RPC_BUILTIN */ public Writable call(Writable param, ConnectionId remoteId) throws IOException { return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId); } /** * Make a call, passing <code>rpcRequest</code>, to the IPC server defined by * <code>remoteId</code>, returning the rpc respond. * * @param rpcKind * @param rpcRequest - contains serialized method and method parameters * @param remoteId - the target rpc server * @returns the rpc response * Throws exceptions if there are network problems or if the remote code * threw an exception. */ public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest, ConnectionId remoteId) throws IOException { return call(rpcKind, rpcRequest, remoteId, RPC.RPC_SERVICE_CLASS_DEFAULT); } /** * Make a call, passing <code>rpcRequest</code>, to the IPC server defined by * <code>remoteId</code>, returning the rpc respond. * * @param rpcKind * @param rpcRequest - contains serialized method and method parameters * @param remoteId - the target rpc server * @param fallbackToSimpleAuth - set to true or false during this method to * indicate if a secure client falls back to simple auth * @returns the rpc response * Throws exceptions if there are network problems or if the remote code * threw an exception. */ public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest, ConnectionId remoteId, AtomicBoolean fallbackToSimpleAuth) throws IOException { return call(rpcKind, rpcRequest, remoteId, RPC.RPC_SERVICE_CLASS_DEFAULT, fallbackToSimpleAuth); } /** * Make a call, passing <code>rpcRequest</code>, to the IPC server defined by * <code>remoteId</code>, returning the rpc response. * * @param rpcKind * @param rpcRequest - contains serialized method and method parameters * @param remoteId - the target rpc server * @param serviceClass - service class for RPC * @returns the rpc response * Throws exceptions if there are network problems or if the remote code * threw an exception. */ public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest, ConnectionId remoteId, int serviceClass) throws IOException { return call(rpcKind, rpcRequest, remoteId, serviceClass, null); } /** * Make a call, passing <code>rpcRequest</code>, to the IPC server defined by * <code>remoteId</code>, returning the rpc response. * * @param rpcKind * @param rpcRequest - contains serialized method and method parameters * @param remoteId - the target rpc server * @param serviceClass - service class for RPC * @param fallbackToSimpleAuth - set to true or false during this method to * indicate if a secure client falls back to simple auth * @returns the rpc response * Throws exceptions if there are network problems or if the remote code * threw an exception. 
*/ public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest, ConnectionId remoteId, int serviceClass, AtomicBoolean fallbackToSimpleAuth) throws IOException { final Call call = createCall(rpcKind, rpcRequest); Connection connection = getConnection(remoteId, call, serviceClass, fallbackToSimpleAuth); try { connection.sendRpcRequest(call); // send the rpc request } catch (RejectedExecutionException e) { throw new IOException("connection has been closed", e); } catch (InterruptedException e) { Thread.currentThread().interrupt(); LOG.warn("interrupted waiting to send rpc request to server", e); throw new IOException(e); } boolean interrupted = false; synchronized (call) { while (!call.done) { try { call.wait(); // wait for the result } catch (InterruptedException ie) { // save the fact that we were interrupted interrupted = true; } } if (interrupted) { // set the interrupt flag now that we are done waiting Thread.currentThread().interrupt(); } if (call.error != null) { if (call.error instanceof RemoteException) { call.error.fillInStackTrace(); throw call.error; } else { // local exception InetSocketAddress address = connection.getRemoteAddress(); throw NetUtils.wrapException(address.getHostName(), address.getPort(), NetUtils.getHostname(), 0, call.error); } } else { return call.getRpcResponse(); } } } // for unit testing only @InterfaceAudience.Private @InterfaceStability.Unstable Set<ConnectionId> getConnectionIds() { return connections.asMap().keySet(); } /** Get a connection from the pool, or create a new one and add it to the * pool. Connections to a given ConnectionId are reused. */ private Connection getConnection( final ConnectionId remoteId, Call call, final int serviceClass, AtomicBoolean fallbackToSimpleAuth) throws IOException { if (!running.get()) { // the client is stopped throw new IOException("The client is stopped"); } Connection connection; /* we could avoid this allocation for each RPC by having a * connectionsId object and with set() method. We need to manage the * refs for keys in HashMap properly. For now its ok. */ while(true) { try { connection = connections.get(remoteId, new Callable<Connection>() { @Override public Connection call() throws Exception { return new Connection(remoteId, serviceClass); } }); } catch (ExecutionException e) { Throwable cause = e.getCause(); // the underlying exception should normally be IOException if (cause instanceof IOException) { throw (IOException) cause; } else { throw new IOException(cause); } } if (connection.addCall(call)) { break; } else { connections.invalidate(remoteId); } } //we don't invoke the method below inside "synchronized (connections)" //block above. The reason for that is if the server happens to be slow, //it will take longer to establish a connection and that will slow the //entire system down. connection.setupIOstreams(fallbackToSimpleAuth); return connection; } /** * This class holds the address and the user ticket. The client connections * to servers are uniquely identified by <remoteAddress, protocol, ticket> */ @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Evolving public static class ConnectionId { InetSocketAddress address; UserGroupInformation ticket; final Class<?> protocol; private static final int PRIME = 16777619; private final int rpcTimeout; private final int maxIdleTime; //connections will be culled if it was idle for //maxIdleTime msecs private final RetryPolicy connectionRetryPolicy; private final int maxRetriesOnSasl; // the max. no. 
of retries for socket connections on time out exceptions private final int maxRetriesOnSocketTimeouts; private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm private final boolean tcpLowLatency; // if T then use low-delay QoS private final boolean doPing; //do we need to send ping message private final int pingInterval; // how often sends ping to the server in msecs private String saslQop; // here for testing private final Configuration conf; // used to get the expected kerberos principal name ConnectionId(InetSocketAddress address, Class<?> protocol, UserGroupInformation ticket, int rpcTimeout, RetryPolicy connectionRetryPolicy, Configuration conf) { this.protocol = protocol; this.address = address; this.ticket = ticket; this.rpcTimeout = rpcTimeout; this.connectionRetryPolicy = connectionRetryPolicy; this.maxIdleTime = conf.getInt( CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT); this.maxRetriesOnSasl = conf.getInt( CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT); this.maxRetriesOnSocketTimeouts = conf.getInt( CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT); this.tcpNoDelay = conf.getBoolean( CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY, CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_DEFAULT); this.tcpLowLatency = conf.getBoolean( CommonConfigurationKeysPublic.IPC_CLIENT_LOW_LATENCY, CommonConfigurationKeysPublic.IPC_CLIENT_LOW_LATENCY_DEFAULT ); this.doPing = conf.getBoolean( CommonConfigurationKeys.IPC_CLIENT_PING_KEY, CommonConfigurationKeys.IPC_CLIENT_PING_DEFAULT); this.pingInterval = (doPing ? Client.getPingInterval(conf) : 0); this.conf = conf; } InetSocketAddress getAddress() { return address; } Class<?> getProtocol() { return protocol; } UserGroupInformation getTicket() { return ticket; } private int getRpcTimeout() { return rpcTimeout; } int getMaxIdleTime() { return maxIdleTime; } public int getMaxRetriesOnSasl() { return maxRetriesOnSasl; } /** max connection retries on socket time outs */ public int getMaxRetriesOnSocketTimeouts() { return maxRetriesOnSocketTimeouts; } /** disable nagle's algorithm */ boolean getTcpNoDelay() { return tcpNoDelay; } /** use low-latency QoS bits over TCP */ boolean getTcpLowLatency() { return tcpLowLatency; } boolean getDoPing() { return doPing; } int getPingInterval() { return pingInterval; } @VisibleForTesting String getSaslQop() { return saslQop; } static ConnectionId getConnectionId(InetSocketAddress addr, Class<?> protocol, UserGroupInformation ticket, int rpcTimeout, Configuration conf) throws IOException { return getConnectionId(addr, protocol, ticket, rpcTimeout, null, conf); } /** * Returns a ConnectionId object. * @param addr Remote address for the connection. * @param protocol Protocol for RPC. 
* @param ticket UGI * @param rpcTimeout timeout * @param conf Configuration object * @return A ConnectionId instance * @throws IOException */ static ConnectionId getConnectionId(InetSocketAddress addr, Class<?> protocol, UserGroupInformation ticket, int rpcTimeout, RetryPolicy connectionRetryPolicy, Configuration conf) throws IOException { if (connectionRetryPolicy == null) { final int max = conf.getInt( CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT); final int retryInterval = conf.getInt( CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY, CommonConfigurationKeysPublic .IPC_CLIENT_CONNECT_RETRY_INTERVAL_DEFAULT); connectionRetryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep( max, retryInterval, TimeUnit.MILLISECONDS); } return new ConnectionId(addr, protocol, ticket, rpcTimeout, connectionRetryPolicy, conf); } static boolean isEqual(Object a, Object b) { return a == null ? b == null : a.equals(b); } @Override public boolean equals(Object obj) { if (obj == this) { return true; } if (obj instanceof ConnectionId) { ConnectionId that = (ConnectionId) obj; return isEqual(this.address, that.address) && this.doPing == that.doPing && this.maxIdleTime == that.maxIdleTime && isEqual(this.connectionRetryPolicy, that.connectionRetryPolicy) && this.pingInterval == that.pingInterval && isEqual(this.protocol, that.protocol) && this.rpcTimeout == that.rpcTimeout && this.tcpNoDelay == that.tcpNoDelay && isEqual(this.ticket, that.ticket); } return false; } @Override public int hashCode() { int result = connectionRetryPolicy.hashCode(); result = PRIME * result + ((address == null) ? 0 : address.hashCode()); result = PRIME * result + (doPing ? 1231 : 1237); result = PRIME * result + maxIdleTime; result = PRIME * result + pingInterval; result = PRIME * result + ((protocol == null) ? 0 : protocol.hashCode()); result = PRIME * result + rpcTimeout; result = PRIME * result + (tcpNoDelay ? 1231 : 1237); result = PRIME * result + ((ticket == null) ? 0 : ticket.hashCode()); return result; } @Override public String toString() { return address.toString(); } } /** * Returns the next valid sequential call ID by incrementing an atomic counter * and masking off the sign bit. Valid call IDs are non-negative integers in * the range [ 0, 2^31 - 1 ]. Negative numbers are reserved for special * purposes. The values can overflow back to 0 and be reused. Note that prior * versions of the client did not mask off the sign bit, so a server may still * see a negative call ID if it receives connections from an old client. * * @return next call ID */ public static int nextCallId() { return callIdCounter.getAndIncrement() & 0x7FFFFFFF; } }
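Two small sketches of details documented in the code above: the 7-byte connection preamble written by writeConnectionHeader, and the sign-bit masking used by nextCallId. The concrete byte values passed in are placeholders; the real ones come from RpcConstants.CURRENT_VERSION and the negotiated AuthProtocol, which are not shown here.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class ConnectionHeaderSketch {
  /**
   * Lay out the preamble exactly as documented: "hrpc" magic, then one byte
   * each for version, service class and auth protocol (7 bytes in total).
   */
  public static byte[] buildPreamble(byte version, byte serviceClass,
      byte authProtocol) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.write("hrpc".getBytes(StandardCharsets.UTF_8)); // 4-byte magic
    out.write(version);        // wire version, e.g. RpcConstants.CURRENT_VERSION
    out.write(serviceClass);   // service class used for scheduling/QoS
    out.write(authProtocol);   // 0 for NONE; SASL uses a reserved negative id
    out.flush();
    return buf.toByteArray();
  }

  /** Same masking as nextCallId(): keeps call IDs non-negative on overflow. */
  public static int maskCallId(int rawCounterValue) {
    return rawCounterValue & 0x7FFFFFFF;
  }
}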
63,221
35.885648
114
java
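The sign-bit masking in nextCallId() above is easy to verify in isolation. The following standalone sketch is illustrative only: the CallIdDemo class and its local AtomicInteger are assumptions, not part of Client.java, but the expression mirrors getAndIncrement() & 0x7FFFFFFF and shows the counter wrapping from 2^31 - 1 back to 0 instead of going negative.

import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical demo class; mirrors the masking done by Client.nextCallId().
public class CallIdDemo {
  private static final AtomicInteger callIdCounter = new AtomicInteger();

  static int nextCallId() {
    // getAndIncrement() returns a negative int once the counter overflows;
    // masking with 0x7FFFFFFF clears the sign bit, keeping IDs in [0, 2^31 - 1].
    return callIdCounter.getAndIncrement() & 0x7FFFFFFF;
  }

  public static void main(String[] args) {
    callIdCounter.set(Integer.MAX_VALUE);  // 2^31 - 1
    System.out.println(nextCallId());      // 2147483647
    System.out.println(nextCallId());      // counter overflowed to MIN_VALUE -> 0
    System.out.println(nextCallId());      // 1
  }
}

As the javadoc notes, a reused or wrapped ID is acceptable because call IDs only need to be unique per live connection, not globally.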
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ipc;

import java.io.IOException;

/**
 * Indicates an exception during the execution of a remote procedure call.
 */
public class RpcException extends IOException {
  private static final long serialVersionUID = 1L;

  /**
   * Constructs an exception with the specified detail message.
   *
   * @param message the detail message.
   */
  RpcException(final String message) {
    super(message);
  }

  /**
   * Constructs an exception with the specified detail message and cause.
   *
   * @param message the detail message.
   * @param cause the cause (can be retrieved later by the {@link #getCause()}
   *        method). (A <tt>null</tt> value is permitted, and indicates that
   *        the cause is nonexistent or unknown.)
   */
  RpcException(final String message, final Throwable cause) {
    super(message, cause);
  }
}
1,690
32.82
79
java
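Because both constructors above are package-private, only code inside org.apache.hadoop.ipc can raise this exception directly. A minimal sketch of how such code might use the message-plus-cause constructor; the RpcExceptionDemo class, its method, and the SocketException scenario are invented for illustration and are not part of the Hadoop code base.

package org.apache.hadoop.ipc;

import java.io.IOException;
import java.net.SocketException;

// Hypothetical helper in the same package, shown only to illustrate wrapping
// a lower-level failure so the original cause survives for diagnosis.
class RpcExceptionDemo {
  static void readResponse() throws IOException {
    try {
      // ... read bytes from the connection ...
      throw new SocketException("Connection reset");
    } catch (SocketException e) {
      throw new RpcException("RPC response could not be read", e);
    }
  }
}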
hadoop
hadoop-master/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.ipc;

import java.io.IOException;

/**
 * Superclass of all protocols that use Hadoop RPC.
 * Subclasses of this interface are also supposed to have
 * a static final long versionID field.
 */
public interface VersionedProtocol {

  /**
   * Return protocol version corresponding to protocol interface.
   * @param protocol The classname of the protocol interface
   * @param clientVersion The version of the protocol that the client speaks
   * @return the version that the server will speak
   * @throws IOException if any IO error occurs
   */
  public long getProtocolVersion(String protocol,
                                 long clientVersion) throws IOException;

  /**
   * Return protocol version corresponding to protocol interface.
   * @param protocol The classname of the protocol interface
   * @param clientVersion The version of the protocol that the client speaks
   * @param clientMethodsHash the hashcode of client protocol methods
   * @return the server protocol signature containing its version and
   *         a list of its supported methods
   * @see ProtocolSignature#getProtocolSignature(VersionedProtocol, String,
   *      long, int) for a default implementation
   */
  public ProtocolSignature getProtocolSignature(String protocol,
                                 long clientVersion,
                                 int clientMethodsHash) throws IOException;
}
2,242
40.537037
76
java
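The javadoc above asks every concrete protocol to carry a static final long versionID field alongside the two introspection methods. A minimal sketch of what such a protocol interface might look like; PingProtocol and its ping() method are invented for illustration and are not part of Hadoop.

package org.apache.hadoop.ipc;

import java.io.IOException;

// Hypothetical protocol interface following the VersionedProtocol contract.
public interface PingProtocol extends VersionedProtocol {
  // Bumped whenever the protocol's method signatures change incompatibly.
  public static final long versionID = 1L;

  // Application-level RPC method exposed by the server.
  String ping(String message) throws IOException;
}

A server-side implementation would typically return versionID from getProtocolVersion and, as the @see tag suggests, delegate getProtocolSignature to ProtocolSignature.getProtocolSignature(this, protocol, clientVersion, clientMethodsHash).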