repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
---|---|---|---|---|---|---|
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import java.io.IOException;
import java.util.Map;
import static org.apache.hadoop.util.Time.now;
class FSDirSymlinkOp {
static HdfsFileStatus createSymlinkInt(
FSNamesystem fsn, String target, final String linkArg,
PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
throws IOException {
FSDirectory fsd = fsn.getFSDirectory();
String link = linkArg;
if (!DFSUtil.isValidName(link)) {
throw new InvalidPathException("Invalid link name: " + link);
}
if (FSDirectory.isReservedName(target) || target.isEmpty()) {
throw new InvalidPathException("Invalid target name: " + target);
}
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.createSymlink: target="
+ target + " link=" + link);
}
FSPermissionChecker pc = fsn.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(link);
INodesInPath iip;
fsd.writeLock();
try {
link = fsd.resolvePath(pc, link, pathComponents);
iip = fsd.getINodesInPath4Write(link, false);
if (!createParent) {
fsd.verifyParentDir(iip, link);
}
if (!fsd.isValidToCreate(link, iip)) {
throw new IOException(
"failed to create link " + link +
" either because the filename is invalid or the file exists");
}
if (fsd.isPermissionEnabled()) {
fsd.checkAncestorAccess(pc, iip, FsAction.WRITE);
}
// validate that we have enough inodes.
fsn.checkFsObjectLimit();
// add symbolic link to namespace
addSymlink(fsd, link, iip, target, dirPerms, createParent, logRetryCache);
} finally {
fsd.writeUnlock();
}
NameNode.getNameNodeMetrics().incrCreateSymlinkOps();
return fsd.getAuditFileInfo(iip);
}
static INodeSymlink unprotectedAddSymlink(FSDirectory fsd, INodesInPath iip,
byte[] localName, long id, String target, long mtime, long atime,
PermissionStatus perm)
throws UnresolvedLinkException, QuotaExceededException {
assert fsd.hasWriteLock();
final INodeSymlink symlink = new INodeSymlink(id, null, perm, mtime, atime,
target);
symlink.setLocalName(localName);
return fsd.addINode(iip, symlink) != null ? symlink : null;
}
/**
* Add the given symbolic link to the fs. Record it in the edits log.
*/
private static INodeSymlink addSymlink(FSDirectory fsd, String path,
INodesInPath iip, String target, PermissionStatus dirPerms,
boolean createParent, boolean logRetryCache) throws IOException {
final long mtime = now();
final byte[] localName = iip.getLastLocalName();
if (createParent) {
Map.Entry<INodesInPath, String> e = FSDirMkdirOp
.createAncestorDirectories(fsd, iip, dirPerms);
if (e == null) {
return null;
}
iip = INodesInPath.append(e.getKey(), null, localName);
}
final String userName = dirPerms.getUserName();
long id = fsd.allocateNewInodeId();
PermissionStatus perm = new PermissionStatus(
userName, null, FsPermission.getDefault());
INodeSymlink newNode = unprotectedAddSymlink(fsd, iip.getExistingINodes(),
localName, id, target, mtime, mtime, perm);
if (newNode == null) {
NameNode.stateChangeLog.info("addSymlink: failed to add " + path);
return null;
}
fsd.getEditLog().logSymlink(path, target, mtime, mtime, newNode,
logRetryCache);
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("addSymlink: " + path + " is added");
}
return newNode;
}
}
| 4,958 | 37.146154 | 81 | java |
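The createSymlinkInt path above is normally reached from a client through FileContext. A minimal sketch of such a client follows; the configuration and paths are illustrative, and symlink creation is administratively disabled in many Hadoop releases, in which case this call throws UnsupportedOperationException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class SymlinkClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolve the default file context (HDFS when fs.defaultFS points at a NameNode).
    FileContext fc = FileContext.getFileContext(conf);
    // Create /data/current as a symlink to /data/v1, creating missing parents.
    fc.createSymlink(new Path("/data/v1"), new Path("/data/current"), true);
  }
}

On the NameNode side this request ends up in FSDirSymlinkOp.createSymlinkInt, which validates the link and target names, takes the FSDirectory write lock, adds the INodeSymlink, and records a SymlinkOp in the edit log.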
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.util.EnumCounters;
import org.apache.hadoop.security.AccessControlException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
public class FSDirAttrOp {
static HdfsFileStatus setPermission(
FSDirectory fsd, final String srcArg, FsPermission permission)
throws IOException {
String src = srcArg;
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
INodesInPath iip;
fsd.writeLock();
try {
src = fsd.resolvePath(pc, src, pathComponents);
iip = fsd.getINodesInPath4Write(src);
fsd.checkOwner(pc, iip);
unprotectedSetPermission(fsd, src, permission);
} finally {
fsd.writeUnlock();
}
fsd.getEditLog().logSetPermissions(src, permission);
return fsd.getAuditFileInfo(iip);
}
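// Illustrative client-side sketch (not part of this class): setPermission above is
// typically reached via FileSystem#setPermission, assuming an existing FileSystem
// handle "fs" connected to HDFS, e.g.
//
//   fs.setPermission(new Path("/data/report.csv"), new FsPermission((short) 0640));
//
// The fsd.checkOwner call means only the file owner or a superuser can change the mode.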
static HdfsFileStatus setOwner(
FSDirectory fsd, String src, String username, String group)
throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
INodesInPath iip;
fsd.writeLock();
try {
src = fsd.resolvePath(pc, src, pathComponents);
iip = fsd.getINodesInPath4Write(src);
fsd.checkOwner(pc, iip);
if (!pc.isSuperUser()) {
if (username != null && !pc.getUser().equals(username)) {
throw new AccessControlException("Non-super user cannot change owner");
}
if (group != null && !pc.containsGroup(group)) {
throw new AccessControlException("User does not belong to " + group);
}
}
unprotectedSetOwner(fsd, src, username, group);
} finally {
fsd.writeUnlock();
}
fsd.getEditLog().logSetOwner(src, username, group);
return fsd.getAuditFileInfo(iip);
}
static HdfsFileStatus setTimes(
FSDirectory fsd, String src, long mtime, long atime)
throws IOException {
if (!fsd.isAccessTimeSupported() && atime != -1) {
throw new IOException(
"Access time for hdfs is not configured. " +
" Please set " + DFS_NAMENODE_ACCESSTIME_PRECISION_KEY
+ " configuration parameter.");
}
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
INodesInPath iip;
fsd.writeLock();
try {
src = fsd.resolvePath(pc, src, pathComponents);
iip = fsd.getINodesInPath4Write(src);
// Write access is required to set access and modification times
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
final INode inode = iip.getLastINode();
if (inode == null) {
throw new FileNotFoundException("File/Directory " + src +
" does not exist.");
}
boolean changed = unprotectedSetTimes(fsd, inode, mtime, atime, true,
iip.getLatestSnapshotId());
if (changed) {
fsd.getEditLog().logTimes(src, mtime, atime);
}
} finally {
fsd.writeUnlock();
}
return fsd.getAuditFileInfo(iip);
}
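// Illustrative client-side sketch (not part of this class): setTimes above is
// typically reached via FileSystem#setTimes; passing -1 leaves the corresponding
// time unchanged, matching the mtime != -1 / atime != -1 checks in
// unprotectedSetTimes, e.g.
//
//   fs.setTimes(new Path("/data/report.csv"), -1L, System.currentTimeMillis());
//
// If access times are disabled on the NameNode, any atime other than -1 is rejected
// with the IOException thrown at the top of this method.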
static boolean setReplication(
FSDirectory fsd, BlockManager bm, String src, final short replication)
throws IOException {
bm.verifyReplication(src, replication, null);
final boolean isFile;
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
fsd.writeLock();
try {
src = fsd.resolvePath(pc, src, pathComponents);
final INodesInPath iip = fsd.getINodesInPath4Write(src);
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
final short[] blockRepls = new short[2]; // 0: old, 1: new
final BlockInfo[] blocks = unprotectedSetReplication(fsd, src,
replication, blockRepls);
isFile = blocks != null;
if (isFile) {
fsd.getEditLog().logSetReplication(src, replication);
bm.setReplication(blockRepls[0], blockRepls[1], src, blocks);
}
} finally {
fsd.writeUnlock();
}
return isFile;
}
static HdfsFileStatus setStoragePolicy(
FSDirectory fsd, BlockManager bm, String src, final String policyName)
throws IOException {
if (!fsd.isStoragePolicyEnabled()) {
throw new IOException(
"Failed to set storage policy since "
+ DFS_STORAGE_POLICY_ENABLED_KEY + " is set to false.");
}
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
INodesInPath iip;
fsd.writeLock();
try {
src = FSDirectory.resolvePath(src, pathComponents, fsd);
iip = fsd.getINodesInPath4Write(src);
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
// get the corresponding policy and make sure the policy name is valid
BlockStoragePolicy policy = bm.getStoragePolicy(policyName);
if (policy == null) {
throw new HadoopIllegalArgumentException(
"Cannot find a block policy with the name " + policyName);
}
unprotectedSetStoragePolicy(fsd, bm, iip, policy.getId());
fsd.getEditLog().logSetStoragePolicy(src, policy.getId());
} finally {
fsd.writeUnlock();
}
return fsd.getAuditFileInfo(iip);
}
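// Illustrative client-side sketch (not part of this class): setStoragePolicy above
// is typically reached via DistributedFileSystem#setStoragePolicy with one of the
// policy names known to the BlockManager (e.g. "HOT", "COLD", "ALL_SSD"), assuming
// a DistributedFileSystem handle "dfs":
//
//   dfs.setStoragePolicy(new Path("/archive/logs"), "COLD");
//
// The call fails with an IOException when storage policies are disabled, mirroring
// the isStoragePolicyEnabled() check at the top of this method.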
static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)
throws IOException {
return bm.getStoragePolicies();
}
static BlockStoragePolicy getStoragePolicy(FSDirectory fsd, BlockManager bm,
String path) throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory
.getPathComponentsForReservedPath(path);
fsd.readLock();
try {
path = fsd.resolvePath(pc, path, pathComponents);
final INodesInPath iip = fsd.getINodesInPath(path, false);
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.READ);
}
INode inode = iip.getLastINode();
if (inode == null) {
throw new FileNotFoundException("File/Directory does not exist: "
+ iip.getPath());
}
return bm.getStoragePolicy(inode.getStoragePolicyID());
} finally {
fsd.readUnlock();
}
}
static long getPreferredBlockSize(FSDirectory fsd, String src)
throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
fsd.readLock();
try {
src = fsd.resolvePath(pc, src, pathComponents);
final INodesInPath iip = fsd.getINodesInPath(src, false);
if (fsd.isPermissionEnabled()) {
fsd.checkTraverse(pc, iip);
}
return INodeFile.valueOf(iip.getLastINode(), src)
.getPreferredBlockSize();
} finally {
fsd.readUnlock();
}
}
/**
* Set the namespace, storagespace and typespace quota for a directory.
*
* Note: This does not support ".inodes" relative path.
*/
static void setQuota(FSDirectory fsd, String src, long nsQuota, long ssQuota,
StorageType type) throws IOException {
if (fsd.isPermissionEnabled()) {
FSPermissionChecker pc = fsd.getPermissionChecker();
pc.checkSuperuserPrivilege();
}
fsd.writeLock();
try {
INodeDirectory changed = unprotectedSetQuota(fsd, src, nsQuota, ssQuota, type);
if (changed != null) {
final QuotaCounts q = changed.getQuotaCounts();
if (type == null) {
fsd.getEditLog().logSetQuota(src, q.getNameSpace(), q.getStorageSpace());
} else {
fsd.getEditLog().logSetQuotaByStorageType(
src, q.getTypeSpaces().get(type), type);
}
}
} finally {
fsd.writeUnlock();
}
}
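// Illustrative usage sketch (not part of this class): quotas are set from a
// superuser client via DistributedFileSystem. HdfsConstants.QUOTA_DONT_SET leaves a
// value unchanged and HdfsConstants.QUOTA_RESET clears it, as handled in
// unprotectedSetQuota below. Assuming a DistributedFileSystem handle "dfs":
//
//   // Limit /projects/a to 100,000 names; leave the storagespace quota unchanged.
//   dfs.setQuota(new Path("/projects/a"), 100000L, HdfsConstants.QUOTA_DONT_SET);
//   // Cap SSD usage under /projects/a at 10 GB (requires quota by storage type to
//   // be enabled; see the check in unprotectedSetQuota).
//   dfs.setQuotaByStorageType(new Path("/projects/a"), StorageType.SSD,
//       10L * 1024 * 1024 * 1024);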
static void unprotectedSetPermission(
FSDirectory fsd, String src, FsPermission permissions)
throws FileNotFoundException, UnresolvedLinkException,
QuotaExceededException, SnapshotAccessControlException {
assert fsd.hasWriteLock();
final INodesInPath inodesInPath = fsd.getINodesInPath4Write(src, true);
final INode inode = inodesInPath.getLastINode();
if (inode == null) {
throw new FileNotFoundException("File does not exist: " + src);
}
int snapshotId = inodesInPath.getLatestSnapshotId();
inode.setPermission(permissions, snapshotId);
}
static void unprotectedSetOwner(
FSDirectory fsd, String src, String username, String groupname)
throws FileNotFoundException, UnresolvedLinkException,
QuotaExceededException, SnapshotAccessControlException {
assert fsd.hasWriteLock();
final INodesInPath inodesInPath = fsd.getINodesInPath4Write(src, true);
INode inode = inodesInPath.getLastINode();
if (inode == null) {
throw new FileNotFoundException("File does not exist: " + src);
}
if (username != null) {
inode = inode.setUser(username, inodesInPath.getLatestSnapshotId());
}
if (groupname != null) {
inode.setGroup(groupname, inodesInPath.getLatestSnapshotId());
}
}
static boolean setTimes(
FSDirectory fsd, INode inode, long mtime, long atime, boolean force,
int latestSnapshotId) throws QuotaExceededException {
fsd.writeLock();
try {
return unprotectedSetTimes(fsd, inode, mtime, atime, force,
latestSnapshotId);
} finally {
fsd.writeUnlock();
}
}
static boolean unprotectedSetTimes(
FSDirectory fsd, String src, long mtime, long atime, boolean force)
throws UnresolvedLinkException, QuotaExceededException {
assert fsd.hasWriteLock();
final INodesInPath i = fsd.getINodesInPath(src, true);
return unprotectedSetTimes(fsd, i.getLastINode(), mtime, atime,
force, i.getLatestSnapshotId());
}
/**
* See {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String,
* long, long, StorageType)}
* for the contract.
* Sets quota for a directory.
* @return INodeDirectory if any of the quotas have changed. null otherwise.
* @throws FileNotFoundException if the path does not exist.
* @throws PathIsNotDirectoryException if the path is not a directory.
* @throws QuotaExceededException if the directory tree size is
* greater than the given quota
* @throws UnresolvedLinkException if a symlink is encountered in src.
* @throws SnapshotAccessControlException if path is in RO snapshot
*/
static INodeDirectory unprotectedSetQuota(
FSDirectory fsd, String src, long nsQuota, long ssQuota, StorageType type)
throws FileNotFoundException, PathIsNotDirectoryException,
QuotaExceededException, UnresolvedLinkException,
SnapshotAccessControlException, UnsupportedActionException {
assert fsd.hasWriteLock();
// sanity check
if ((nsQuota < 0 && nsQuota != HdfsConstants.QUOTA_DONT_SET &&
nsQuota != HdfsConstants.QUOTA_RESET) ||
(ssQuota < 0 && ssQuota != HdfsConstants.QUOTA_DONT_SET &&
ssQuota != HdfsConstants.QUOTA_RESET)) {
throw new IllegalArgumentException("Illegal value for nsQuota or " +
"ssQuota : " + nsQuota + " and " +
ssQuota);
}
// sanity check for quota by storage type
if ((type != null) && (!fsd.isQuotaByStorageTypeEnabled() ||
nsQuota != HdfsConstants.QUOTA_DONT_SET)) {
throw new UnsupportedActionException(
"Failed to set quota by storage type because either" +
DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY + " is set to " +
fsd.isQuotaByStorageTypeEnabled() + " or nsQuota value is illegal " +
nsQuota);
}
String srcs = FSDirectory.normalizePath(src);
final INodesInPath iip = fsd.getINodesInPath4Write(srcs, true);
INodeDirectory dirNode = INodeDirectory.valueOf(iip.getLastINode(), srcs);
if (dirNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) {
throw new IllegalArgumentException("Cannot clear namespace quota on root.");
} else { // a directory inode
final QuotaCounts oldQuota = dirNode.getQuotaCounts();
final long oldNsQuota = oldQuota.getNameSpace();
final long oldSsQuota = oldQuota.getStorageSpace();
if (nsQuota == HdfsConstants.QUOTA_DONT_SET) {
nsQuota = oldNsQuota;
}
if (ssQuota == HdfsConstants.QUOTA_DONT_SET) {
ssQuota = oldSsQuota;
}
// unchanged space/namespace quota
if (type == null && oldNsQuota == nsQuota && oldSsQuota == ssQuota) {
return null;
}
// unchanged type quota
if (type != null) {
EnumCounters<StorageType> oldTypeQuotas = oldQuota.getTypeSpaces();
if (oldTypeQuotas != null && oldTypeQuotas.get(type) == ssQuota) {
return null;
}
}
final int latest = iip.getLatestSnapshotId();
dirNode.recordModification(latest);
dirNode.setQuota(fsd.getBlockStoragePolicySuite(), nsQuota, ssQuota, type);
return dirNode;
}
}
static BlockInfo[] unprotectedSetReplication(
FSDirectory fsd, String src, short replication, short[] blockRepls)
throws QuotaExceededException, UnresolvedLinkException,
SnapshotAccessControlException {
assert fsd.hasWriteLock();
final INodesInPath iip = fsd.getINodesInPath4Write(src, true);
final INode inode = iip.getLastINode();
if (inode == null || !inode.isFile()) {
return null;
}
INodeFile file = inode.asFile();
final short oldBR = file.getPreferredBlockReplication();
// before setFileReplication, check for increasing block replication.
// if replication > oldBR, then newBR == replication.
// if replication < oldBR, we don't know newBR yet.
if (replication > oldBR) {
long dsDelta = file.storagespaceConsumed(null).getStorageSpace() / oldBR;
fsd.updateCount(iip, 0L, dsDelta, oldBR, replication, true);
}
file.setFileReplication(replication, iip.getLatestSnapshotId());
final short newBR = file.getPreferredBlockReplication();
// check newBR < oldBR case.
if (newBR < oldBR) {
long dsDelta = file.storagespaceConsumed(null).getStorageSpace() / newBR;
fsd.updateCount(iip, 0L, dsDelta, oldBR, newBR, true);
}
if (blockRepls != null) {
blockRepls[0] = oldBR;
blockRepls[1] = newBR;
}
return file.getBlocks();
}
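// Worked example of the accounting above (illustrative numbers): for a file whose
// blocks occupy 900 MB of storagespace at replication 3, dsDelta is 900 MB / 3 =
// 300 MB of per-replica space. Raising replication to 5 charges
// updateCount(iip, 0, 300 MB, 3, 5, true) before setFileReplication, so a quota
// violation can abort the change; lowering it to 2 applies the reduction only
// after setFileReplication, once the new preferred replication (newBR) is known.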
static void unprotectedSetStoragePolicy(
FSDirectory fsd, BlockManager bm, INodesInPath iip, byte policyId)
throws IOException {
assert fsd.hasWriteLock();
final INode inode = iip.getLastINode();
if (inode == null) {
throw new FileNotFoundException("File/Directory does not exist: "
+ iip.getPath());
}
final int snapshotId = iip.getLatestSnapshotId();
if (inode.isFile()) {
BlockStoragePolicy newPolicy = bm.getStoragePolicy(policyId);
if (newPolicy.isCopyOnCreateFile()) {
throw new HadoopIllegalArgumentException(
"Policy " + newPolicy + " cannot be set after file creation.");
}
BlockStoragePolicy currentPolicy =
bm.getStoragePolicy(inode.getLocalStoragePolicyID());
if (currentPolicy != null && currentPolicy.isCopyOnCreateFile()) {
throw new HadoopIllegalArgumentException(
"Existing policy " + currentPolicy.getName() +
" cannot be changed after file creation.");
}
inode.asFile().setStoragePolicyID(policyId, snapshotId);
} else if (inode.isDirectory()) {
setDirStoragePolicy(fsd, inode.asDirectory(), policyId, snapshotId);
} else {
throw new FileNotFoundException(iip.getPath()
+ " is not a file or directory");
}
}
private static void setDirStoragePolicy(
FSDirectory fsd, INodeDirectory inode, byte policyId,
int latestSnapshotId) throws IOException {
List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
XAttr xAttr = BlockStoragePolicySuite.buildXAttr(policyId);
List<XAttr> newXAttrs = FSDirXAttrOp.setINodeXAttrs(fsd, existingXAttrs,
Arrays.asList(xAttr),
EnumSet.of(
XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
XAttrStorage.updateINodeXAttrs(inode, newXAttrs, latestSnapshotId);
}
private static boolean unprotectedSetTimes(
FSDirectory fsd, INode inode, long mtime, long atime, boolean force,
int latest) throws QuotaExceededException {
assert fsd.hasWriteLock();
boolean status = false;
if (mtime != -1) {
inode = inode.setModificationTime(mtime, latest);
status = true;
}
if (atime != -1) {
long inodeTime = inode.getAccessTime();
// if the last access time update was within the last precision interval, then
// no need to store access time
if (atime <= inodeTime + fsd.getAccessTimePrecision() && !force) {
status = false;
} else {
inode.setAccessTime(atime, latest);
status = true;
}
}
return status;
}
}
| 19,667 | 37.564706 | 89 | java |
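unprotectedSetTimes above persists a new access time only when it falls outside the configured precision window (or when force is set). A small standalone sketch of that guard; the one-hour precision is only an assumed, illustrative value.

public class AccessTimePrecisionSketch {
  // Mirrors the guard in FSDirAttrOp.unprotectedSetTimes: skip persisting the new
  // atime when it still falls within the precision window after the stored atime.
  static boolean shouldPersistAccessTime(long storedAtime, long newAtime,
      long precisionMs, boolean force) {
    return force || newAtime > storedAtime + precisionMs;
  }

  public static void main(String[] args) {
    long hourMs = 3600L * 1000;    // assumed one-hour precision
    long stored = 1_000_000_000L;  // arbitrary stored access time
    // 10 minutes later: inside the window, not persisted.
    System.out.println(shouldPersistAccessTime(stored, stored + 10 * 60 * 1000, hourMs, false));
    // 2 hours later: outside the window, persisted (and logged via logTimes).
    System.out.println(shouldPersistAccessTime(stored, stored + 2 * hourMs, hourMs, false));
  }
}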
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.Time.monotonicNow;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddBlockOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AppendOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CloseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ConcatDeleteOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CreateSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DisallowSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.LogSegmentOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaByStorageTypeOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetStoragePolicyOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TruncateOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
/**
* FSEditLog maintains a log of the namespace modifications.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class FSEditLog implements LogsPurgeable {
static final Log LOG = LogFactory.getLog(FSEditLog.class);
/**
* State machine for edit log.
*
* In a non-HA setup:
*
* The log starts in UNINITIALIZED state upon construction. Once it's
* initialized, it is usually in IN_SEGMENT state, indicating that edits may
* be written. In the middle of a roll, or while saving the namespace, it
* briefly enters the BETWEEN_LOG_SEGMENTS state, indicating that the previous
* segment has been closed, but the new one has not yet been opened.
*
* In an HA setup:
*
* The log starts in UNINITIALIZED state upon construction. Once it's
* initialized, it sits in the OPEN_FOR_READING state the entire time that the
* NN is in standby. Upon the NN transition to active, the log will be CLOSED,
* and then move to being BETWEEN_LOG_SEGMENTS, much as if the NN had just
* started up, and then will move to IN_SEGMENT so it can begin writing to the
* log. The log states will then revert to behaving as they do in a non-HA
* setup.
*/
private enum State {
UNINITIALIZED,
BETWEEN_LOG_SEGMENTS,
IN_SEGMENT,
OPEN_FOR_READING,
CLOSED;
}
private State state = State.UNINITIALIZED;
// the set of journals this edit log writes to; initialized by initJournals()
private JournalSet journalSet = null;
private EditLogOutputStream editLogStream = null;
// a monotonically increasing counter that represents transactionIds.
private long txid = 0;
// stores the last synced transactionId.
private long synctxid = 0;
// the first txid of the log that's currently open for writing.
// If this value is N, we are currently writing to edits_inprogress_N
private long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
// the time of printing the statistics to the log file.
private long lastPrintTime;
// is a sync currently running?
private volatile boolean isSyncRunning;
// is an automatic sync scheduled?
private volatile boolean isAutoSyncScheduled = false;
// these are statistics counters.
private long numTransactions; // number of transactions
private long numTransactionsBatchedInSync;
private long totalTimeTransactions; // total time for all transactions
private NameNodeMetrics metrics;
private final NNStorage storage;
private final Configuration conf;
private final List<URI> editsDirs;
private final ThreadLocal<OpInstanceCache> cache =
new ThreadLocal<OpInstanceCache>() {
@Override
protected OpInstanceCache initialValue() {
return new OpInstanceCache();
}
};
/**
* The edit directories that are shared between primary and secondary.
*/
private final List<URI> sharedEditsDirs;
/**
* Take this lock when adding journals to or closing the JournalSet. Allows
* us to ensure that the JournalSet isn't closed or updated underneath us
* in selectInputStreams().
*/
private final Object journalSetLock = new Object();
private static class TransactionId {
public long txid;
TransactionId(long value) {
this.txid = value;
}
}
// stores the most current transactionId of this thread.
private static final ThreadLocal<TransactionId> myTransactionId = new ThreadLocal<TransactionId>() {
@Override
protected synchronized TransactionId initialValue() {
return new TransactionId(Long.MAX_VALUE);
}
};
/**
* Constructor for FSEditLog. Underlying journals are constructed, but
* no streams are opened until open() is called.
*
* @param conf The namenode configuration
* @param storage Storage object used by namenode
* @param editsDirs List of journals to use
*/
FSEditLog(Configuration conf, NNStorage storage, List<URI> editsDirs) {
isSyncRunning = false;
this.conf = conf;
this.storage = storage;
metrics = NameNode.getNameNodeMetrics();
lastPrintTime = monotonicNow();
// If this list is empty, an error will be thrown on first use
// of the editlog, as no journals will exist
this.editsDirs = Lists.newArrayList(editsDirs);
this.sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf);
}
public synchronized void initJournalsForWrite() {
Preconditions.checkState(state == State.UNINITIALIZED ||
state == State.CLOSED, "Unexpected state: %s", state);
initJournals(this.editsDirs);
state = State.BETWEEN_LOG_SEGMENTS;
}
public synchronized void initSharedJournalsForRead() {
if (state == State.OPEN_FOR_READING) {
LOG.warn("Initializing shared journals for READ, already open for READ",
new Exception());
return;
}
Preconditions.checkState(state == State.UNINITIALIZED ||
state == State.CLOSED);
initJournals(this.sharedEditsDirs);
state = State.OPEN_FOR_READING;
}
private synchronized void initJournals(List<URI> dirs) {
int minimumRedundantJournals = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT);
synchronized(journalSetLock) {
journalSet = new JournalSet(minimumRedundantJournals);
for (URI u : dirs) {
boolean required = FSNamesystem.getRequiredNamespaceEditsDirs(conf)
.contains(u);
if (u.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
StorageDirectory sd = storage.getStorageDirectory(u);
if (sd != null) {
journalSet.add(new FileJournalManager(conf, sd, storage),
required, sharedEditsDirs.contains(u));
}
} else {
journalSet.add(createJournal(u), required,
sharedEditsDirs.contains(u));
}
}
}
if (journalSet.isEmpty()) {
LOG.error("No edits directories configured!");
}
}
/**
* Get the list of URIs the editlog is using for storage
* @return collection of URIs in use by the edit log
*/
Collection<URI> getEditURIs() {
return editsDirs;
}
/**
* Initialize the output stream for logging, opening the first
* log segment.
*/
synchronized void openForWrite(int layoutVersion) throws IOException {
Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
"Bad state: %s", state);
long segmentTxId = getLastWrittenTxId() + 1;
// Safety check: we should never start a segment if there are
// newer txids readable.
List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
journalSet.selectInputStreams(streams, segmentTxId, true);
if (!streams.isEmpty()) {
String error = String.format("Cannot start writing at txid %s " +
"when there is a stream available for read: %s",
segmentTxId, streams.get(0));
IOUtils.cleanup(LOG, streams.toArray(new EditLogInputStream[0]));
throw new IllegalStateException(error);
}
startLogSegment(segmentTxId, true, layoutVersion);
assert state == State.IN_SEGMENT : "Bad state: " + state;
}
/**
* @return true if the log is currently open in write mode, regardless
* of whether it actually has an open segment.
*/
synchronized boolean isOpenForWrite() {
return state == State.IN_SEGMENT ||
state == State.BETWEEN_LOG_SEGMENTS;
}
/**
* @return true if the log is open in write mode and has a segment open
* ready to take edits.
*/
synchronized boolean isSegmentOpen() {
return state == State.IN_SEGMENT;
}
/**
* @return true if the log is open in read mode.
*/
public synchronized boolean isOpenForRead() {
return state == State.OPEN_FOR_READING;
}
/**
* Shutdown the file store.
*/
synchronized void close() {
if (state == State.CLOSED) {
LOG.debug("Closing log when already closed");
return;
}
try {
if (state == State.IN_SEGMENT) {
assert editLogStream != null;
waitForSyncToFinish();
endCurrentLogSegment(true);
}
} finally {
if (journalSet != null && !journalSet.isEmpty()) {
try {
synchronized(journalSetLock) {
journalSet.close();
}
} catch (IOException ioe) {
LOG.warn("Error closing journalSet", ioe);
}
}
state = State.CLOSED;
}
}
/**
* Format all configured journals which are not file-based.
*
* File-based journals are skipped, since they are formatted by the
* Storage format code.
*/
synchronized void formatNonFileJournals(NamespaceInfo nsInfo) throws IOException {
Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
"Bad state: %s", state);
for (JournalManager jm : journalSet.getJournalManagers()) {
if (!(jm instanceof FileJournalManager)) {
jm.format(nsInfo);
}
}
}
synchronized List<FormatConfirmable> getFormatConfirmables() {
Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
"Bad state: %s", state);
List<FormatConfirmable> ret = Lists.newArrayList();
for (final JournalManager jm : journalSet.getJournalManagers()) {
// The FJMs are confirmed separately since they are also
// StorageDirectories
if (!(jm instanceof FileJournalManager)) {
ret.add(jm);
}
}
return ret;
}
/**
* Write an operation to the edit log.
* <p/>
* Additionally, this will sync the edit log if required by the underlying
* edit stream's automatic sync policy (e.g. when the buffer is full, or
* if a time interval has elapsed).
*/
void logEdit(final FSEditLogOp op) {
boolean needsSync = false;
synchronized (this) {
assert isOpenForWrite() :
"bad state: " + state;
// wait if an automatic sync is scheduled
waitIfAutoSyncScheduled();
long start = beginTransaction();
op.setTransactionId(txid);
try {
editLogStream.write(op);
} catch (IOException ex) {
// All journals failed, it is handled in logSync.
} finally {
op.reset();
}
endTransaction(start);
// check if it is time to schedule an automatic sync
needsSync = shouldForceSync();
if (needsSync) {
isAutoSyncScheduled = true;
}
}
// Sync the log if an automatic sync is required.
if (needsSync) {
logSync();
}
}
/**
* Wait if an automatic sync is scheduled
*/
synchronized void waitIfAutoSyncScheduled() {
try {
while (isAutoSyncScheduled) {
this.wait(1000);
}
} catch (InterruptedException e) {
}
}
/**
* Signal that an automatic sync scheduling is done if it is scheduled
*/
synchronized void doneWithAutoSyncScheduling() {
if (isAutoSyncScheduled) {
isAutoSyncScheduled = false;
notifyAll();
}
}
/**
* Check if should automatically sync buffered edits to
* persistent store
*
* @return true if any of the edit stream says that it should sync
*/
private boolean shouldForceSync() {
return editLogStream.shouldForceSync();
}
private long beginTransaction() {
assert Thread.holdsLock(this);
// get a new transactionId
txid++;
//
// record the transactionId when new data was written to the edits log
//
TransactionId id = myTransactionId.get();
id.txid = txid;
return monotonicNow();
}
private void endTransaction(long start) {
assert Thread.holdsLock(this);
// update statistics
long end = monotonicNow();
numTransactions++;
totalTimeTransactions += (end-start);
if (metrics != null) // Metrics is non-null only when used inside name node
metrics.addTransaction(end-start);
}
/**
* Return the transaction ID of the last transaction written to the log.
*/
public synchronized long getLastWrittenTxId() {
return txid;
}
/**
* @return the first transaction ID in the current log segment
*/
synchronized long getCurSegmentTxId() {
Preconditions.checkState(isSegmentOpen(),
"Bad state: %s", state);
return curSegmentTxId;
}
/**
* Set the transaction ID to use for the next transaction written.
*/
synchronized void setNextTxId(long nextTxId) {
Preconditions.checkArgument(synctxid <= txid &&
nextTxId >= txid,
"May not decrease txid." +
" synctxid=%s txid=%s nextTxId=%s",
synctxid, txid, nextTxId);
txid = nextTxId - 1;
}
/**
* Blocks until all ongoing edits have been synced to disk.
* This differs from logSync in that it waits for edits that have been
* written by other threads, not just edits from the calling thread.
*
* NOTE: this should be done while holding the FSNamesystem lock, or
* else more operations can start writing while this is in progress.
*/
void logSyncAll() {
// Record the most recent transaction ID as our own id
synchronized (this) {
TransactionId id = myTransactionId.get();
id.txid = txid;
}
// Then make sure we're synced up to this point
logSync();
}
/**
* Sync all modifications done by this thread.
*
* The internal concurrency design of this class is as follows:
* - Log items are written synchronized into an in-memory buffer,
* and each assigned a transaction ID.
* - When a thread (client) would like to sync all of its edits, logSync()
* uses a ThreadLocal transaction ID to determine what edit number must
* be synced to.
* - The isSyncRunning volatile boolean tracks whether a sync is currently
* in progress.
*
* The data is double-buffered within each edit log implementation so that
* in-memory writing can occur in parallel with the on-disk writing.
*
* Each sync occurs in three steps:
* 1. synchronized, it swaps the double buffer and sets the isSyncRunning
* flag.
* 2. unsynchronized, it flushes the data to storage
* 3. synchronized, it resets the flag and notifies anyone waiting on the
* sync.
*
* The lack of synchronization on step 2 allows other threads to continue
* to write into the memory buffer while the sync is in progress.
* Because this step is unsynchronized, actions that need to avoid
* concurrency with sync() should be synchronized and also call
* waitForSyncToFinish() before assuming they are running alone.
*/
public void logSync() {
long syncStart = 0;
// Fetch the transactionId of this thread.
long mytxid = myTransactionId.get().txid;
boolean sync = false;
try {
EditLogOutputStream logStream = null;
synchronized (this) {
try {
printStatistics(false);
// if somebody is already syncing, then wait
while (mytxid > synctxid && isSyncRunning) {
try {
wait(1000);
} catch (InterruptedException ie) {
}
}
//
// If this transaction was already flushed, then nothing to do
//
if (mytxid <= synctxid) {
numTransactionsBatchedInSync++;
if (metrics != null) {
// Metrics is non-null only when used inside name node
metrics.incrTransactionsBatchedInSync();
}
return;
}
// now, this thread will do the sync
syncStart = txid;
isSyncRunning = true;
sync = true;
// swap buffers
try {
if (journalSet.isEmpty()) {
throw new IOException("No journals available to flush");
}
editLogStream.setReadyToFlush();
} catch (IOException e) {
final String msg =
"Could not sync enough journals to persistent storage " +
"due to " + e.getMessage() + ". " +
"Unsynced transactions: " + (txid - synctxid);
LOG.fatal(msg, new Exception());
synchronized(journalSetLock) {
IOUtils.cleanup(LOG, journalSet);
}
terminate(1, msg);
}
} finally {
// Prevent RuntimeException from blocking other log edit write
doneWithAutoSyncScheduling();
}
//editLogStream may become null,
//so store a local variable for flush.
logStream = editLogStream;
}
// do the sync
long start = monotonicNow();
try {
if (logStream != null) {
logStream.flush();
}
} catch (IOException ex) {
synchronized (this) {
final String msg =
"Could not sync enough journals to persistent storage. "
+ "Unsynced transactions: " + (txid - synctxid);
LOG.fatal(msg, new Exception());
synchronized(journalSetLock) {
IOUtils.cleanup(LOG, journalSet);
}
terminate(1, msg);
}
}
long elapsed = monotonicNow() - start;
if (metrics != null) { // Metrics non-null only when used inside name node
metrics.addSync(elapsed);
}
} finally {
// Prevent RuntimeException from blocking other log edit sync
synchronized (this) {
if (sync) {
synctxid = syncStart;
isSyncRunning = false;
}
this.notifyAll();
}
}
}
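// A minimal standalone sketch of the three-step sync protocol documented above
// (simplified; not the actual FSEditLog/EditLogOutputStream implementation: real
// buffers, transaction batching, and failure handling are omitted). Step 1 swaps
// the buffers and claims the sync under the lock, step 2 flushes outside the lock
// so writers can keep appending, step 3 publishes the synced txid and wakes waiters.
private static final class DoubleBufferSyncSketch {
private final StringBuilder current = new StringBuilder(); // buffer being appended to
private final StringBuilder syncing = new StringBuilder(); // buffer being flushed
private long txid = 0;      // last buffered transaction
private long synctxid = 0;  // last durably synced transaction
private boolean syncRunning = false;
synchronized long logEdit(String op) { // buffer an edit and return its txid
current.append(op).append('\n');
return ++txid;
}
void logSync(long mytxid) throws InterruptedException {
long syncStart;
synchronized (this) { // step 1: wait for any running sync, then swap buffers
while (mytxid > synctxid && syncRunning) {
wait(1000);
}
if (mytxid <= synctxid) {
return; // a previous sync already covered this transaction
}
syncStart = txid;
syncing.setLength(0);
syncing.append(current);
current.setLength(0);
syncRunning = true;
}
flushToDurableStorage(syncing.toString()); // step 2: flush outside the lock
synchronized (this) { // step 3: record progress and notify waiting threads
synctxid = syncStart;
syncRunning = false;
notifyAll();
}
}
private void flushToDurableStorage(String batch) {
// placeholder for the durable write (e.g. flushing a journal stream)
}
}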
//
// print statistics every 1 minute.
//
private void printStatistics(boolean force) {
long now = monotonicNow();
if (lastPrintTime + 60000 > now && !force) {
return;
}
lastPrintTime = now;
StringBuilder buf = new StringBuilder();
buf.append("Number of transactions: ");
buf.append(numTransactions);
buf.append(" Total time for transactions(ms): ");
buf.append(totalTimeTransactions);
buf.append(" Number of transactions batched in Syncs: ");
buf.append(numTransactionsBatchedInSync);
buf.append(" Number of syncs: ");
buf.append(editLogStream.getNumSync());
buf.append(" SyncTimes(ms): ");
buf.append(journalSet.getSyncTimes());
LOG.info(buf);
}
/** Record the RPC IDs if necessary */
private void logRpcIds(FSEditLogOp op, boolean toLogRpcIds) {
if (toLogRpcIds) {
op.setRpcClientId(Server.getClientId());
op.setRpcCallId(Server.getCallId());
}
}
public void logAppendFile(String path, INodeFile file, boolean newBlock,
boolean toLogRpcIds) {
FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
assert uc != null;
AppendOp op = AppendOp.getInstance(cache.get()).setPath(path)
.setClientName(uc.getClientName())
.setClientMachine(uc.getClientMachine())
.setNewBlock(newBlock);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
/**
* Add open lease record to edit log.
* Records the block locations of the last block.
*/
public void logOpenFile(String path, INodeFile newNode, boolean overwrite,
boolean toLogRpcIds) {
Preconditions.checkArgument(newNode.isUnderConstruction());
PermissionStatus permissions = newNode.getPermissionStatus();
AddOp op = AddOp.getInstance(cache.get())
.setInodeId(newNode.getId())
.setPath(path)
.setReplication(newNode.getFileReplication())
.setModificationTime(newNode.getModificationTime())
.setAccessTime(newNode.getAccessTime())
.setBlockSize(newNode.getPreferredBlockSize())
.setBlocks(newNode.getBlocks())
.setPermissionStatus(permissions)
.setClientName(newNode.getFileUnderConstructionFeature().getClientName())
.setClientMachine(
newNode.getFileUnderConstructionFeature().getClientMachine())
.setOverwrite(overwrite)
.setStoragePolicyId(newNode.getLocalStoragePolicyID());
AclFeature f = newNode.getAclFeature();
if (f != null) {
op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
}
XAttrFeature x = newNode.getXAttrFeature();
if (x != null) {
op.setXAttrs(x.getXAttrs());
}
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
/**
* Add close lease record to edit log.
*/
public void logCloseFile(String path, INodeFile newNode) {
CloseOp op = CloseOp.getInstance(cache.get())
.setPath(path)
.setReplication(newNode.getFileReplication())
.setModificationTime(newNode.getModificationTime())
.setAccessTime(newNode.getAccessTime())
.setBlockSize(newNode.getPreferredBlockSize())
.setBlocks(newNode.getBlocks())
.setPermissionStatus(newNode.getPermissionStatus());
logEdit(op);
}
public void logAddBlock(String path, INodeFile file) {
Preconditions.checkArgument(file.isUnderConstruction());
BlockInfo[] blocks = file.getBlocks();
Preconditions.checkState(blocks != null && blocks.length > 0);
BlockInfo pBlock = blocks.length > 1 ? blocks[blocks.length - 2] : null;
BlockInfo lastBlock = blocks[blocks.length - 1];
AddBlockOp op = AddBlockOp.getInstance(cache.get()).setPath(path)
.setPenultimateBlock(pBlock).setLastBlock(lastBlock);
logEdit(op);
}
public void logUpdateBlocks(String path, INodeFile file, boolean toLogRpcIds) {
Preconditions.checkArgument(file.isUnderConstruction());
UpdateBlocksOp op = UpdateBlocksOp.getInstance(cache.get())
.setPath(path)
.setBlocks(file.getBlocks());
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
/**
* Add create directory record to edit log
*/
public void logMkDir(String path, INode newNode) {
PermissionStatus permissions = newNode.getPermissionStatus();
MkdirOp op = MkdirOp.getInstance(cache.get())
.setInodeId(newNode.getId())
.setPath(path)
.setTimestamp(newNode.getModificationTime())
.setPermissionStatus(permissions);
AclFeature f = newNode.getAclFeature();
if (f != null) {
op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
}
XAttrFeature x = newNode.getXAttrFeature();
if (x != null) {
op.setXAttrs(x.getXAttrs());
}
logEdit(op);
}
/**
* Add rename record to edit log.
*
* The destination should be the file name, not the destination directory.
* TODO: use String parameters until just before writing to disk
*/
void logRename(String src, String dst, long timestamp, boolean toLogRpcIds) {
RenameOldOp op = RenameOldOp.getInstance(cache.get())
.setSource(src)
.setDestination(dst)
.setTimestamp(timestamp);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
/**
* Add rename record to edit log.
*
* The destination should be the file name, not the destination directory.
*/
void logRename(String src, String dst, long timestamp, boolean toLogRpcIds,
Options.Rename... options) {
RenameOp op = RenameOp.getInstance(cache.get())
.setSource(src)
.setDestination(dst)
.setTimestamp(timestamp)
.setOptions(options);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
/**
* Add set replication record to edit log
*/
void logSetReplication(String src, short replication) {
SetReplicationOp op = SetReplicationOp.getInstance(cache.get())
.setPath(src)
.setReplication(replication);
logEdit(op);
}
/**
* Add set storage policy id record to edit log
*/
void logSetStoragePolicy(String src, byte policyId) {
SetStoragePolicyOp op = SetStoragePolicyOp.getInstance(cache.get())
.setPath(src).setPolicyId(policyId);
logEdit(op);
}
/** Add set namespace quota record to edit log
*
* @param src the string representation of the path to a directory
* @param nsQuota namespace quota
* @param dsQuota diskspace quota
*/
void logSetQuota(String src, long nsQuota, long dsQuota) {
SetQuotaOp op = SetQuotaOp.getInstance(cache.get())
.setSource(src)
.setNSQuota(nsQuota)
.setDSQuota(dsQuota);
logEdit(op);
}
/** Add set quota by storage type record to edit log */
void logSetQuotaByStorageType(String src, long dsQuota, StorageType type) {
SetQuotaByStorageTypeOp op = SetQuotaByStorageTypeOp.getInstance(cache.get())
.setSource(src)
.setQuotaByStorageType(dsQuota, type);
logEdit(op);
}
/** Add set permissions record to edit log */
void logSetPermissions(String src, FsPermission permissions) {
SetPermissionsOp op = SetPermissionsOp.getInstance(cache.get())
.setSource(src)
.setPermissions(permissions);
logEdit(op);
}
/** Add set owner record to edit log */
void logSetOwner(String src, String username, String groupname) {
SetOwnerOp op = SetOwnerOp.getInstance(cache.get())
.setSource(src)
.setUser(username)
.setGroup(groupname);
logEdit(op);
}
/**
* concat(trg,src..) log
*/
void logConcat(String trg, String[] srcs, long timestamp, boolean toLogRpcIds) {
ConcatDeleteOp op = ConcatDeleteOp.getInstance(cache.get())
.setTarget(trg)
.setSources(srcs)
.setTimestamp(timestamp);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
/**
* Add delete file record to edit log
*/
void logDelete(String src, long timestamp, boolean toLogRpcIds) {
DeleteOp op = DeleteOp.getInstance(cache.get())
.setPath(src)
.setTimestamp(timestamp);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
/**
* Add truncate file record to edit log
*/
void logTruncate(String src, String clientName, String clientMachine,
long size, long timestamp, Block truncateBlock) {
TruncateOp op = TruncateOp.getInstance(cache.get())
.setPath(src)
.setClientName(clientName)
.setClientMachine(clientMachine)
.setNewLength(size)
.setTimestamp(timestamp)
.setTruncateBlock(truncateBlock);
logEdit(op);
}
/**
* Add legacy block generation stamp record to edit log
*/
void logGenerationStampV1(long genstamp) {
SetGenstampV1Op op = SetGenstampV1Op.getInstance(cache.get())
.setGenerationStamp(genstamp);
logEdit(op);
}
/**
* Add generation stamp record to edit log
*/
void logGenerationStampV2(long genstamp) {
SetGenstampV2Op op = SetGenstampV2Op.getInstance(cache.get())
.setGenerationStamp(genstamp);
logEdit(op);
}
/**
* Record a newly allocated block ID in the edit log
*/
void logAllocateBlockId(long blockId) {
AllocateBlockIdOp op = AllocateBlockIdOp.getInstance(cache.get())
.setBlockId(blockId);
logEdit(op);
}
/**
* Add modification and access time record to edit log
*/
void logTimes(String src, long mtime, long atime) {
TimesOp op = TimesOp.getInstance(cache.get())
.setPath(src)
.setModificationTime(mtime)
.setAccessTime(atime);
logEdit(op);
}
/**
* Add a create symlink record.
*/
void logSymlink(String path, String value, long mtime, long atime,
INodeSymlink node, boolean toLogRpcIds) {
SymlinkOp op = SymlinkOp.getInstance(cache.get())
.setId(node.getId())
.setPath(path)
.setValue(value)
.setModificationTime(mtime)
.setAccessTime(atime)
.setPermissionStatus(node.getPermissionStatus());
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
/**
* log delegation token to edit log
* @param id DelegationTokenIdentifier
* @param expiryTime of the token
*/
void logGetDelegationToken(DelegationTokenIdentifier id,
long expiryTime) {
GetDelegationTokenOp op = GetDelegationTokenOp.getInstance(cache.get())
.setDelegationTokenIdentifier(id)
.setExpiryTime(expiryTime);
logEdit(op);
}
void logRenewDelegationToken(DelegationTokenIdentifier id,
long expiryTime) {
RenewDelegationTokenOp op = RenewDelegationTokenOp.getInstance(cache.get())
.setDelegationTokenIdentifier(id)
.setExpiryTime(expiryTime);
logEdit(op);
}
void logCancelDelegationToken(DelegationTokenIdentifier id) {
CancelDelegationTokenOp op = CancelDelegationTokenOp.getInstance(cache.get())
.setDelegationTokenIdentifier(id);
logEdit(op);
}
void logUpdateMasterKey(DelegationKey key) {
UpdateMasterKeyOp op = UpdateMasterKeyOp.getInstance(cache.get())
.setDelegationKey(key);
logEdit(op);
}
void logReassignLease(String leaseHolder, String src, String newHolder) {
ReassignLeaseOp op = ReassignLeaseOp.getInstance(cache.get())
.setLeaseHolder(leaseHolder)
.setPath(src)
.setNewHolder(newHolder);
logEdit(op);
}
void logCreateSnapshot(String snapRoot, String snapName, boolean toLogRpcIds) {
CreateSnapshotOp op = CreateSnapshotOp.getInstance(cache.get())
.setSnapshotRoot(snapRoot).setSnapshotName(snapName);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
void logDeleteSnapshot(String snapRoot, String snapName, boolean toLogRpcIds) {
DeleteSnapshotOp op = DeleteSnapshotOp.getInstance(cache.get())
.setSnapshotRoot(snapRoot).setSnapshotName(snapName);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
void logRenameSnapshot(String path, String snapOldName, String snapNewName,
boolean toLogRpcIds) {
RenameSnapshotOp op = RenameSnapshotOp.getInstance(cache.get())
.setSnapshotRoot(path).setSnapshotOldName(snapOldName)
.setSnapshotNewName(snapNewName);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
void logAllowSnapshot(String path) {
AllowSnapshotOp op = AllowSnapshotOp.getInstance(cache.get())
.setSnapshotRoot(path);
logEdit(op);
}
void logDisallowSnapshot(String path) {
DisallowSnapshotOp op = DisallowSnapshotOp.getInstance(cache.get())
.setSnapshotRoot(path);
logEdit(op);
}
/**
* Log a CacheDirectiveInfo returned from
* {@link CacheManager#addDirective(CacheDirectiveInfo, FSPermissionChecker)}
*/
void logAddCacheDirectiveInfo(CacheDirectiveInfo directive,
boolean toLogRpcIds) {
AddCacheDirectiveInfoOp op =
AddCacheDirectiveInfoOp.getInstance(cache.get())
.setDirective(directive);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
void logModifyCacheDirectiveInfo(
CacheDirectiveInfo directive, boolean toLogRpcIds) {
ModifyCacheDirectiveInfoOp op =
ModifyCacheDirectiveInfoOp.getInstance(
cache.get()).setDirective(directive);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
void logRemoveCacheDirectiveInfo(Long id, boolean toLogRpcIds) {
RemoveCacheDirectiveInfoOp op =
RemoveCacheDirectiveInfoOp.getInstance(cache.get()).setId(id);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
void logAddCachePool(CachePoolInfo pool, boolean toLogRpcIds) {
AddCachePoolOp op =
AddCachePoolOp.getInstance(cache.get()).setPool(pool);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
void logModifyCachePool(CachePoolInfo info, boolean toLogRpcIds) {
ModifyCachePoolOp op =
ModifyCachePoolOp.getInstance(cache.get()).setInfo(info);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
void logRemoveCachePool(String poolName, boolean toLogRpcIds) {
RemoveCachePoolOp op =
RemoveCachePoolOp.getInstance(cache.get()).setPoolName(poolName);
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
void logStartRollingUpgrade(long startTime) {
RollingUpgradeOp op = RollingUpgradeOp.getStartInstance(cache.get());
op.setTime(startTime);
logEdit(op);
}
void logFinalizeRollingUpgrade(long finalizeTime) {
RollingUpgradeOp op = RollingUpgradeOp.getFinalizeInstance(cache.get());
op.setTime(finalizeTime);
logEdit(op);
}
void logSetAcl(String src, List<AclEntry> entries) {
SetAclOp op = SetAclOp.getInstance();
op.src = src;
op.aclEntries = entries;
logEdit(op);
}
void logSetXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
final SetXAttrOp op = SetXAttrOp.getInstance();
op.src = src;
op.xAttrs = xAttrs;
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
void logRemoveXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
op.src = src;
op.xAttrs = xAttrs;
logRpcIds(op, toLogRpcIds);
logEdit(op);
}
/**
* Get all the journals this edit log is currently operating on.
*/
synchronized List<JournalAndStream> getJournals() {
return journalSet.getAllJournalStreams();
}
/**
* Used only by tests.
*/
@VisibleForTesting
synchronized public JournalSet getJournalSet() {
return journalSet;
}
@VisibleForTesting
synchronized void setJournalSetForTesting(JournalSet js) {
this.journalSet = js;
}
/**
* Used only by tests.
*/
@VisibleForTesting
void setMetricsForTests(NameNodeMetrics metrics) {
this.metrics = metrics;
}
/**
* Return a manifest of what finalized edit logs are available
*/
public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId)
throws IOException {
return journalSet.getEditLogManifest(fromTxId);
}
/**
* Finalizes the current edit log and opens a new log segment.
*
* @param layoutVersion The layout version of the new edit log segment.
* @return the transaction id of the BEGIN_LOG_SEGMENT transaction in the new
* log.
*/
synchronized long rollEditLog(int layoutVersion) throws IOException {
LOG.info("Rolling edit logs");
endCurrentLogSegment(true);
long nextTxId = getLastWrittenTxId() + 1;
startLogSegment(nextTxId, true, layoutVersion);
assert curSegmentTxId == nextTxId;
return nextTxId;
}
/**
* Start writing to the log segment with the given txid.
* Transitions from BETWEEN_LOG_SEGMENTS state to IN_LOG_SEGMENT state.
*/
synchronized void startLogSegment(final long segmentTxId,
boolean writeHeaderTxn, int layoutVersion) throws IOException {
LOG.info("Starting log segment at " + segmentTxId);
Preconditions.checkArgument(segmentTxId > 0,
"Bad txid: %s", segmentTxId);
Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
"Bad state: %s", state);
Preconditions.checkState(segmentTxId > curSegmentTxId,
"Cannot start writing to log segment " + segmentTxId +
" when previous log segment started at " + curSegmentTxId);
Preconditions.checkArgument(segmentTxId == txid + 1,
"Cannot start log segment at txid %s when next expected " +
"txid is %s", segmentTxId, txid + 1);
numTransactions = totalTimeTransactions = numTransactionsBatchedInSync = 0;
// TODO no need to link this back to storage anymore!
// See HDFS-2174.
storage.attemptRestoreRemovedStorage();
try {
editLogStream = journalSet.startLogSegment(segmentTxId, layoutVersion);
} catch (IOException ex) {
throw new IOException("Unable to start log segment " +
segmentTxId + ": too few journals successfully started.", ex);
}
curSegmentTxId = segmentTxId;
state = State.IN_SEGMENT;
if (writeHeaderTxn) {
logEdit(LogSegmentOp.getInstance(cache.get(),
FSEditLogOpCodes.OP_START_LOG_SEGMENT));
logSync();
}
}
/**
* Finalize the current log segment.
* Transitions from IN_SEGMENT state to BETWEEN_LOG_SEGMENTS state.
*/
public synchronized void endCurrentLogSegment(boolean writeEndTxn) {
LOG.info("Ending log segment " + curSegmentTxId);
Preconditions.checkState(isSegmentOpen(),
"Bad state: %s", state);
if (writeEndTxn) {
logEdit(LogSegmentOp.getInstance(cache.get(),
FSEditLogOpCodes.OP_END_LOG_SEGMENT));
logSync();
}
printStatistics(true);
final long lastTxId = getLastWrittenTxId();
try {
journalSet.finalizeLogSegment(curSegmentTxId, lastTxId);
editLogStream = null;
} catch (IOException e) {
//All journals have failed, it will be handled in logSync.
}
state = State.BETWEEN_LOG_SEGMENTS;
}
/**
* Abort all current logs. Called from the backup node.
*/
synchronized void abortCurrentLogSegment() {
try {
//Check for null, as abort can be called any time.
if (editLogStream != null) {
editLogStream.abort();
editLogStream = null;
state = State.BETWEEN_LOG_SEGMENTS;
}
} catch (IOException e) {
LOG.warn("All journals failed to abort", e);
}
}
/**
* Archive any log files that are older than the given txid.
*
* If the edit log is not open for write, then this call returns with no
* effect.
*/
@Override
public synchronized void purgeLogsOlderThan(final long minTxIdToKeep) {
// Should not purge logs unless they are open for write.
// This prevents the SBN from purging logs on shared storage, for example.
if (!isOpenForWrite()) {
return;
}
assert curSegmentTxId == HdfsServerConstants.INVALID_TXID || // on format this is no-op
minTxIdToKeep <= curSegmentTxId :
"cannot purge logs older than txid " + minTxIdToKeep +
" when current segment starts at " + curSegmentTxId;
if (minTxIdToKeep == 0) {
return;
}
// This could be improved to not need synchronization. But currently,
// journalSet is not threadsafe, so we need to synchronize this method.
try {
journalSet.purgeLogsOlderThan(minTxIdToKeep);
} catch (IOException ex) {
//All journals have failed, it will be handled in logSync.
}
}
/**
* The actual sync activity happens while not synchronized on this object.
   * Thus, synchronized activities that must not run concurrently with file
   * operations should wait for any running sync to finish.
*/
synchronized void waitForSyncToFinish() {
while (isSyncRunning) {
try {
wait(1000);
} catch (InterruptedException ie) {}
}
}
/**
* Return the txid of the last synced transaction.
*/
public synchronized long getSyncTxId() {
return synctxid;
}
// sets the initial capacity of the flush buffer.
synchronized void setOutputBufferCapacity(int size) {
journalSet.setOutputBufferCapacity(size);
}
/**
   * Create (or find, if one already exists) an edit output stream, which
* streams journal records (edits) to the specified backup node.<br>
*
* The new BackupNode will start receiving edits the next time this
* NameNode's logs roll.
*
* @param bnReg the backup node registration information.
* @param nnReg this (active) name-node registration.
* @throws IOException
*/
synchronized void registerBackupNode(
NamenodeRegistration bnReg, // backup node
NamenodeRegistration nnReg) // active name-node
throws IOException {
if(bnReg.isRole(NamenodeRole.CHECKPOINT))
return; // checkpoint node does not stream edits
JournalManager jas = findBackupJournal(bnReg);
if (jas != null) {
// already registered
LOG.info("Backup node " + bnReg + " re-registers");
return;
}
LOG.info("Registering new backup node: " + bnReg);
BackupJournalManager bjm = new BackupJournalManager(bnReg, nnReg);
synchronized(journalSetLock) {
journalSet.add(bjm, false);
}
}
synchronized void releaseBackupStream(NamenodeRegistration registration)
throws IOException {
BackupJournalManager bjm = this.findBackupJournal(registration);
if (bjm != null) {
LOG.info("Removing backup journal " + bjm);
synchronized(journalSetLock) {
journalSet.remove(bjm);
}
}
}
/**
   * Find the BackupJournalManager associated with this BackupNode.
*
* @return null if it cannot be found
*/
private synchronized BackupJournalManager findBackupJournal(
NamenodeRegistration bnReg) {
for (JournalManager bjm : journalSet.getJournalManagers()) {
if ((bjm instanceof BackupJournalManager)
&& ((BackupJournalManager) bjm).matchesRegistration(bnReg)) {
return (BackupJournalManager) bjm;
}
}
return null;
}
/**
* Write an operation to the edit log. Do not sync to persistent
* store yet.
*/
synchronized void logEdit(final int length, final byte[] data) {
long start = beginTransaction();
try {
editLogStream.writeRaw(data, 0, length);
} catch (IOException ex) {
// All journals have failed, it will be handled in logSync.
}
endTransaction(start);
}
/**
* Run recovery on all journals to recover any unclosed segments
*/
synchronized void recoverUnclosedStreams() {
Preconditions.checkState(
state == State.BETWEEN_LOG_SEGMENTS,
"May not recover segments - wrong state: %s", state);
try {
journalSet.recoverUnfinalizedSegments();
} catch (IOException ex) {
// All journals have failed, it is handled in logSync.
// TODO: are we sure this is OK?
}
}
public long getSharedLogCTime() throws IOException {
for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
if (jas.isShared()) {
return jas.getManager().getJournalCTime();
}
}
throw new IOException("No shared log found.");
}
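  // The shared-log helpers below apply upgrade, finalize and rollback steps
  // only to journals flagged as shared (typically the edits storage shared
  // between the active and standby NameNode in an HA deployment); non-shared
  // journals are skipped here.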
public synchronized void doPreUpgradeOfSharedLog() throws IOException {
for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
if (jas.isShared()) {
jas.getManager().doPreUpgrade();
}
}
}
public synchronized void doUpgradeOfSharedLog() throws IOException {
for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
if (jas.isShared()) {
jas.getManager().doUpgrade(storage);
}
}
}
public synchronized void doFinalizeOfSharedLog() throws IOException {
for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
if (jas.isShared()) {
jas.getManager().doFinalize();
}
}
}
public synchronized boolean canRollBackSharedLog(StorageInfo prevStorage,
int targetLayoutVersion) throws IOException {
for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
if (jas.isShared()) {
return jas.getManager().canRollBack(storage, prevStorage,
targetLayoutVersion);
}
}
throw new IOException("No shared log found.");
}
public synchronized void doRollback() throws IOException {
for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
if (jas.isShared()) {
jas.getManager().doRollback();
}
}
}
public synchronized void discardSegments(long markerTxid)
throws IOException {
for (JournalAndStream jas : journalSet.getAllJournalStreams()) {
jas.getManager().discardSegments(markerTxid);
}
}
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
long fromTxId, boolean inProgressOk) throws IOException {
journalSet.selectInputStreams(streams, fromTxId, inProgressOk);
}
public Collection<EditLogInputStream> selectInputStreams(
long fromTxId, long toAtLeastTxId) throws IOException {
return selectInputStreams(fromTxId, toAtLeastTxId, null, true);
}
/**
* Select a list of input streams.
*
* @param fromTxId first transaction in the selected streams
* @param toAtLeastTxId the selected streams must contain this transaction
* @param recovery recovery context
* @param inProgressOk set to true if in-progress streams are OK
*/
public Collection<EditLogInputStream> selectInputStreams(
long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery,
boolean inProgressOk) throws IOException {
List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
synchronized(journalSetLock) {
Preconditions.checkState(journalSet.isOpen(), "Cannot call " +
"selectInputStreams() on closed FSEditLog");
selectInputStreams(streams, fromTxId, inProgressOk);
}
try {
checkForGaps(streams, fromTxId, toAtLeastTxId, inProgressOk);
} catch (IOException e) {
if (recovery != null) {
// If recovery mode is enabled, continue loading even if we know we
// can't load up to toAtLeastTxId.
LOG.error(e);
} else {
closeAllStreams(streams);
throw e;
}
}
return streams;
}
/**
* Check for gaps in the edit log input stream list.
* Note: we're assuming that the list is sorted and that txid ranges don't
* overlap. This could be done better and with more generality with an
* interval tree.
*/
private void checkForGaps(List<EditLogInputStream> streams, long fromTxId,
long toAtLeastTxId, boolean inProgressOk) throws IOException {
Iterator<EditLogInputStream> iter = streams.iterator();
long txId = fromTxId;
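    // Invariant: txId is the next transaction that still needs to be covered
    // by some stream. Each stream must begin at or before txId; an in-progress
    // stream (unknown last txid) is assumed to reach far enough, otherwise
    // txId is advanced past the stream's last txid and the scan continues.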
while (true) {
if (txId > toAtLeastTxId) return;
if (!iter.hasNext()) break;
EditLogInputStream elis = iter.next();
if (elis.getFirstTxId() > txId) break;
long next = elis.getLastTxId();
if (next == HdfsServerConstants.INVALID_TXID) {
if (!inProgressOk) {
throw new RuntimeException("inProgressOk = false, but " +
"selectInputStreams returned an in-progress edit " +
"log input stream (" + elis + ")");
}
// We don't know where the in-progress stream ends.
// It could certainly go all the way up to toAtLeastTxId.
return;
}
txId = next + 1;
}
throw new IOException(String.format("Gap in transactions. Expected to "
+ "be able to read up until at least txid %d but unable to find any "
+ "edit logs containing txid %d", toAtLeastTxId, txId));
}
/**
* Close all the streams in a collection
* @param streams The list of streams to close
*/
static void closeAllStreams(Iterable<EditLogInputStream> streams) {
for (EditLogInputStream s : streams) {
IOUtils.closeStream(s);
}
}
/**
* Retrieve the implementation class for a Journal scheme.
* @param conf The configuration to retrieve the information from
* @param uriScheme The uri scheme to look up.
* @return the class of the journal implementation
* @throws IllegalArgumentException if no class is configured for uri
*/
static Class<? extends JournalManager> getJournalClass(Configuration conf,
String uriScheme) {
String key
= DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + "." + uriScheme;
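    // For example, a "qjournal" URI scheme resolves the property
    // "dfs.namenode.edits.journal-plugin.qjournal" (assuming the usual value
    // of DFS_NAMENODE_EDITS_PLUGIN_PREFIX) to the configured JournalManager
    // implementation class.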
Class <? extends JournalManager> clazz = null;
try {
clazz = conf.getClass(key, null, JournalManager.class);
} catch (RuntimeException re) {
throw new IllegalArgumentException(
"Invalid class specified for " + uriScheme, re);
}
if (clazz == null) {
LOG.warn("No class configured for " +uriScheme
+ ", " + key + " is empty");
throw new IllegalArgumentException(
"No class configured for " + uriScheme);
}
return clazz;
}
/**
* Construct a custom journal manager.
* The class to construct is taken from the configuration.
   * @param uri the URI identifying the journal to construct a manager for
* @return The constructed journal manager
* @throws IllegalArgumentException if no class is configured for uri
*/
private JournalManager createJournal(URI uri) {
Class<? extends JournalManager> clazz
= getJournalClass(conf, uri.getScheme());
try {
Constructor<? extends JournalManager> cons
= clazz.getConstructor(Configuration.class, URI.class,
NamespaceInfo.class);
return cons.newInstance(conf, uri, storage.getNamespaceInfo());
} catch (Exception e) {
throw new IllegalArgumentException("Unable to construct journal, "
+ uri, e);
}
}
}
| 54,384 | 31.940642 | 102 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
/**
* XAttrStorage is used to read and set xattrs for an inode.
*/
@InterfaceAudience.Private
public class XAttrStorage {
private static final Map<String, String> internedNames = Maps.newHashMap();
/**
* Reads the existing extended attributes of an inode. If the
* inode does not have an <code>XAttr</code>, then this method
* returns an empty list.
* <p/>
* Must be called while holding the FSDirectory read lock.
*
* @param inode INode to read
* @param snapshotId
* @return List<XAttr> <code>XAttr</code> list.
*/
public static List<XAttr> readINodeXAttrs(INode inode, int snapshotId) {
XAttrFeature f = inode.getXAttrFeature(snapshotId);
return f == null ? ImmutableList.<XAttr> of() : f.getXAttrs();
}
/**
* Reads the existing extended attributes of an inode.
* <p/>
* Must be called while holding the FSDirectory read lock.
*
* @param inodeAttr INodeAttributes to read.
* @return List<XAttr> <code>XAttr</code> list.
*/
public static List<XAttr> readINodeXAttrs(INodeAttributes inodeAttr) {
XAttrFeature f = inodeAttr.getXAttrFeature();
return f == null ? ImmutableList.<XAttr> of() : f.getXAttrs();
}
/**
* Update xattrs of inode.
* <p/>
* Must be called while holding the FSDirectory write lock.
*
* @param inode INode to update
* @param xAttrs to update xAttrs.
* @param snapshotId id of the latest snapshot of the inode
*/
public static void updateINodeXAttrs(INode inode,
List<XAttr> xAttrs, int snapshotId) throws QuotaExceededException {
if (xAttrs == null || xAttrs.isEmpty()) {
if (inode.getXAttrFeature() != null) {
inode.removeXAttrFeature(snapshotId);
}
return;
}
// Dedupe the xAttr name and save them into a new interned list
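    // Only the names are interned: many inodes carry xattrs with identical
    // names, so sharing one String instance per distinct name reduces NameNode
    // heap usage, while the xattr values are stored as-is.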
List<XAttr> internedXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
for (XAttr xAttr : xAttrs) {
final String name = xAttr.getName();
String internedName = internedNames.get(name);
if (internedName == null) {
internedName = name;
internedNames.put(internedName, internedName);
}
XAttr internedXAttr = new XAttr.Builder()
.setName(internedName)
.setNameSpace(xAttr.getNameSpace())
.setValue(xAttr.getValue())
.build();
internedXAttrs.add(internedXAttr);
}
// Save the list of interned xattrs
ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(internedXAttrs);
if (inode.getXAttrFeature() != null) {
inode.removeXAttrFeature(snapshotId);
}
inode.addXAttrFeature(new XAttrFeature(newXAttrs), snapshotId);
}
}
| 3,849 | 34.321101 | 79 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputException.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Thrown when there's a failure to read an edit log op from disk when loading
* edits.
*/
@InterfaceAudience.Private
public class EditLogInputException extends IOException {
private static final long serialVersionUID = 1L;
private final long numEditsLoaded;
public EditLogInputException(String message, Throwable cause,
long numEditsLoaded) {
super(message, cause);
this.numEditsLoaded = numEditsLoaded;
}
public long getNumEditsLoaded() {
return numEditsLoaded;
}
}
| 1,460 | 30.76087 | 78 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ServletUtil;
/** Redirect queries about the hosted filesystem to an appropriate datanode.
* @see org.apache.hadoop.hdfs.web.HftpFileSystem
*/
@InterfaceAudience.Private
public class FileDataServlet extends DfsServlet {
/** For java.io.Serializable */
private static final long serialVersionUID = 1L;
/** Create a redirection URL */
private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus status,
UserGroupInformation ugi, ClientProtocol nnproxy, HttpServletRequest request, String dt)
throws IOException {
String scheme = request.getScheme();
final LocatedBlocks blks = nnproxy.getBlockLocations(
status.getFullPath(new Path(path)).toUri().getPath(), 0, 1);
final Configuration conf = NameNodeHttpServer.getConfFromContext(
getServletContext());
final DatanodeID host = pickSrcDatanode(blks, status, conf);
final String hostname;
if (host instanceof DatanodeInfo) {
hostname = host.getHostName();
} else {
hostname = host.getIpAddr();
}
int port = "https".equals(scheme) ? host.getInfoSecurePort() : host
.getInfoPort();
String dtParam = "";
if (dt != null) {
dtParam = JspHelper.getDelegationTokenUrlParam(dt);
}
// Add namenode address to the url params
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
getServletContext());
String addr = nn.getNameNodeAddressHostPortString();
String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
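    // Illustrative shape of the resulting redirect (parameter names assume the
    // JspHelper conventions):
    //   http(s)://<datanode>:<port>/streamFile<path>?ugi=<user>&delegation=<token>&nnaddr=<nn-host:port>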
return new URL(scheme, hostname, port,
"/streamFile" + encodedPath + '?' +
"ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) +
dtParam + addrParam);
}
/** Select a datanode to service this request.
* Currently, this looks at no more than the first five blocks of a file,
* selecting a datanode randomly from the most represented.
* @param conf
*/
private DatanodeID pickSrcDatanode(LocatedBlocks blks, HdfsFileStatus i,
Configuration conf) throws IOException {
if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
// pick a random datanode
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
getServletContext());
return NamenodeJspHelper.getRandomDatanode(nn);
}
return JspHelper.bestNode(blks, conf);
}
/**
* Service a GET request as described below.
* Request:
* {@code
* GET http://<nn>:<port>/data[/<path>] HTTP/1.1
* }
*/
@Override
public void doGet(final HttpServletRequest request,
final HttpServletResponse response)
throws IOException {
final Configuration conf = NameNodeHttpServer.getConfFromContext(
getServletContext());
final UserGroupInformation ugi = getUGI(request, conf);
try {
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException {
ClientProtocol nn = createNameNodeProxy();
final String path = ServletUtil.getDecodedPath(request, "/data");
final String encodedPath = ServletUtil.getRawPath(request, "/data");
String delegationToken = request
.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);
HdfsFileStatus info = nn.getFileInfo(path);
if (info != null && !info.isDir()) {
response.sendRedirect(createRedirectURL(path, encodedPath,
info, ugi, nn, request, delegationToken).toString());
} else if (info == null) {
response.sendError(400, "File not found " + path);
} else {
response.sendError(400, path + ": is a directory");
}
return null;
}
});
} catch (IOException e) {
response.sendError(400, e.getMessage());
} catch (InterruptedException e) {
response.sendError(400, e.getMessage());
}
}
}
| 5,499 | 36.931034 | 94 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
/**
 * A JournalManager is responsible for managing a single location where edit
 * logs are stored. It may correspond to multiple files, a backup node, etc.
* Even when the actual underlying storage is rolled, or failed and restored,
* each conceptual place of storage corresponds to exactly one instance of
* this class, which is created when the EditLog is first opened.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface JournalManager extends Closeable, LogsPurgeable,
FormatConfirmable {
/**
* Format the underlying storage, removing any previously
* stored data.
*/
void format(NamespaceInfo ns) throws IOException;
/**
* Begin writing to a new segment of the log stream, which starts at
* the given transaction ID.
*/
EditLogOutputStream startLogSegment(long txId, int layoutVersion)
throws IOException;
/**
* Mark the log segment that spans from firstTxId to lastTxId
* as finalized and complete.
*/
void finalizeLogSegment(long firstTxId, long lastTxId) throws IOException;
/**
* Set the amount of memory that this stream should use to buffer edits
*/
void setOutputBufferCapacity(int size);
/**
* Recover segments which have not been finalized.
*/
void recoverUnfinalizedSegments() throws IOException;
/**
* Perform any steps that must succeed across all JournalManagers involved in
* an upgrade before proceeding onto the actual upgrade stage. If a call to
* any JM's doPreUpgrade method fails, then doUpgrade will not be called for
* any JM.
*/
void doPreUpgrade() throws IOException;
/**
* Perform the actual upgrade of the JM. After this is completed, the NN can
* begin to use the new upgraded metadata. This metadata may later be either
* finalized or rolled back to the previous state.
*
* @param storage info about the new upgraded versions.
*/
void doUpgrade(Storage storage) throws IOException;
/**
* Finalize the upgrade. JMs should purge any state that they had been keeping
* around during the upgrade process. After this is completed, rollback is no
* longer allowed.
*/
void doFinalize() throws IOException;
/**
* Return true if this JM can roll back to the previous storage state, false
* otherwise. The NN will refuse to run the rollback operation unless at least
* one JM or fsimage storage directory can roll back.
*
* @param storage the storage info for the current state
* @param prevStorage the storage info for the previous (unupgraded) state
* @param targetLayoutVersion the layout version we intend to roll back to
* @return true if this JM can roll back, false otherwise.
*/
boolean canRollBack(StorageInfo storage, StorageInfo prevStorage,
int targetLayoutVersion) throws IOException;
/**
* Perform the rollback to the previous FS state. JMs which do not need to
* roll back their state should just return without error.
*/
void doRollback() throws IOException;
/**
* @return the CTime of the journal manager.
*/
long getJournalCTime() throws IOException;
/**
* Discard the segments whose first txid is >= the given txid.
   * @param startTxId The given txid should fall right at a segment boundary,
   * i.e., it should be the first txid of some segment, if a segment
   * corresponding to the txid exists.
*/
void discardSegments(long startTxId) throws IOException;
/**
* Close the journal manager, freeing any resources it may hold.
*/
@Override
void close() throws IOException;
/**
   * Indicate that a journal cannot be used to load a certain range of
   * edits.
* This exception occurs in the case of a gap in the transactions, or a
* corrupt edit file.
*/
public static class CorruptionException extends IOException {
static final long serialVersionUID = -4687802717006172702L;
public CorruptionException(String reason) {
super(reason);
}
}
}
| 5,299 | 34.810811 | 80 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
/**
* An implementation of the abstract class {@link EditLogOutputStream},
* which streams edits to a backup node.
*
* @see org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol#journal
* (org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration,
* int, int, byte[])
*/
class EditLogBackupOutputStream extends EditLogOutputStream {
private static final Log LOG = LogFactory.getLog(EditLogFileOutputStream.class);
static final int DEFAULT_BUFFER_SIZE = 256;
private final JournalProtocol backupNode; // RPC proxy to backup node
private final NamenodeRegistration bnRegistration; // backup node registration
private final JournalInfo journalInfo; // active node registration
private final DataOutputBuffer out; // serialized output sent to backup node
private EditsDoubleBuffer doubleBuf;
EditLogBackupOutputStream(NamenodeRegistration bnReg, // backup node
JournalInfo journalInfo) // active name-node
throws IOException {
super();
this.bnRegistration = bnReg;
this.journalInfo = journalInfo;
InetSocketAddress bnAddress =
NetUtils.createSocketAddr(bnRegistration.getAddress());
try {
this.backupNode = NameNodeProxies.createNonHAProxy(new HdfsConfiguration(),
bnAddress, JournalProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
} catch(IOException e) {
Storage.LOG.error("Error connecting to: " + bnAddress, e);
throw e;
}
this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
this.out = new DataOutputBuffer(DEFAULT_BUFFER_SIZE);
}
@Override // EditLogOutputStream
public void write(FSEditLogOp op) throws IOException {
doubleBuf.writeOp(op);
}
@Override
public void writeRaw(byte[] bytes, int offset, int length) throws IOException {
throw new IOException("Not supported");
}
/**
* There is no persistent storage. Just clear the buffers.
*/
@Override // EditLogOutputStream
public void create(int layoutVersion) throws IOException {
assert doubleBuf.isFlushed() : "previous data is not flushed yet";
this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
}
@Override // EditLogOutputStream
public void close() throws IOException {
// close should have been called after all pending transactions
// have been flushed & synced.
int size = doubleBuf.countBufferedBytes();
if (size != 0) {
throw new IOException("BackupEditStream has " + size +
" records still to be flushed and cannot be closed.");
}
RPC.stopProxy(backupNode); // stop the RPC threads
doubleBuf.close();
doubleBuf = null;
}
@Override
public void abort() throws IOException {
RPC.stopProxy(backupNode);
doubleBuf = null;
}
@Override // EditLogOutputStream
public void setReadyToFlush() throws IOException {
doubleBuf.setReadyToFlush();
}
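  // Ships every transaction that has been marked ready (via setReadyToFlush)
  // to the backup node in a single journal() RPC; the local output buffer is
  // reset afterwards so it can be reused for the next flush.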
@Override // EditLogOutputStream
protected void flushAndSync(boolean durable) throws IOException {
assert out.getLength() == 0 : "Output buffer is not empty";
if (doubleBuf.isFlushed()) {
LOG.info("Nothing to flush");
return;
}
int numReadyTxns = doubleBuf.countReadyTxns();
long firstTxToFlush = doubleBuf.getFirstReadyTxId();
doubleBuf.flushTo(out);
if (out.getLength() > 0) {
assert numReadyTxns > 0;
byte[] data = Arrays.copyOf(out.getData(), out.getLength());
out.reset();
assert out.getLength() == 0 : "Output buffer is not empty";
backupNode.journal(journalInfo, 0, firstTxToFlush, numReadyTxns, data);
}
}
/**
* Get backup node registration.
*/
NamenodeRegistration getRegistration() {
return bnRegistration;
}
void startLogSegment(long txId) throws IOException {
backupNode.startLogSegment(journalInfo, 0, txId);
}
}
| 5,432 | 34.279221 | 82 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_APPEND;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_BLOCK;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_DIRECTIVE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ADD_CACHE_POOL;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOCATE_BLOCK_ID;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ALLOW_SNAPSHOT;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_CANCEL_DELEGATION_TOKEN;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_CLEAR_NS_QUOTA;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_CLOSE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_CONCAT_DELETE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_CREATE_SNAPSHOT;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_DELETE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_DELETE_SNAPSHOT;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_DISALLOW_SNAPSHOT;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_END_LOG_SEGMENT;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_GET_DELEGATION_TOKEN;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_INVALID;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MKDIR;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_CACHE_DIRECTIVE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_MODIFY_CACHE_POOL;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REASSIGN_LEASE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_CACHE_DIRECTIVE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_CACHE_POOL;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_REMOVE_XATTR;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_OLD;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENAME_SNAPSHOT;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_RENEW_DELEGATION_TOKEN;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_ACL;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ROLLING_UPGRADE_FINALIZE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_ROLLING_UPGRADE_START;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_GENSTAMP_V1;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_GENSTAMP_V2;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_NS_QUOTA;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_OWNER;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_PERMISSIONS;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_QUOTA;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_REPLICATION;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_XATTR;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_START_LOG_SEGMENT;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SYMLINK;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_TIMES;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_TRUNCATE;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_UPDATE_BLOCKS;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_UPDATE_MASTER_KEY;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_STORAGE_POLICY;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.OP_SET_QUOTA_BY_STORAGETYPE;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumMap;
import java.util.List;
import java.util.zip.CheckedInputStream;
import java.util.zip.Checksum;
import org.apache.commons.codec.DecoderException;
import org.apache.commons.codec.binary.Hex;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.proto.EditLogProtos.AclEditLogProto;
import org.apache.hadoop.hdfs.protocol.proto.EditLogProtos.XAttrEditLogProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
import org.apache.hadoop.ipc.ClientId;
import org.apache.hadoop.ipc.RpcConstants;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;
import org.xml.sax.ContentHandler;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.AttributesImpl;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
/**
* Helper classes for reading the ops from an InputStream.
* All ops derive from FSEditLogOp and are only
* instantiated from Reader#readOp()
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract class FSEditLogOp {
public final FSEditLogOpCodes opCode;
long txid;
byte[] rpcClientId;
int rpcCallId;
final void reset() {
txid = HdfsServerConstants.INVALID_TXID;
rpcClientId = RpcConstants.DUMMY_CLIENT_ID;
rpcCallId = RpcConstants.INVALID_CALL_ID;
resetSubFields();
}
abstract void resetSubFields();
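  // Caches one reusable instance of each op type so that logging an edit does
  // not allocate a new op object per transaction; reset()/resetSubFields()
  // clear an instance's fields so it can be repopulated before the next use.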
final public static class OpInstanceCache {
private final EnumMap<FSEditLogOpCodes, FSEditLogOp> inst =
new EnumMap<FSEditLogOpCodes, FSEditLogOp>(FSEditLogOpCodes.class);
public OpInstanceCache() {
inst.put(OP_ADD, new AddOp());
inst.put(OP_CLOSE, new CloseOp());
inst.put(OP_SET_REPLICATION, new SetReplicationOp());
inst.put(OP_CONCAT_DELETE, new ConcatDeleteOp());
inst.put(OP_RENAME_OLD, new RenameOldOp());
inst.put(OP_DELETE, new DeleteOp());
inst.put(OP_MKDIR, new MkdirOp());
inst.put(OP_SET_GENSTAMP_V1, new SetGenstampV1Op());
inst.put(OP_SET_PERMISSIONS, new SetPermissionsOp());
inst.put(OP_SET_OWNER, new SetOwnerOp());
inst.put(OP_SET_NS_QUOTA, new SetNSQuotaOp());
inst.put(OP_CLEAR_NS_QUOTA, new ClearNSQuotaOp());
inst.put(OP_SET_QUOTA, new SetQuotaOp());
inst.put(OP_TIMES, new TimesOp());
inst.put(OP_SYMLINK, new SymlinkOp());
inst.put(OP_RENAME, new RenameOp());
inst.put(OP_REASSIGN_LEASE, new ReassignLeaseOp());
inst.put(OP_GET_DELEGATION_TOKEN, new GetDelegationTokenOp());
inst.put(OP_RENEW_DELEGATION_TOKEN, new RenewDelegationTokenOp());
inst.put(OP_CANCEL_DELEGATION_TOKEN, new CancelDelegationTokenOp());
inst.put(OP_UPDATE_MASTER_KEY, new UpdateMasterKeyOp());
inst.put(OP_START_LOG_SEGMENT, new LogSegmentOp(OP_START_LOG_SEGMENT));
inst.put(OP_END_LOG_SEGMENT, new LogSegmentOp(OP_END_LOG_SEGMENT));
inst.put(OP_UPDATE_BLOCKS, new UpdateBlocksOp());
inst.put(OP_TRUNCATE, new TruncateOp());
inst.put(OP_ALLOW_SNAPSHOT, new AllowSnapshotOp());
inst.put(OP_DISALLOW_SNAPSHOT, new DisallowSnapshotOp());
inst.put(OP_CREATE_SNAPSHOT, new CreateSnapshotOp());
inst.put(OP_DELETE_SNAPSHOT, new DeleteSnapshotOp());
inst.put(OP_RENAME_SNAPSHOT, new RenameSnapshotOp());
inst.put(OP_SET_GENSTAMP_V2, new SetGenstampV2Op());
inst.put(OP_ALLOCATE_BLOCK_ID, new AllocateBlockIdOp());
inst.put(OP_ADD_BLOCK, new AddBlockOp());
inst.put(OP_ADD_CACHE_DIRECTIVE,
new AddCacheDirectiveInfoOp());
inst.put(OP_MODIFY_CACHE_DIRECTIVE,
new ModifyCacheDirectiveInfoOp());
inst.put(OP_REMOVE_CACHE_DIRECTIVE,
new RemoveCacheDirectiveInfoOp());
inst.put(OP_ADD_CACHE_POOL, new AddCachePoolOp());
inst.put(OP_MODIFY_CACHE_POOL, new ModifyCachePoolOp());
inst.put(OP_REMOVE_CACHE_POOL, new RemoveCachePoolOp());
inst.put(OP_SET_ACL, new SetAclOp());
inst.put(OP_ROLLING_UPGRADE_START, new RollingUpgradeOp(
OP_ROLLING_UPGRADE_START, "start"));
inst.put(OP_ROLLING_UPGRADE_FINALIZE, new RollingUpgradeOp(
OP_ROLLING_UPGRADE_FINALIZE, "finalize"));
inst.put(OP_SET_XATTR, new SetXAttrOp());
inst.put(OP_REMOVE_XATTR, new RemoveXAttrOp());
inst.put(OP_SET_STORAGE_POLICY, new SetStoragePolicyOp());
inst.put(OP_APPEND, new AppendOp());
inst.put(OP_SET_QUOTA_BY_STORAGETYPE, new SetQuotaByStorageTypeOp());
}
public FSEditLogOp get(FSEditLogOpCodes opcode) {
return inst.get(opcode);
}
}
private static ImmutableMap<String, FsAction> fsActionMap() {
ImmutableMap.Builder<String, FsAction> b = ImmutableMap.builder();
for (FsAction v : FsAction.values())
b.put(v.SYMBOL, v);
return b.build();
}
private static final ImmutableMap<String, FsAction> FSACTION_SYMBOL_MAP
= fsActionMap();
/**
* Constructor for an EditLog Op. EditLog ops cannot be constructed
* directly, but only through Reader#readOp.
*/
@VisibleForTesting
protected FSEditLogOp(FSEditLogOpCodes opCode) {
this.opCode = opCode;
reset();
}
public long getTransactionId() {
Preconditions.checkState(txid != HdfsServerConstants.INVALID_TXID);
return txid;
}
public String getTransactionIdStr() {
return (txid == HdfsServerConstants.INVALID_TXID) ? "(none)" : "" + txid;
}
public boolean hasTransactionId() {
return (txid != HdfsServerConstants.INVALID_TXID);
}
public void setTransactionId(long txid) {
this.txid = txid;
}
public boolean hasRpcIds() {
return rpcClientId != RpcConstants.DUMMY_CLIENT_ID
&& rpcCallId != RpcConstants.INVALID_CALL_ID;
}
/** this has to be called after calling {@link #hasRpcIds()} */
public byte[] getClientId() {
Preconditions.checkState(rpcClientId != RpcConstants.DUMMY_CLIENT_ID);
return rpcClientId;
}
public void setRpcClientId(byte[] clientId) {
this.rpcClientId = clientId;
}
/** this has to be called after calling {@link #hasRpcIds()} */
public int getCallId() {
Preconditions.checkState(rpcCallId != RpcConstants.INVALID_CALL_ID);
return rpcCallId;
}
public void setRpcCallId(int callId) {
this.rpcCallId = callId;
}
abstract void readFields(DataInputStream in, int logVersion)
throws IOException;
public abstract void writeFields(DataOutputStream out)
throws IOException;
static interface BlockListUpdatingOp {
Block[] getBlocks();
String getPath();
boolean shouldCompleteLastBlock();
}
private static void writeRpcIds(final byte[] clientId, final int callId,
DataOutputStream out) throws IOException {
FSImageSerialization.writeBytes(clientId, out);
FSImageSerialization.writeInt(callId, out);
}
void readRpcIds(DataInputStream in, int logVersion)
throws IOException {
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_SUPPORT_RETRYCACHE, logVersion)) {
this.rpcClientId = FSImageSerialization.readBytes(in);
this.rpcCallId = FSImageSerialization.readInt(in);
}
}
void readRpcIdsFromXml(Stanza st) {
this.rpcClientId = st.hasChildren("RPC_CLIENTID") ?
ClientId.toBytes(st.getValue("RPC_CLIENTID"))
: RpcConstants.DUMMY_CLIENT_ID;
this.rpcCallId = st.hasChildren("RPC_CALLID") ?
Integer.parseInt(st.getValue("RPC_CALLID"))
: RpcConstants.INVALID_CALL_ID;
}
private static void appendRpcIdsToString(final StringBuilder builder,
final byte[] clientId, final int callId) {
builder.append(", RpcClientId=");
builder.append(ClientId.toString(clientId));
builder.append(", RpcCallId=");
builder.append(callId);
}
private static void appendRpcIdsToXml(ContentHandler contentHandler,
final byte[] clientId, final int callId) throws SAXException {
XMLUtils.addSaxString(contentHandler, "RPC_CLIENTID",
ClientId.toString(clientId));
XMLUtils.addSaxString(contentHandler, "RPC_CALLID",
Integer.toString(callId));
}
private static final class AclEditLogUtil {
private static final int ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET = 6;
private static final int ACL_EDITLOG_ENTRY_TYPE_OFFSET = 3;
private static final int ACL_EDITLOG_ENTRY_SCOPE_OFFSET = 5;
private static final int ACL_EDITLOG_PERM_MASK = 7;
private static final int ACL_EDITLOG_ENTRY_TYPE_MASK = 3;
private static final int ACL_EDITLOG_ENTRY_SCOPE_MASK = 1;
private static final FsAction[] FSACTION_VALUES = FsAction.values();
private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = AclEntryScope
.values();
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
.values();
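    // Each ACL entry is packed into a single byte, optionally followed by a
    // serialized name: bit 6 = has-name flag, bit 5 = scope, bits 3-4 = type,
    // bits 0-2 = permission (see read()/write() below).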
private static List<AclEntry> read(DataInputStream in, int logVersion)
throws IOException {
if (!NameNodeLayoutVersion.supports(Feature.EXTENDED_ACL, logVersion)) {
return null;
}
int size = in.readInt();
if (size == 0) {
return null;
}
List<AclEntry> aclEntries = Lists.newArrayListWithCapacity(size);
for (int i = 0; i < size; ++i) {
int v = in.read();
int p = v & ACL_EDITLOG_PERM_MASK;
int t = (v >> ACL_EDITLOG_ENTRY_TYPE_OFFSET)
& ACL_EDITLOG_ENTRY_TYPE_MASK;
int s = (v >> ACL_EDITLOG_ENTRY_SCOPE_OFFSET)
& ACL_EDITLOG_ENTRY_SCOPE_MASK;
boolean hasName = ((v >> ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET) & 1) == 1;
String name = hasName ? FSImageSerialization.readString(in) : null;
aclEntries.add(new AclEntry.Builder().setName(name)
.setPermission(FSACTION_VALUES[p])
.setScope(ACL_ENTRY_SCOPE_VALUES[s])
.setType(ACL_ENTRY_TYPE_VALUES[t]).build());
}
return aclEntries;
}
private static void write(List<AclEntry> aclEntries, DataOutputStream out)
throws IOException {
if (aclEntries == null) {
out.writeInt(0);
return;
}
out.writeInt(aclEntries.size());
for (AclEntry e : aclEntries) {
boolean hasName = e.getName() != null;
int v = (e.getScope().ordinal() << ACL_EDITLOG_ENTRY_SCOPE_OFFSET)
| (e.getType().ordinal() << ACL_EDITLOG_ENTRY_TYPE_OFFSET)
| e.getPermission().ordinal();
if (hasName) {
v |= 1 << ACL_EDITLOG_ENTRY_HAS_NAME_OFFSET;
}
out.write(v);
if (hasName) {
FSImageSerialization.writeString(e.getName(), out);
}
}
}
}
private static List<XAttr> readXAttrsFromEditLog(DataInputStream in,
int logVersion) throws IOException {
if (!NameNodeLayoutVersion.supports(NameNodeLayoutVersion.Feature.XATTRS,
logVersion)) {
return null;
}
XAttrEditLogProto proto = XAttrEditLogProto.parseDelimitedFrom(in);
return PBHelper.convertXAttrs(proto.getXAttrsList());
}
@SuppressWarnings("unchecked")
static abstract class AddCloseOp extends FSEditLogOp implements BlockListUpdatingOp {
int length;
long inodeId;
String path;
short replication;
long mtime;
long atime;
long blockSize;
Block[] blocks;
PermissionStatus permissions;
List<AclEntry> aclEntries;
List<XAttr> xAttrs;
String clientName;
String clientMachine;
boolean overwrite;
byte storagePolicyId;
private AddCloseOp(FSEditLogOpCodes opCode) {
super(opCode);
storagePolicyId = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
assert(opCode == OP_ADD || opCode == OP_CLOSE || opCode == OP_APPEND);
}
@Override
void resetSubFields() {
length = 0;
inodeId = 0L;
path = null;
replication = 0;
mtime = 0L;
atime = 0L;
blockSize = 0L;
blocks = null;
permissions = null;
aclEntries = null;
xAttrs = null;
clientName = null;
clientMachine = null;
overwrite = false;
storagePolicyId = 0;
}
<T extends AddCloseOp> T setInodeId(long inodeId) {
this.inodeId = inodeId;
return (T)this;
}
<T extends AddCloseOp> T setPath(String path) {
this.path = path;
return (T)this;
}
@Override
public String getPath() {
return path;
}
<T extends AddCloseOp> T setReplication(short replication) {
this.replication = replication;
return (T)this;
}
<T extends AddCloseOp> T setModificationTime(long mtime) {
this.mtime = mtime;
return (T)this;
}
<T extends AddCloseOp> T setAccessTime(long atime) {
this.atime = atime;
return (T)this;
}
<T extends AddCloseOp> T setBlockSize(long blockSize) {
this.blockSize = blockSize;
return (T)this;
}
<T extends AddCloseOp> T setBlocks(Block[] blocks) {
if (blocks.length > MAX_BLOCKS) {
throw new RuntimeException("Can't have more than " + MAX_BLOCKS +
" in an AddCloseOp.");
}
this.blocks = blocks;
return (T)this;
}
@Override
public Block[] getBlocks() {
return blocks;
}
<T extends AddCloseOp> T setPermissionStatus(PermissionStatus permissions) {
this.permissions = permissions;
return (T)this;
}
<T extends AddCloseOp> T setAclEntries(List<AclEntry> aclEntries) {
this.aclEntries = aclEntries;
return (T)this;
}
<T extends AddCloseOp> T setXAttrs(List<XAttr> xAttrs) {
this.xAttrs = xAttrs;
return (T)this;
}
<T extends AddCloseOp> T setClientName(String clientName) {
this.clientName = clientName;
return (T)this;
}
<T extends AddCloseOp> T setClientMachine(String clientMachine) {
this.clientMachine = clientMachine;
return (T)this;
}
<T extends AddCloseOp> T setOverwrite(boolean overwrite) {
this.overwrite = overwrite;
return (T)this;
}
<T extends AddCloseOp> T setStoragePolicyId(byte storagePolicyId) {
this.storagePolicyId = storagePolicyId;
return (T)this;
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeLong(inodeId, out);
FSImageSerialization.writeString(path, out);
FSImageSerialization.writeShort(replication, out);
FSImageSerialization.writeLong(mtime, out);
FSImageSerialization.writeLong(atime, out);
FSImageSerialization.writeLong(blockSize, out);
new ArrayWritable(Block.class, blocks).write(out);
permissions.write(out);
if (this.opCode == OP_ADD) {
AclEditLogUtil.write(aclEntries, out);
XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
b.build().writeDelimitedTo(out);
FSImageSerialization.writeString(clientName,out);
FSImageSerialization.writeString(clientMachine,out);
FSImageSerialization.writeBoolean(overwrite, out);
FSImageSerialization.writeByte(storagePolicyId, out);
// write clientId and callId
writeRpcIds(rpcClientId, rpcCallId, out);
}
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
}
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
this.inodeId = in.readLong();
} else {
// The inodeId should be updated when this editLogOp is applied
this.inodeId = HdfsConstants.GRANDFATHER_INODE_ID;
}
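      // Pre-EDITLOG_OP_OPTIMIZATION layouts prefix the op with an explicit
      // field count: the oldest layouts (logVersion > -17) wrote 4 fields and
      // later ones wrote 5, so any other value indicates a corrupt record.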
if ((-17 < logVersion && length != 4) ||
(logVersion <= -17 && length != 5 && !NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
throw new IOException("Incorrect data format." +
" logVersion is " + logVersion +
" but writables.length is " +
length + ". ");
}
this.path = FSImageSerialization.readString(in);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.replication = FSImageSerialization.readShort(in);
this.mtime = FSImageSerialization.readLong(in);
} else {
this.replication = readShort(in);
this.mtime = readLong(in);
}
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.FILE_ACCESS_TIME, logVersion)) {
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.atime = FSImageSerialization.readLong(in);
} else {
this.atime = readLong(in);
}
} else {
this.atime = 0;
}
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.blockSize = FSImageSerialization.readLong(in);
} else {
this.blockSize = readLong(in);
}
this.blocks = readBlocks(in, logVersion);
this.permissions = PermissionStatus.read(in);
if (this.opCode == OP_ADD) {
aclEntries = AclEditLogUtil.read(in, logVersion);
this.xAttrs = readXAttrsFromEditLog(in, logVersion);
this.clientName = FSImageSerialization.readString(in);
this.clientMachine = FSImageSerialization.readString(in);
if (NameNodeLayoutVersion.supports(
NameNodeLayoutVersion.Feature.CREATE_OVERWRITE, logVersion)) {
this.overwrite = FSImageSerialization.readBoolean(in);
} else {
this.overwrite = false;
}
if (NameNodeLayoutVersion.supports(
NameNodeLayoutVersion.Feature.BLOCK_STORAGE_POLICY, logVersion)) {
this.storagePolicyId = FSImageSerialization.readByte(in);
} else {
this.storagePolicyId = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}
// read clientId and callId
readRpcIds(in, logVersion);
} else {
this.clientName = "";
this.clientMachine = "";
}
}
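    // 1024 * 1024 * 64 = 67,108,864: sanity bound on the number of blocks a
    // single add/close op may carry, enforced both when building an op
    // (setBlocks) and when reading one back (readBlocks).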
static final public int MAX_BLOCKS = 1024 * 1024 * 64;
private static Block[] readBlocks(
DataInputStream in,
int logVersion) throws IOException {
int numBlocks = in.readInt();
if (numBlocks < 0) {
throw new IOException("invalid negative number of blocks");
} else if (numBlocks > MAX_BLOCKS) {
throw new IOException("invalid number of blocks: " + numBlocks +
". The maximum number of blocks per file is " + MAX_BLOCKS);
}
Block[] blocks = new Block[numBlocks];
for (int i = 0; i < numBlocks; i++) {
Block blk = new Block();
blk.readFields(in);
blocks[i] = blk;
}
return blocks;
}
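    /**
     * Renders every serialized field; shared by the {@code toString}
     * implementations of {@link AddOp} and {@link CloseOp}.
     */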
public String stringifyMembers() {
StringBuilder builder = new StringBuilder();
builder.append("[length=");
builder.append(length);
builder.append(", inodeId=");
builder.append(inodeId);
builder.append(", path=");
builder.append(path);
builder.append(", replication=");
builder.append(replication);
builder.append(", mtime=");
builder.append(mtime);
builder.append(", atime=");
builder.append(atime);
builder.append(", blockSize=");
builder.append(blockSize);
builder.append(", blocks=");
builder.append(Arrays.toString(blocks));
builder.append(", permissions=");
builder.append(permissions);
builder.append(", aclEntries=");
builder.append(aclEntries);
builder.append(", clientName=");
builder.append(clientName);
builder.append(", clientMachine=");
builder.append(clientMachine);
builder.append(", overwrite=");
builder.append(overwrite);
if (this.opCode == OP_ADD) {
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
}
builder.append(", storagePolicyId=");
builder.append(storagePolicyId);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "LENGTH",
Integer.toString(length));
XMLUtils.addSaxString(contentHandler, "INODEID",
Long.toString(inodeId));
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "REPLICATION",
Short.valueOf(replication).toString());
XMLUtils.addSaxString(contentHandler, "MTIME",
Long.toString(mtime));
XMLUtils.addSaxString(contentHandler, "ATIME",
Long.toString(atime));
XMLUtils.addSaxString(contentHandler, "BLOCKSIZE",
Long.toString(blockSize));
XMLUtils.addSaxString(contentHandler, "CLIENT_NAME", clientName);
XMLUtils.addSaxString(contentHandler, "CLIENT_MACHINE", clientMachine);
XMLUtils.addSaxString(contentHandler, "OVERWRITE",
Boolean.toString(overwrite));
for (Block b : blocks) {
FSEditLogOp.blockToXml(contentHandler, b);
}
FSEditLogOp.permissionStatusToXml(contentHandler, permissions);
if (this.opCode == OP_ADD) {
if (aclEntries != null) {
appendAclEntriesToXml(contentHandler, aclEntries);
}
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.length = Integer.parseInt(st.getValue("LENGTH"));
this.inodeId = Long.parseLong(st.getValue("INODEID"));
this.path = st.getValue("PATH");
this.replication = Short.valueOf(st.getValue("REPLICATION"));
this.mtime = Long.parseLong(st.getValue("MTIME"));
this.atime = Long.parseLong(st.getValue("ATIME"));
this.blockSize = Long.parseLong(st.getValue("BLOCKSIZE"));
this.clientName = st.getValue("CLIENT_NAME");
this.clientMachine = st.getValue("CLIENT_MACHINE");
this.overwrite = Boolean.parseBoolean(st.getValueOrNull("OVERWRITE"));
if (st.hasChildren("BLOCK")) {
List<Stanza> blocks = st.getChildren("BLOCK");
this.blocks = new Block[blocks.size()];
for (int i = 0; i < blocks.size(); i++) {
this.blocks[i] = FSEditLogOp.blockFromXml(blocks.get(i));
}
} else {
this.blocks = new Block[0];
}
this.permissions = permissionStatusFromXml(st);
aclEntries = readAclEntriesFromXml(st);
readRpcIdsFromXml(st);
}
}
/**
* {@literal @AtMostOnce} for {@link ClientProtocol#create} and
* {@link ClientProtocol#append}
*/
static class AddOp extends AddCloseOp {
private AddOp() {
super(OP_ADD);
}
static AddOp getInstance(OpInstanceCache cache) {
return (AddOp) cache.get(OP_ADD);
}
@Override
public boolean shouldCompleteLastBlock() {
return false;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("AddOp ");
builder.append(stringifyMembers());
return builder.toString();
}
}
/**
* Although {@link ClientProtocol#append} may also log a close op, we do
* not need to record the rpc ids here since a successful appendFile op will
* finally log an AddOp.
*/
static class CloseOp extends AddCloseOp {
private CloseOp() {
super(OP_CLOSE);
}
static CloseOp getInstance(OpInstanceCache cache) {
return (CloseOp)cache.get(OP_CLOSE);
}
@Override
public boolean shouldCompleteLastBlock() {
return true;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("CloseOp ");
builder.append(stringifyMembers());
return builder.toString();
}
}
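  /**
   * {@literal @AtMostOnce} for {@link ClientProtocol#append} when the append
   * is logged as its own op: records the path, the client name and machine,
   * whether a new block is started, and the rpc ids for the retry cache.
   */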
static class AppendOp extends FSEditLogOp {
String path;
String clientName;
String clientMachine;
boolean newBlock;
private AppendOp() {
super(OP_APPEND);
}
static AppendOp getInstance(OpInstanceCache cache) {
return (AppendOp) cache.get(OP_APPEND);
}
AppendOp setPath(String path) {
this.path = path;
return this;
}
AppendOp setClientName(String clientName) {
this.clientName = clientName;
return this;
}
AppendOp setClientMachine(String clientMachine) {
this.clientMachine = clientMachine;
return this;
}
AppendOp setNewBlock(boolean newBlock) {
this.newBlock = newBlock;
return this;
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("AppendOp ");
builder.append("[path=").append(path);
builder.append(", clientName=").append(clientName);
builder.append(", clientMachine=").append(clientMachine);
builder.append(", newBlock=").append(newBlock).append("]");
return builder.toString();
}
@Override
void resetSubFields() {
this.path = null;
this.clientName = null;
this.clientMachine = null;
this.newBlock = false;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
this.path = FSImageSerialization.readString(in);
this.clientName = FSImageSerialization.readString(in);
this.clientMachine = FSImageSerialization.readString(in);
this.newBlock = FSImageSerialization.readBoolean(in);
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(path, out);
FSImageSerialization.writeString(clientName, out);
FSImageSerialization.writeString(clientMachine, out);
FSImageSerialization.writeBoolean(newBlock, out);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "CLIENT_NAME", clientName);
XMLUtils.addSaxString(contentHandler, "CLIENT_MACHINE", clientMachine);
XMLUtils.addSaxString(contentHandler, "NEWBLOCK",
Boolean.toString(newBlock));
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.path = st.getValue("PATH");
this.clientName = st.getValue("CLIENT_NAME");
this.clientMachine = st.getValue("CLIENT_MACHINE");
this.newBlock = Boolean.parseBoolean(st.getValue("NEWBLOCK"));
readRpcIdsFromXml(st);
}
}
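  /**
   * Records the allocation of a new block for a file (see
   * {@link ClientProtocol#addBlock}): the path, the previous (penultimate)
   * block when the file already had one, and the newly added last block.
   */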
static class AddBlockOp extends FSEditLogOp {
private String path;
private Block penultimateBlock;
private Block lastBlock;
private AddBlockOp() {
super(OP_ADD_BLOCK);
}
static AddBlockOp getInstance(OpInstanceCache cache) {
return (AddBlockOp) cache.get(OP_ADD_BLOCK);
}
@Override
void resetSubFields() {
path = null;
penultimateBlock = null;
lastBlock = null;
}
AddBlockOp setPath(String path) {
this.path = path;
return this;
}
public String getPath() {
return path;
}
AddBlockOp setPenultimateBlock(Block pBlock) {
this.penultimateBlock = pBlock;
return this;
}
Block getPenultimateBlock() {
return penultimateBlock;
}
AddBlockOp setLastBlock(Block lastBlock) {
this.lastBlock = lastBlock;
return this;
}
Block getLastBlock() {
return lastBlock;
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(path, out);
int size = penultimateBlock != null ? 2 : 1;
Block[] blocks = new Block[size];
if (penultimateBlock != null) {
blocks[0] = penultimateBlock;
}
blocks[size - 1] = lastBlock;
FSImageSerialization.writeCompactBlockArray(blocks, out);
// clientId and callId
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
path = FSImageSerialization.readString(in);
Block[] blocks = FSImageSerialization.readCompactBlockArray(in,
logVersion);
Preconditions.checkState(blocks.length == 2 || blocks.length == 1);
penultimateBlock = blocks.length == 1 ? null : blocks[0];
lastBlock = blocks[blocks.length - 1];
readRpcIds(in, logVersion);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("AddBlockOp [path=")
.append(path)
.append(", penultimateBlock=")
.append(penultimateBlock == null ? "NULL" : penultimateBlock)
.append(", lastBlock=")
.append(lastBlock);
appendRpcIdsToString(sb, rpcClientId, rpcCallId);
sb.append("]");
return sb.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "PATH", path);
if (penultimateBlock != null) {
FSEditLogOp.blockToXml(contentHandler, penultimateBlock);
}
FSEditLogOp.blockToXml(contentHandler, lastBlock);
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.path = st.getValue("PATH");
List<Stanza> blocks = st.getChildren("BLOCK");
int size = blocks.size();
Preconditions.checkState(size == 1 || size == 2);
this.penultimateBlock = size == 2 ?
FSEditLogOp.blockFromXml(blocks.get(0)) : null;
this.lastBlock = FSEditLogOp.blockFromXml(blocks.get(size - 1));
readRpcIdsFromXml(st);
}
}
/**
* {@literal @AtMostOnce} for {@link ClientProtocol#updatePipeline}, but
* {@literal @Idempotent} for some other ops.
*/
static class UpdateBlocksOp extends FSEditLogOp implements BlockListUpdatingOp {
String path;
Block[] blocks;
private UpdateBlocksOp() {
super(OP_UPDATE_BLOCKS);
}
static UpdateBlocksOp getInstance(OpInstanceCache cache) {
return (UpdateBlocksOp)cache.get(OP_UPDATE_BLOCKS);
}
@Override
void resetSubFields() {
path = null;
blocks = null;
}
UpdateBlocksOp setPath(String path) {
this.path = path;
return this;
}
@Override
public String getPath() {
return path;
}
UpdateBlocksOp setBlocks(Block[] blocks) {
this.blocks = blocks;
return this;
}
@Override
public Block[] getBlocks() {
return blocks;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(path, out);
FSImageSerialization.writeCompactBlockArray(blocks, out);
// clientId and callId
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
path = FSImageSerialization.readString(in);
this.blocks = FSImageSerialization.readCompactBlockArray(
in, logVersion);
readRpcIds(in, logVersion);
}
@Override
public boolean shouldCompleteLastBlock() {
return false;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("UpdateBlocksOp [path=")
.append(path)
.append(", blocks=")
.append(Arrays.toString(blocks));
appendRpcIdsToString(sb, rpcClientId, rpcCallId);
sb.append("]");
return sb.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "PATH", path);
for (Block b : blocks) {
FSEditLogOp.blockToXml(contentHandler, b);
}
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.path = st.getValue("PATH");
List<Stanza> blocks = st.getChildren("BLOCK");
this.blocks = new Block[blocks.size()];
for (int i = 0; i < blocks.size(); i++) {
this.blocks[i] = FSEditLogOp.blockFromXml(blocks.get(i));
}
readRpcIdsFromXml(st);
}
}
/** {@literal @Idempotent} for {@link ClientProtocol#setReplication} */
static class SetReplicationOp extends FSEditLogOp {
String path;
short replication;
private SetReplicationOp() {
super(OP_SET_REPLICATION);
}
static SetReplicationOp getInstance(OpInstanceCache cache) {
return (SetReplicationOp)cache.get(OP_SET_REPLICATION);
}
@Override
void resetSubFields() {
path = null;
replication = 0;
}
SetReplicationOp setPath(String path) {
this.path = path;
return this;
}
SetReplicationOp setReplication(short replication) {
this.replication = replication;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(path, out);
FSImageSerialization.writeShort(replication, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.path = FSImageSerialization.readString(in);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.replication = FSImageSerialization.readShort(in);
} else {
this.replication = readShort(in);
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetReplicationOp [path=");
builder.append(path);
builder.append(", replication=");
builder.append(replication);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "REPLICATION",
Short.valueOf(replication).toString());
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.path = st.getValue("PATH");
this.replication = Short.valueOf(st.getValue("REPLICATION"));
}
}
/** {@literal @AtMostOnce} for {@link ClientProtocol#concat} */
static class ConcatDeleteOp extends FSEditLogOp {
int length;
String trg;
String[] srcs;
long timestamp;
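    // Upper bound on the number of concat sources a single op may record;
    // enforced both when building the op and when reading it back.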
    public static final int MAX_CONCAT_SRC = 1024 * 1024;
private ConcatDeleteOp() {
super(OP_CONCAT_DELETE);
}
static ConcatDeleteOp getInstance(OpInstanceCache cache) {
return (ConcatDeleteOp)cache.get(OP_CONCAT_DELETE);
}
@Override
void resetSubFields() {
length = 0;
trg = null;
srcs = null;
timestamp = 0L;
}
ConcatDeleteOp setTarget(String trg) {
this.trg = trg;
return this;
}
ConcatDeleteOp setSources(String[] srcs) {
if (srcs.length > MAX_CONCAT_SRC) {
throw new RuntimeException("ConcatDeleteOp can only have " +
MAX_CONCAT_SRC + " sources at most.");
}
this.srcs = srcs;
return this;
}
ConcatDeleteOp setTimestamp(long timestamp) {
this.timestamp = timestamp;
return this;
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(trg, out);
      DeprecatedUTF8[] info = new DeprecatedUTF8[srcs.length];
      for (int i = 0; i < srcs.length; i++) {
        info[i] = new DeprecatedUTF8(srcs[i]);
      }
new ArrayWritable(DeprecatedUTF8.class, info).write(out);
FSImageSerialization.writeLong(timestamp, out);
// rpc ids
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
if (length < 3) { // trg, srcs.., timestamp
throw new IOException("Incorrect data format " +
"for ConcatDeleteOp.");
}
}
this.trg = FSImageSerialization.readString(in);
int srcSize = 0;
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
srcSize = in.readInt();
} else {
srcSize = this.length - 1 - 1; // trg and timestamp
}
      if (srcSize < 0) {
        throw new IOException("Incorrect data format. "
            + "ConcatDeleteOp cannot have a negative number of data sources.");
      } else if (srcSize > MAX_CONCAT_SRC) {
        throw new IOException("Incorrect data format. "
            + "ConcatDeleteOp can have at most " + MAX_CONCAT_SRC
            + " sources, but this op has " + srcSize + " sources.");
      }
      this.srcs = new String[srcSize];
      for (int i = 0; i < srcSize; i++) {
        srcs[i] = FSImageSerialization.readString(in);
      }
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.timestamp = FSImageSerialization.readLong(in);
} else {
this.timestamp = readLong(in);
}
// read RPC ids if necessary
readRpcIds(in, logVersion);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ConcatDeleteOp [length=");
builder.append(length);
builder.append(", trg=");
builder.append(trg);
builder.append(", srcs=");
builder.append(Arrays.toString(srcs));
builder.append(", timestamp=");
builder.append(timestamp);
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "LENGTH",
Integer.toString(length));
XMLUtils.addSaxString(contentHandler, "TRG", trg);
XMLUtils.addSaxString(contentHandler, "TIMESTAMP",
Long.toString(timestamp));
contentHandler.startElement("", "", "SOURCES", new AttributesImpl());
for (int i = 0; i < srcs.length; ++i) {
XMLUtils.addSaxString(contentHandler,
"SOURCE" + (i + 1), srcs[i]);
}
contentHandler.endElement("", "", "SOURCES");
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.length = Integer.parseInt(st.getValue("LENGTH"));
this.trg = st.getValue("TRG");
this.timestamp = Long.parseLong(st.getValue("TIMESTAMP"));
List<Stanza> sources = st.getChildren("SOURCES");
int i = 0;
while (true) {
if (!sources.get(0).hasChildren("SOURCE" + (i + 1)))
break;
i++;
}
srcs = new String[i];
for (i = 0; i < srcs.length; i++) {
srcs[i] = sources.get(0).getValue("SOURCE" + (i + 1));
}
readRpcIdsFromXml(st);
}
}
/** {@literal @AtMostOnce} for {@link ClientProtocol#rename} */
static class RenameOldOp extends FSEditLogOp {
int length;
String src;
String dst;
long timestamp;
private RenameOldOp() {
super(OP_RENAME_OLD);
}
static RenameOldOp getInstance(OpInstanceCache cache) {
return (RenameOldOp)cache.get(OP_RENAME_OLD);
}
@Override
void resetSubFields() {
length = 0;
src = null;
dst = null;
timestamp = 0L;
}
RenameOldOp setSource(String src) {
this.src = src;
return this;
}
RenameOldOp setDestination(String dst) {
this.dst = dst;
return this;
}
RenameOldOp setTimestamp(long timestamp) {
this.timestamp = timestamp;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(src, out);
FSImageSerialization.writeString(dst, out);
FSImageSerialization.writeLong(timestamp, out);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
if (this.length != 3) {
throw new IOException("Incorrect data format. "
+ "Old rename operation.");
}
}
this.src = FSImageSerialization.readString(in);
this.dst = FSImageSerialization.readString(in);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.timestamp = FSImageSerialization.readLong(in);
} else {
this.timestamp = readLong(in);
}
// read RPC ids if necessary
readRpcIds(in, logVersion);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("RenameOldOp [length=");
builder.append(length);
builder.append(", src=");
builder.append(src);
builder.append(", dst=");
builder.append(dst);
builder.append(", timestamp=");
builder.append(timestamp);
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "LENGTH",
Integer.toString(length));
XMLUtils.addSaxString(contentHandler, "SRC", src);
XMLUtils.addSaxString(contentHandler, "DST", dst);
XMLUtils.addSaxString(contentHandler, "TIMESTAMP",
Long.toString(timestamp));
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.length = Integer.parseInt(st.getValue("LENGTH"));
this.src = st.getValue("SRC");
this.dst = st.getValue("DST");
this.timestamp = Long.parseLong(st.getValue("TIMESTAMP"));
readRpcIdsFromXml(st);
}
}
/** {@literal @AtMostOnce} for {@link ClientProtocol#delete} */
static class DeleteOp extends FSEditLogOp {
int length;
String path;
long timestamp;
private DeleteOp() {
super(OP_DELETE);
}
static DeleteOp getInstance(OpInstanceCache cache) {
return (DeleteOp)cache.get(OP_DELETE);
}
@Override
void resetSubFields() {
length = 0;
path = null;
timestamp = 0L;
}
DeleteOp setPath(String path) {
this.path = path;
return this;
}
DeleteOp setTimestamp(long timestamp) {
this.timestamp = timestamp;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(path, out);
FSImageSerialization.writeLong(timestamp, out);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
if (this.length != 2) {
throw new IOException("Incorrect data format. " + "delete operation.");
}
}
this.path = FSImageSerialization.readString(in);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.timestamp = FSImageSerialization.readLong(in);
} else {
this.timestamp = readLong(in);
}
// read RPC ids if necessary
readRpcIds(in, logVersion);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("DeleteOp [length=");
builder.append(length);
builder.append(", path=");
builder.append(path);
builder.append(", timestamp=");
builder.append(timestamp);
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "LENGTH",
Integer.toString(length));
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "TIMESTAMP",
Long.toString(timestamp));
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.length = Integer.parseInt(st.getValue("LENGTH"));
this.path = st.getValue("PATH");
this.timestamp = Long.parseLong(st.getValue("TIMESTAMP"));
readRpcIdsFromXml(st);
}
}
/** {@literal @Idempotent} for {@link ClientProtocol#mkdirs} */
static class MkdirOp extends FSEditLogOp {
int length;
long inodeId;
String path;
long timestamp;
PermissionStatus permissions;
List<AclEntry> aclEntries;
List<XAttr> xAttrs;
private MkdirOp() {
super(OP_MKDIR);
}
static MkdirOp getInstance(OpInstanceCache cache) {
return (MkdirOp)cache.get(OP_MKDIR);
}
@Override
void resetSubFields() {
length = 0;
inodeId = 0L;
path = null;
timestamp = 0L;
permissions = null;
aclEntries = null;
xAttrs = null;
}
MkdirOp setInodeId(long inodeId) {
this.inodeId = inodeId;
return this;
}
MkdirOp setPath(String path) {
this.path = path;
return this;
}
MkdirOp setTimestamp(long timestamp) {
this.timestamp = timestamp;
return this;
}
MkdirOp setPermissionStatus(PermissionStatus permissions) {
this.permissions = permissions;
return this;
}
MkdirOp setAclEntries(List<AclEntry> aclEntries) {
this.aclEntries = aclEntries;
return this;
}
MkdirOp setXAttrs(List<XAttr> xAttrs) {
this.xAttrs = xAttrs;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeLong(inodeId, out);
FSImageSerialization.writeString(path, out);
FSImageSerialization.writeLong(timestamp, out); // mtime
      FSImageSerialization.writeLong(timestamp, out); // atime, unused at this time
permissions.write(out);
AclEditLogUtil.write(aclEntries, out);
XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
b.build().writeDelimitedTo(out);
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
}
      if ((-17 < logVersion && length != 2) ||
          (logVersion <= -17 && length != 3
              && !NameNodeLayoutVersion.supports(
                  LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
        throw new IOException("Incorrect data format for mkdir operation.");
      }
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
this.inodeId = FSImageSerialization.readLong(in);
} else {
// This id should be updated when this editLogOp is applied
this.inodeId = HdfsConstants.GRANDFATHER_INODE_ID;
}
this.path = FSImageSerialization.readString(in);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.timestamp = FSImageSerialization.readLong(in);
} else {
this.timestamp = readLong(in);
}
// The disk format stores atimes for directories as well.
// However, currently this is not being updated/used because of
// performance reasons.
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.FILE_ACCESS_TIME, logVersion)) {
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
FSImageSerialization.readLong(in);
} else {
readLong(in);
}
}
this.permissions = PermissionStatus.read(in);
aclEntries = AclEditLogUtil.read(in, logVersion);
xAttrs = readXAttrsFromEditLog(in, logVersion);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("MkdirOp [length=");
builder.append(length);
builder.append(", inodeId=");
builder.append(inodeId);
builder.append(", path=");
builder.append(path);
builder.append(", timestamp=");
builder.append(timestamp);
builder.append(", permissions=");
builder.append(permissions);
builder.append(", aclEntries=");
builder.append(aclEntries);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append(", xAttrs=");
builder.append(xAttrs);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "LENGTH",
Integer.toString(length));
XMLUtils.addSaxString(contentHandler, "INODEID",
Long.toString(inodeId));
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "TIMESTAMP",
Long.toString(timestamp));
FSEditLogOp.permissionStatusToXml(contentHandler, permissions);
if (aclEntries != null) {
appendAclEntriesToXml(contentHandler, aclEntries);
}
if (xAttrs != null) {
appendXAttrsToXml(contentHandler, xAttrs);
}
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.length = Integer.parseInt(st.getValue("LENGTH"));
this.inodeId = Long.parseLong(st.getValue("INODEID"));
this.path = st.getValue("PATH");
this.timestamp = Long.parseLong(st.getValue("TIMESTAMP"));
this.permissions = permissionStatusFromXml(st);
aclEntries = readAclEntriesFromXml(st);
xAttrs = readXAttrsFromXml(st);
}
}
/**
* The corresponding operations are either {@literal @Idempotent} (
* {@link ClientProtocol#updateBlockForPipeline},
* {@link ClientProtocol#recoverLease}, {@link ClientProtocol#addBlock}) or
* already bound with other editlog op which records rpc ids (
* {@link ClientProtocol#create}). Thus no need to record rpc ids here.
*/
static class SetGenstampV1Op extends FSEditLogOp {
long genStampV1;
private SetGenstampV1Op() {
super(OP_SET_GENSTAMP_V1);
}
static SetGenstampV1Op getInstance(OpInstanceCache cache) {
return (SetGenstampV1Op)cache.get(OP_SET_GENSTAMP_V1);
}
@Override
void resetSubFields() {
genStampV1 = 0L;
}
SetGenstampV1Op setGenerationStamp(long genStamp) {
this.genStampV1 = genStamp;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeLong(genStampV1, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.genStampV1 = FSImageSerialization.readLong(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetGenstampOp [GenStamp=");
builder.append(genStampV1);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "GENSTAMP",
Long.toString(genStampV1));
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.genStampV1 = Long.parseLong(st.getValue("GENSTAMP"));
}
}
/** Similar with {@link SetGenstampV1Op} */
static class SetGenstampV2Op extends FSEditLogOp {
long genStampV2;
private SetGenstampV2Op() {
super(OP_SET_GENSTAMP_V2);
}
static SetGenstampV2Op getInstance(OpInstanceCache cache) {
return (SetGenstampV2Op)cache.get(OP_SET_GENSTAMP_V2);
}
@Override
void resetSubFields() {
genStampV2 = 0L;
}
SetGenstampV2Op setGenerationStamp(long genStamp) {
this.genStampV2 = genStamp;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeLong(genStampV2, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.genStampV2 = FSImageSerialization.readLong(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetGenstampV2Op [GenStampV2=");
builder.append(genStampV2);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "GENSTAMPV2",
Long.toString(genStampV2));
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.genStampV2 = Long.parseLong(st.getValue("GENSTAMPV2"));
}
}
/** {@literal @Idempotent} for {@link ClientProtocol#addBlock} */
static class AllocateBlockIdOp extends FSEditLogOp {
long blockId;
private AllocateBlockIdOp() {
super(OP_ALLOCATE_BLOCK_ID);
}
static AllocateBlockIdOp getInstance(OpInstanceCache cache) {
return (AllocateBlockIdOp)cache.get(OP_ALLOCATE_BLOCK_ID);
}
@Override
void resetSubFields() {
blockId = 0L;
}
AllocateBlockIdOp setBlockId(long blockId) {
this.blockId = blockId;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeLong(blockId, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.blockId = FSImageSerialization.readLong(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("AllocateBlockIdOp [blockId=");
builder.append(blockId);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "BLOCK_ID",
Long.toString(blockId));
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.blockId = Long.parseLong(st.getValue("BLOCK_ID"));
}
}
/** {@literal @Idempotent} for {@link ClientProtocol#setPermission} */
static class SetPermissionsOp extends FSEditLogOp {
String src;
FsPermission permissions;
private SetPermissionsOp() {
super(OP_SET_PERMISSIONS);
}
static SetPermissionsOp getInstance(OpInstanceCache cache) {
return (SetPermissionsOp)cache.get(OP_SET_PERMISSIONS);
}
@Override
void resetSubFields() {
src = null;
permissions = null;
}
SetPermissionsOp setSource(String src) {
this.src = src;
return this;
}
SetPermissionsOp setPermissions(FsPermission permissions) {
this.permissions = permissions;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(src, out);
permissions.write(out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.src = FSImageSerialization.readString(in);
this.permissions = FsPermission.read(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetPermissionsOp [src=");
builder.append(src);
builder.append(", permissions=");
builder.append(permissions);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
XMLUtils.addSaxString(contentHandler, "MODE",
Short.valueOf(permissions.toShort()).toString());
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.src = st.getValue("SRC");
this.permissions = new FsPermission(
Short.valueOf(st.getValue("MODE")));
}
}
/** {@literal @Idempotent} for {@link ClientProtocol#setOwner} */
static class SetOwnerOp extends FSEditLogOp {
String src;
String username;
String groupname;
private SetOwnerOp() {
super(OP_SET_OWNER);
}
static SetOwnerOp getInstance(OpInstanceCache cache) {
return (SetOwnerOp)cache.get(OP_SET_OWNER);
}
@Override
void resetSubFields() {
src = null;
username = null;
groupname = null;
}
SetOwnerOp setSource(String src) {
this.src = src;
return this;
}
SetOwnerOp setUser(String username) {
this.username = username;
return this;
}
SetOwnerOp setGroup(String groupname) {
this.groupname = groupname;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(src, out);
FSImageSerialization.writeString(username == null ? "" : username, out);
FSImageSerialization.writeString(groupname == null ? "" : groupname, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.src = FSImageSerialization.readString(in);
this.username = FSImageSerialization.readString_EmptyAsNull(in);
this.groupname = FSImageSerialization.readString_EmptyAsNull(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetOwnerOp [src=");
builder.append(src);
builder.append(", username=");
builder.append(username);
builder.append(", groupname=");
builder.append(groupname);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
if (username != null) {
XMLUtils.addSaxString(contentHandler, "USERNAME", username);
}
if (groupname != null) {
XMLUtils.addSaxString(contentHandler, "GROUPNAME", groupname);
}
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.src = st.getValue("SRC");
this.username = (st.hasChildren("USERNAME")) ?
st.getValue("USERNAME") : null;
this.groupname = (st.hasChildren("GROUPNAME")) ?
st.getValue("GROUPNAME") : null;
}
}
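  /**
   * Legacy op that set only the namespace quota. {@link #writeFields} throws,
   * so it is never written anymore and is kept only to read older edit logs;
   * see {@link SetQuotaOp} for the current quota op.
   */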
static class SetNSQuotaOp extends FSEditLogOp {
String src;
long nsQuota;
private SetNSQuotaOp() {
super(OP_SET_NS_QUOTA);
}
static SetNSQuotaOp getInstance(OpInstanceCache cache) {
return (SetNSQuotaOp)cache.get(OP_SET_NS_QUOTA);
}
@Override
void resetSubFields() {
src = null;
nsQuota = 0L;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
throw new IOException("Deprecated");
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.src = FSImageSerialization.readString(in);
this.nsQuota = FSImageSerialization.readLong(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetNSQuotaOp [src=");
builder.append(src);
builder.append(", nsQuota=");
builder.append(nsQuota);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
XMLUtils.addSaxString(contentHandler, "NSQUOTA",
Long.toString(nsQuota));
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.src = st.getValue("SRC");
this.nsQuota = Long.parseLong(st.getValue("NSQUOTA"));
}
}
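  /**
   * Legacy op that cleared the namespace quota. Like {@link SetNSQuotaOp} it
   * is only ever read back from older edit logs; {@link #writeFields} throws.
   */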
static class ClearNSQuotaOp extends FSEditLogOp {
String src;
private ClearNSQuotaOp() {
super(OP_CLEAR_NS_QUOTA);
}
static ClearNSQuotaOp getInstance(OpInstanceCache cache) {
return (ClearNSQuotaOp)cache.get(OP_CLEAR_NS_QUOTA);
}
@Override
void resetSubFields() {
src = null;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
throw new IOException("Deprecated");
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.src = FSImageSerialization.readString(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ClearNSQuotaOp [src=");
builder.append(src);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.src = st.getValue("SRC");
}
}
/** {@literal @Idempotent} for {@link ClientProtocol#setQuota} */
static class SetQuotaOp extends FSEditLogOp {
String src;
long nsQuota;
long dsQuota;
private SetQuotaOp() {
super(OP_SET_QUOTA);
}
static SetQuotaOp getInstance(OpInstanceCache cache) {
return (SetQuotaOp)cache.get(OP_SET_QUOTA);
}
@Override
void resetSubFields() {
src = null;
nsQuota = 0L;
dsQuota = 0L;
}
SetQuotaOp setSource(String src) {
this.src = src;
return this;
}
SetQuotaOp setNSQuota(long nsQuota) {
this.nsQuota = nsQuota;
return this;
}
SetQuotaOp setDSQuota(long dsQuota) {
this.dsQuota = dsQuota;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(src, out);
FSImageSerialization.writeLong(nsQuota, out);
FSImageSerialization.writeLong(dsQuota, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.src = FSImageSerialization.readString(in);
this.nsQuota = FSImageSerialization.readLong(in);
this.dsQuota = FSImageSerialization.readLong(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetQuotaOp [src=");
builder.append(src);
builder.append(", nsQuota=");
builder.append(nsQuota);
builder.append(", dsQuota=");
builder.append(dsQuota);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
XMLUtils.addSaxString(contentHandler, "NSQUOTA",
Long.toString(nsQuota));
XMLUtils.addSaxString(contentHandler, "DSQUOTA",
Long.toString(dsQuota));
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.src = st.getValue("SRC");
this.nsQuota = Long.parseLong(st.getValue("NSQUOTA"));
this.dsQuota = Long.parseLong(st.getValue("DSQUOTA"));
}
}
/** {@literal @Idempotent} for {@link ClientProtocol#setQuota} */
static class SetQuotaByStorageTypeOp extends FSEditLogOp {
String src;
long dsQuota;
StorageType type;
private SetQuotaByStorageTypeOp() {
super(OP_SET_QUOTA_BY_STORAGETYPE);
}
static SetQuotaByStorageTypeOp getInstance(OpInstanceCache cache) {
return (SetQuotaByStorageTypeOp)cache.get(OP_SET_QUOTA_BY_STORAGETYPE);
}
@Override
void resetSubFields() {
src = null;
dsQuota = -1L;
type = StorageType.DEFAULT;
}
SetQuotaByStorageTypeOp setSource(String src) {
this.src = src;
return this;
}
SetQuotaByStorageTypeOp setQuotaByStorageType(long dsQuota, StorageType type) {
this.type = type;
this.dsQuota = dsQuota;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(src, out);
FSImageSerialization.writeInt(type.ordinal(), out);
FSImageSerialization.writeLong(dsQuota, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.src = FSImageSerialization.readString(in);
this.type = StorageType.parseStorageType(FSImageSerialization.readInt(in));
this.dsQuota = FSImageSerialization.readLong(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetTypeQuotaOp [src=");
builder.append(src);
builder.append(", storageType=");
builder.append(type);
builder.append(", dsQuota=");
builder.append(dsQuota);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
XMLUtils.addSaxString(contentHandler, "STORAGETYPE",
Integer.toString(type.ordinal()));
XMLUtils.addSaxString(contentHandler, "DSQUOTA",
Long.toString(dsQuota));
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.src = st.getValue("SRC");
this.type = StorageType.parseStorageType(
Integer.parseInt(st.getValue("STORAGETYPE")));
this.dsQuota = Long.parseLong(st.getValue("DSQUOTA"));
}
}
/** {@literal @Idempotent} for {@link ClientProtocol#setTimes} */
static class TimesOp extends FSEditLogOp {
int length;
String path;
long mtime;
long atime;
private TimesOp() {
super(OP_TIMES);
}
static TimesOp getInstance(OpInstanceCache cache) {
return (TimesOp)cache.get(OP_TIMES);
}
@Override
void resetSubFields() {
length = 0;
path = null;
mtime = 0L;
atime = 0L;
}
TimesOp setPath(String path) {
this.path = path;
return this;
}
TimesOp setModificationTime(long mtime) {
this.mtime = mtime;
return this;
}
TimesOp setAccessTime(long atime) {
this.atime = atime;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(path, out);
FSImageSerialization.writeLong(mtime, out);
FSImageSerialization.writeLong(atime, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
if (length != 3) {
throw new IOException("Incorrect data format. " + "times operation.");
}
}
this.path = FSImageSerialization.readString(in);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.mtime = FSImageSerialization.readLong(in);
this.atime = FSImageSerialization.readLong(in);
} else {
this.mtime = readLong(in);
this.atime = readLong(in);
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("TimesOp [length=");
builder.append(length);
builder.append(", path=");
builder.append(path);
builder.append(", mtime=");
builder.append(mtime);
builder.append(", atime=");
builder.append(atime);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "LENGTH",
Integer.toString(length));
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "MTIME",
Long.toString(mtime));
XMLUtils.addSaxString(contentHandler, "ATIME",
Long.toString(atime));
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.length = Integer.parseInt(st.getValue("LENGTH"));
this.path = st.getValue("PATH");
this.mtime = Long.parseLong(st.getValue("MTIME"));
this.atime = Long.parseLong(st.getValue("ATIME"));
}
}
/** {@literal @AtMostOnce} for {@link ClientProtocol#createSymlink} */
static class SymlinkOp extends FSEditLogOp {
int length;
long inodeId;
String path;
String value;
long mtime;
long atime;
PermissionStatus permissionStatus;
private SymlinkOp() {
super(OP_SYMLINK);
}
static SymlinkOp getInstance(OpInstanceCache cache) {
return (SymlinkOp)cache.get(OP_SYMLINK);
}
@Override
void resetSubFields() {
length = 0;
inodeId = 0L;
path = null;
value = null;
mtime = 0L;
atime = 0L;
permissionStatus = null;
}
SymlinkOp setId(long inodeId) {
this.inodeId = inodeId;
return this;
}
SymlinkOp setPath(String path) {
this.path = path;
return this;
}
SymlinkOp setValue(String value) {
this.value = value;
return this;
}
SymlinkOp setModificationTime(long mtime) {
this.mtime = mtime;
return this;
}
SymlinkOp setAccessTime(long atime) {
this.atime = atime;
return this;
}
SymlinkOp setPermissionStatus(PermissionStatus permissionStatus) {
this.permissionStatus = permissionStatus;
return this;
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeLong(inodeId, out);
FSImageSerialization.writeString(path, out);
FSImageSerialization.writeString(value, out);
FSImageSerialization.writeLong(mtime, out);
FSImageSerialization.writeLong(atime, out);
permissionStatus.write(out);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
if (this.length != 4) {
throw new IOException("Incorrect data format. "
+ "symlink operation.");
}
}
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.ADD_INODE_ID, logVersion)) {
this.inodeId = FSImageSerialization.readLong(in);
} else {
// This id should be updated when the editLogOp is applied
this.inodeId = HdfsConstants.GRANDFATHER_INODE_ID;
}
this.path = FSImageSerialization.readString(in);
this.value = FSImageSerialization.readString(in);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.mtime = FSImageSerialization.readLong(in);
this.atime = FSImageSerialization.readLong(in);
} else {
this.mtime = readLong(in);
this.atime = readLong(in);
}
this.permissionStatus = PermissionStatus.read(in);
// read RPC ids if necessary
readRpcIds(in, logVersion);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SymlinkOp [length=");
builder.append(length);
builder.append(", inodeId=");
builder.append(inodeId);
builder.append(", path=");
builder.append(path);
builder.append(", value=");
builder.append(value);
builder.append(", mtime=");
builder.append(mtime);
builder.append(", atime=");
builder.append(atime);
builder.append(", permissionStatus=");
builder.append(permissionStatus);
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "LENGTH",
Integer.toString(length));
XMLUtils.addSaxString(contentHandler, "INODEID",
Long.toString(inodeId));
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "VALUE", value);
XMLUtils.addSaxString(contentHandler, "MTIME",
Long.toString(mtime));
XMLUtils.addSaxString(contentHandler, "ATIME",
Long.toString(atime));
FSEditLogOp.permissionStatusToXml(contentHandler, permissionStatus);
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.length = Integer.parseInt(st.getValue("LENGTH"));
this.inodeId = Long.parseLong(st.getValue("INODEID"));
this.path = st.getValue("PATH");
this.value = st.getValue("VALUE");
this.mtime = Long.parseLong(st.getValue("MTIME"));
this.atime = Long.parseLong(st.getValue("ATIME"));
this.permissionStatus = permissionStatusFromXml(st);
readRpcIdsFromXml(st);
}
}
/** {@literal @AtMostOnce} for {@link ClientProtocol#rename2} */
static class RenameOp extends FSEditLogOp {
int length;
String src;
String dst;
long timestamp;
Rename[] options;
private RenameOp() {
super(OP_RENAME);
}
static RenameOp getInstance(OpInstanceCache cache) {
return (RenameOp)cache.get(OP_RENAME);
}
@Override
void resetSubFields() {
length = 0;
src = null;
dst = null;
timestamp = 0L;
options = null;
}
RenameOp setSource(String src) {
this.src = src;
return this;
}
RenameOp setDestination(String dst) {
this.dst = dst;
return this;
}
RenameOp setTimestamp(long timestamp) {
this.timestamp = timestamp;
return this;
}
RenameOp setOptions(Rename[] options) {
this.options = options;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(src, out);
FSImageSerialization.writeString(dst, out);
FSImageSerialization.writeLong(timestamp, out);
toBytesWritable(options).write(out);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
if (this.length != 3) {
throw new IOException("Incorrect data format. " + "Rename operation.");
}
}
this.src = FSImageSerialization.readString(in);
this.dst = FSImageSerialization.readString(in);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.timestamp = FSImageSerialization.readLong(in);
} else {
this.timestamp = readLong(in);
}
this.options = readRenameOptions(in);
// read RPC ids if necessary
readRpcIds(in, logVersion);
}
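    /**
     * Decodes the rename options from the one-byte-per-option encoding
     * written by {@link #toBytesWritable}.
     */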
private static Rename[] readRenameOptions(DataInputStream in) throws IOException {
BytesWritable writable = new BytesWritable();
writable.readFields(in);
byte[] bytes = writable.getBytes();
Rename[] options = new Rename[bytes.length];
for (int i = 0; i < bytes.length; i++) {
options[i] = Rename.valueOf(bytes[i]);
}
return options;
}
static BytesWritable toBytesWritable(Rename... options) {
byte[] bytes = new byte[options.length];
for (int i = 0; i < options.length; i++) {
bytes[i] = options[i].value();
}
return new BytesWritable(bytes);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("RenameOp [length=");
builder.append(length);
builder.append(", src=");
builder.append(src);
builder.append(", dst=");
builder.append(dst);
builder.append(", timestamp=");
builder.append(timestamp);
builder.append(", options=");
builder.append(Arrays.toString(options));
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "LENGTH",
Integer.toString(length));
XMLUtils.addSaxString(contentHandler, "SRC", src);
XMLUtils.addSaxString(contentHandler, "DST", dst);
XMLUtils.addSaxString(contentHandler, "TIMESTAMP",
Long.toString(timestamp));
StringBuilder bld = new StringBuilder();
String prefix = "";
for (Rename r : options) {
bld.append(prefix).append(r.toString());
prefix = "|";
}
XMLUtils.addSaxString(contentHandler, "OPTIONS", bld.toString());
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.length = Integer.parseInt(st.getValue("LENGTH"));
this.src = st.getValue("SRC");
this.dst = st.getValue("DST");
this.timestamp = Long.parseLong(st.getValue("TIMESTAMP"));
String opts = st.getValue("OPTIONS");
      String[] o = opts.split("\\|");
this.options = new Rename[o.length];
for (int i = 0; i < o.length; i++) {
if (o[i].equals(""))
continue;
try {
this.options[i] = Rename.valueOf(o[i]);
} finally {
if (this.options[i] == null) {
System.err.println("error parsing Rename value: \"" + o[i] + "\"");
}
}
}
readRpcIdsFromXml(st);
}
}
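  /**
   * Records a truncate of a file ({@link ClientProtocol#truncate}): the path,
   * the client name and machine, the new length, the timestamp, and the last
   * block affected by the truncate, if any.
   */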
static class TruncateOp extends FSEditLogOp {
String src;
String clientName;
String clientMachine;
long newLength;
long timestamp;
Block truncateBlock;
private TruncateOp() {
super(OP_TRUNCATE);
}
static TruncateOp getInstance(OpInstanceCache cache) {
return (TruncateOp)cache.get(OP_TRUNCATE);
}
@Override
void resetSubFields() {
src = null;
clientName = null;
clientMachine = null;
newLength = 0L;
      timestamp = 0L;
      truncateBlock = null;
    }
TruncateOp setPath(String src) {
this.src = src;
return this;
}
TruncateOp setClientName(String clientName) {
this.clientName = clientName;
return this;
}
TruncateOp setClientMachine(String clientMachine) {
this.clientMachine = clientMachine;
return this;
}
TruncateOp setNewLength(long newLength) {
this.newLength = newLength;
return this;
}
TruncateOp setTimestamp(long timestamp) {
this.timestamp = timestamp;
return this;
}
TruncateOp setTruncateBlock(Block truncateBlock) {
this.truncateBlock = truncateBlock;
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
src = FSImageSerialization.readString(in);
clientName = FSImageSerialization.readString(in);
clientMachine = FSImageSerialization.readString(in);
newLength = FSImageSerialization.readLong(in);
timestamp = FSImageSerialization.readLong(in);
Block[] blocks =
FSImageSerialization.readCompactBlockArray(in, logVersion);
assert blocks.length <= 1 : "Truncate op should have 1 or 0 blocks";
truncateBlock = (blocks.length == 0) ? null : blocks[0];
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(src, out);
FSImageSerialization.writeString(clientName, out);
FSImageSerialization.writeString(clientMachine, out);
FSImageSerialization.writeLong(newLength, out);
FSImageSerialization.writeLong(timestamp, out);
int size = truncateBlock != null ? 1 : 0;
Block[] blocks = new Block[size];
if (truncateBlock != null) {
blocks[0] = truncateBlock;
}
FSImageSerialization.writeCompactBlockArray(blocks, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
XMLUtils.addSaxString(contentHandler, "CLIENTNAME", clientName);
XMLUtils.addSaxString(contentHandler, "CLIENTMACHINE", clientMachine);
XMLUtils.addSaxString(contentHandler, "NEWLENGTH",
Long.toString(newLength));
XMLUtils.addSaxString(contentHandler, "TIMESTAMP",
Long.toString(timestamp));
      if (truncateBlock != null) {
        FSEditLogOp.blockToXml(contentHandler, truncateBlock);
      }
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.src = st.getValue("SRC");
this.clientName = st.getValue("CLIENTNAME");
this.clientMachine = st.getValue("CLIENTMACHINE");
this.newLength = Long.parseLong(st.getValue("NEWLENGTH"));
this.timestamp = Long.parseLong(st.getValue("TIMESTAMP"));
if (st.hasChildren("BLOCK"))
this.truncateBlock = FSEditLogOp.blockFromXml(st);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("TruncateOp [src=");
builder.append(src);
builder.append(", clientName=");
builder.append(clientName);
builder.append(", clientMachine=");
builder.append(clientMachine);
builder.append(", newLength=");
builder.append(newLength);
builder.append(", timestamp=");
builder.append(timestamp);
builder.append(", truncateBlock=");
builder.append(truncateBlock);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
}
/**
* {@literal @Idempotent} for {@link ClientProtocol#recoverLease}. In the
* meanwhile, startFile and appendFile both have their own corresponding
* editlog op.
*/
static class ReassignLeaseOp extends FSEditLogOp {
String leaseHolder;
String path;
String newHolder;
private ReassignLeaseOp() {
super(OP_REASSIGN_LEASE);
}
static ReassignLeaseOp getInstance(OpInstanceCache cache) {
return (ReassignLeaseOp)cache.get(OP_REASSIGN_LEASE);
}
@Override
void resetSubFields() {
leaseHolder = null;
path = null;
newHolder = null;
}
ReassignLeaseOp setLeaseHolder(String leaseHolder) {
this.leaseHolder = leaseHolder;
return this;
}
ReassignLeaseOp setPath(String path) {
this.path = path;
return this;
}
ReassignLeaseOp setNewHolder(String newHolder) {
this.newHolder = newHolder;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(leaseHolder, out);
FSImageSerialization.writeString(path, out);
FSImageSerialization.writeString(newHolder, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.leaseHolder = FSImageSerialization.readString(in);
this.path = FSImageSerialization.readString(in);
this.newHolder = FSImageSerialization.readString(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ReassignLeaseOp [leaseHolder=");
builder.append(leaseHolder);
builder.append(", path=");
builder.append(path);
builder.append(", newHolder=");
builder.append(newHolder);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "LEASEHOLDER", leaseHolder);
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "NEWHOLDER", newHolder);
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.leaseHolder = st.getValue("LEASEHOLDER");
this.path = st.getValue("PATH");
this.newHolder = st.getValue("NEWHOLDER");
}
}
/** {@literal @Idempotent} for {@link ClientProtocol#getDelegationToken} */
static class GetDelegationTokenOp extends FSEditLogOp {
DelegationTokenIdentifier token;
long expiryTime;
private GetDelegationTokenOp() {
super(OP_GET_DELEGATION_TOKEN);
}
static GetDelegationTokenOp getInstance(OpInstanceCache cache) {
return (GetDelegationTokenOp)cache.get(OP_GET_DELEGATION_TOKEN);
}
@Override
void resetSubFields() {
token = null;
expiryTime = 0L;
}
GetDelegationTokenOp setDelegationTokenIdentifier(
DelegationTokenIdentifier token) {
this.token = token;
return this;
}
GetDelegationTokenOp setExpiryTime(long expiryTime) {
this.expiryTime = expiryTime;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
token.write(out);
FSImageSerialization.writeLong(expiryTime, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.token = new DelegationTokenIdentifier();
this.token.readFields(in);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.expiryTime = FSImageSerialization.readLong(in);
} else {
this.expiryTime = readLong(in);
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("GetDelegationTokenOp [token=");
builder.append(token);
builder.append(", expiryTime=");
builder.append(expiryTime);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
FSEditLogOp.delegationTokenToXml(contentHandler, token);
XMLUtils.addSaxString(contentHandler, "EXPIRY_TIME",
Long.toString(expiryTime));
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.token = delegationTokenFromXml(st.getChildren(
"DELEGATION_TOKEN_IDENTIFIER").get(0));
this.expiryTime = Long.parseLong(st.getValue("EXPIRY_TIME"));
}
}
/** {@literal @Idempotent} for {@link ClientProtocol#renewDelegationToken} */
static class RenewDelegationTokenOp extends FSEditLogOp {
DelegationTokenIdentifier token;
long expiryTime;
private RenewDelegationTokenOp() {
super(OP_RENEW_DELEGATION_TOKEN);
}
static RenewDelegationTokenOp getInstance(OpInstanceCache cache) {
return (RenewDelegationTokenOp)cache.get(OP_RENEW_DELEGATION_TOKEN);
}
@Override
void resetSubFields() {
token = null;
expiryTime = 0L;
}
RenewDelegationTokenOp setDelegationTokenIdentifier(
DelegationTokenIdentifier token) {
this.token = token;
return this;
}
RenewDelegationTokenOp setExpiryTime(long expiryTime) {
this.expiryTime = expiryTime;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
token.write(out);
FSImageSerialization.writeLong(expiryTime, out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.token = new DelegationTokenIdentifier();
this.token.readFields(in);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.expiryTime = FSImageSerialization.readLong(in);
} else {
this.expiryTime = readLong(in);
}
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("RenewDelegationTokenOp [token=");
builder.append(token);
builder.append(", expiryTime=");
builder.append(expiryTime);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
FSEditLogOp.delegationTokenToXml(contentHandler, token);
XMLUtils.addSaxString(contentHandler, "EXPIRY_TIME",
Long.toString(expiryTime));
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.token = delegationTokenFromXml(st.getChildren(
"DELEGATION_TOKEN_IDENTIFIER").get(0));
this.expiryTime = Long.parseLong(st.getValue("EXPIRY_TIME"));
}
}
/** {@literal @Idempotent} for {@link ClientProtocol#cancelDelegationToken} */
static class CancelDelegationTokenOp extends FSEditLogOp {
DelegationTokenIdentifier token;
private CancelDelegationTokenOp() {
super(OP_CANCEL_DELEGATION_TOKEN);
}
static CancelDelegationTokenOp getInstance(OpInstanceCache cache) {
return (CancelDelegationTokenOp)cache.get(OP_CANCEL_DELEGATION_TOKEN);
}
@Override
void resetSubFields() {
token = null;
}
CancelDelegationTokenOp setDelegationTokenIdentifier(
DelegationTokenIdentifier token) {
this.token = token;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
token.write(out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.token = new DelegationTokenIdentifier();
this.token.readFields(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("CancelDelegationTokenOp [token=");
builder.append(token);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
FSEditLogOp.delegationTokenToXml(contentHandler, token);
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.token = delegationTokenFromXml(st.getChildren(
"DELEGATION_TOKEN_IDENTIFIER").get(0));
}
}
static class UpdateMasterKeyOp extends FSEditLogOp {
DelegationKey key;
private UpdateMasterKeyOp() {
super(OP_UPDATE_MASTER_KEY);
}
static UpdateMasterKeyOp getInstance(OpInstanceCache cache) {
return (UpdateMasterKeyOp)cache.get(OP_UPDATE_MASTER_KEY);
}
@Override
void resetSubFields() {
key = null;
}
UpdateMasterKeyOp setDelegationKey(DelegationKey key) {
this.key = key;
return this;
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
key.write(out);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.key = new DelegationKey();
this.key.readFields(in);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("UpdateMasterKeyOp [key=");
builder.append(key);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
FSEditLogOp.delegationKeyToXml(contentHandler, key);
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
this.key = delegationKeyFromXml(st.getChildren(
"DELEGATION_KEY").get(0));
}
}
static class LogSegmentOp extends FSEditLogOp {
private LogSegmentOp(FSEditLogOpCodes code) {
super(code);
assert code == OP_START_LOG_SEGMENT ||
code == OP_END_LOG_SEGMENT : "Bad op: " + code;
}
static LogSegmentOp getInstance(OpInstanceCache cache,
FSEditLogOpCodes code) {
return (LogSegmentOp)cache.get(code);
}
@Override
void resetSubFields() {
// no data stored in these ops yet
}
@Override
public void readFields(DataInputStream in, int logVersion)
throws IOException {
// no data stored in these ops yet
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
// no data stored
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("LogSegmentOp [opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
// no data stored
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
// do nothing
}
}
static class InvalidOp extends FSEditLogOp {
private InvalidOp() {
super(OP_INVALID);
}
static InvalidOp getInstance(OpInstanceCache cache) {
return (InvalidOp)cache.get(OP_INVALID);
}
@Override
void resetSubFields() {
}
@Override
public
void writeFields(DataOutputStream out) throws IOException {
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
// nothing to read
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("InvalidOp [opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
// no data stored
}
@Override void fromXml(Stanza st) throws InvalidXmlException {
// do nothing
}
}
/**
* Operation corresponding to creating a snapshot.
* {@literal @AtMostOnce} for {@link ClientProtocol#createSnapshot}.
*/
static class CreateSnapshotOp extends FSEditLogOp {
String snapshotRoot;
String snapshotName;
public CreateSnapshotOp() {
super(OP_CREATE_SNAPSHOT);
}
static CreateSnapshotOp getInstance(OpInstanceCache cache) {
return (CreateSnapshotOp)cache.get(OP_CREATE_SNAPSHOT);
}
@Override
void resetSubFields() {
snapshotRoot = null;
snapshotName = null;
}
CreateSnapshotOp setSnapshotName(String snapName) {
this.snapshotName = snapName;
return this;
}
public CreateSnapshotOp setSnapshotRoot(String snapRoot) {
snapshotRoot = snapRoot;
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
snapshotRoot = FSImageSerialization.readString(in);
snapshotName = FSImageSerialization.readString(in);
// read RPC ids if necessary
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(snapshotRoot, out);
FSImageSerialization.writeString(snapshotName, out);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SNAPSHOTROOT", snapshotRoot);
XMLUtils.addSaxString(contentHandler, "SNAPSHOTNAME", snapshotName);
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
snapshotRoot = st.getValue("SNAPSHOTROOT");
snapshotName = st.getValue("SNAPSHOTNAME");
readRpcIdsFromXml(st);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("CreateSnapshotOp [snapshotRoot=");
builder.append(snapshotRoot);
builder.append(", snapshotName=");
builder.append(snapshotName);
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
return builder.toString();
}
}
/**
   * Operation corresponding to deleting a snapshot.
* {@literal @AtMostOnce} for {@link ClientProtocol#deleteSnapshot}.
*/
static class DeleteSnapshotOp extends FSEditLogOp {
String snapshotRoot;
String snapshotName;
DeleteSnapshotOp() {
super(OP_DELETE_SNAPSHOT);
}
static DeleteSnapshotOp getInstance(OpInstanceCache cache) {
return (DeleteSnapshotOp)cache.get(OP_DELETE_SNAPSHOT);
}
@Override
void resetSubFields() {
snapshotRoot = null;
snapshotName = null;
}
DeleteSnapshotOp setSnapshotName(String snapName) {
this.snapshotName = snapName;
return this;
}
DeleteSnapshotOp setSnapshotRoot(String snapRoot) {
snapshotRoot = snapRoot;
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
snapshotRoot = FSImageSerialization.readString(in);
snapshotName = FSImageSerialization.readString(in);
// read RPC ids if necessary
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(snapshotRoot, out);
FSImageSerialization.writeString(snapshotName, out);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SNAPSHOTROOT", snapshotRoot);
XMLUtils.addSaxString(contentHandler, "SNAPSHOTNAME", snapshotName);
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
snapshotRoot = st.getValue("SNAPSHOTROOT");
snapshotName = st.getValue("SNAPSHOTNAME");
readRpcIdsFromXml(st);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("DeleteSnapshotOp [snapshotRoot=");
builder.append(snapshotRoot);
builder.append(", snapshotName=");
builder.append(snapshotName);
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
return builder.toString();
}
}
/**
   * Operation corresponding to renaming a snapshot.
* {@literal @AtMostOnce} for {@link ClientProtocol#renameSnapshot}.
*/
static class RenameSnapshotOp extends FSEditLogOp {
String snapshotRoot;
String snapshotOldName;
String snapshotNewName;
RenameSnapshotOp() {
super(OP_RENAME_SNAPSHOT);
}
static RenameSnapshotOp getInstance(OpInstanceCache cache) {
return (RenameSnapshotOp) cache.get(OP_RENAME_SNAPSHOT);
}
@Override
void resetSubFields() {
snapshotRoot = null;
snapshotOldName = null;
snapshotNewName = null;
}
RenameSnapshotOp setSnapshotOldName(String snapshotOldName) {
this.snapshotOldName = snapshotOldName;
return this;
}
RenameSnapshotOp setSnapshotNewName(String snapshotNewName) {
this.snapshotNewName = snapshotNewName;
return this;
}
RenameSnapshotOp setSnapshotRoot(String snapshotRoot) {
this.snapshotRoot = snapshotRoot;
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
snapshotRoot = FSImageSerialization.readString(in);
snapshotOldName = FSImageSerialization.readString(in);
snapshotNewName = FSImageSerialization.readString(in);
// read RPC ids if necessary
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(snapshotRoot, out);
FSImageSerialization.writeString(snapshotOldName, out);
FSImageSerialization.writeString(snapshotNewName, out);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SNAPSHOTROOT", snapshotRoot);
XMLUtils.addSaxString(contentHandler, "SNAPSHOTOLDNAME", snapshotOldName);
XMLUtils.addSaxString(contentHandler, "SNAPSHOTNEWNAME", snapshotNewName);
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
snapshotRoot = st.getValue("SNAPSHOTROOT");
snapshotOldName = st.getValue("SNAPSHOTOLDNAME");
snapshotNewName = st.getValue("SNAPSHOTNEWNAME");
readRpcIdsFromXml(st);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("RenameSnapshotOp [snapshotRoot=");
builder.append(snapshotRoot);
builder.append(", snapshotOldName=");
builder.append(snapshotOldName);
builder.append(", snapshotNewName=");
builder.append(snapshotNewName);
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
return builder.toString();
}
}
/**
   * Operation corresponding to allowing snapshot creation on a directory
*/
static class AllowSnapshotOp extends FSEditLogOp { // @Idempotent
String snapshotRoot;
public AllowSnapshotOp() {
super(OP_ALLOW_SNAPSHOT);
}
public AllowSnapshotOp(String snapRoot) {
super(OP_ALLOW_SNAPSHOT);
snapshotRoot = snapRoot;
}
static AllowSnapshotOp getInstance(OpInstanceCache cache) {
return (AllowSnapshotOp) cache.get(OP_ALLOW_SNAPSHOT);
}
@Override
void resetSubFields() {
snapshotRoot = null;
}
public AllowSnapshotOp setSnapshotRoot(String snapRoot) {
snapshotRoot = snapRoot;
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
snapshotRoot = FSImageSerialization.readString(in);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(snapshotRoot, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SNAPSHOTROOT", snapshotRoot);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
snapshotRoot = st.getValue("SNAPSHOTROOT");
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("AllowSnapshotOp [snapshotRoot=");
builder.append(snapshotRoot);
builder.append("]");
return builder.toString();
}
}
/**
   * Operation corresponding to disallowing snapshot creation on a directory
*/
static class DisallowSnapshotOp extends FSEditLogOp { // @Idempotent
String snapshotRoot;
public DisallowSnapshotOp() {
super(OP_DISALLOW_SNAPSHOT);
}
public DisallowSnapshotOp(String snapRoot) {
super(OP_DISALLOW_SNAPSHOT);
snapshotRoot = snapRoot;
}
static DisallowSnapshotOp getInstance(OpInstanceCache cache) {
return (DisallowSnapshotOp) cache.get(OP_DISALLOW_SNAPSHOT);
}
    @Override
    void resetSubFields() {
snapshotRoot = null;
}
public DisallowSnapshotOp setSnapshotRoot(String snapRoot) {
snapshotRoot = snapRoot;
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
snapshotRoot = FSImageSerialization.readString(in);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(snapshotRoot, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SNAPSHOTROOT", snapshotRoot);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
snapshotRoot = st.getValue("SNAPSHOTROOT");
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("DisallowSnapshotOp [snapshotRoot=");
builder.append(snapshotRoot);
builder.append("]");
return builder.toString();
}
}
/**
* {@literal @AtMostOnce} for
* {@link ClientProtocol#addCacheDirective}
*/
static class AddCacheDirectiveInfoOp extends FSEditLogOp {
CacheDirectiveInfo directive;
public AddCacheDirectiveInfoOp() {
super(OP_ADD_CACHE_DIRECTIVE);
}
static AddCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
return (AddCacheDirectiveInfoOp) cache
.get(OP_ADD_CACHE_DIRECTIVE);
}
@Override
void resetSubFields() {
directive = null;
}
public AddCacheDirectiveInfoOp setDirective(
CacheDirectiveInfo directive) {
this.directive = directive;
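      // an add must be fully specified; contrast with
      // ModifyCacheDirectiveInfoOp below, which only requires the id to be set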
assert(directive.getId() != null);
assert(directive.getPath() != null);
assert(directive.getReplication() != null);
assert(directive.getPool() != null);
assert(directive.getExpiration() != null);
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
directive = FSImageSerialization.readCacheDirectiveInfo(in);
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeCacheDirectiveInfo(out, directive);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
FSImageSerialization.writeCacheDirectiveInfo(contentHandler, directive);
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
directive = FSImageSerialization.readCacheDirectiveInfo(st);
readRpcIdsFromXml(st);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("AddCacheDirectiveInfo [");
builder.append("id=" + directive.getId() + ",");
builder.append("path=" + directive.getPath().toUri().getPath() + ",");
builder.append("replication=" + directive.getReplication() + ",");
builder.append("pool=" + directive.getPool() + ",");
builder.append("expiration=" + directive.getExpiration().getMillis());
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
return builder.toString();
}
}
/**
* {@literal @AtMostOnce} for
* {@link ClientProtocol#modifyCacheDirective}
*/
static class ModifyCacheDirectiveInfoOp extends FSEditLogOp {
CacheDirectiveInfo directive;
public ModifyCacheDirectiveInfoOp() {
super(OP_MODIFY_CACHE_DIRECTIVE);
}
static ModifyCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
return (ModifyCacheDirectiveInfoOp) cache
.get(OP_MODIFY_CACHE_DIRECTIVE);
}
@Override
void resetSubFields() {
directive = null;
}
public ModifyCacheDirectiveInfoOp setDirective(
CacheDirectiveInfo directive) {
this.directive = directive;
assert(directive.getId() != null);
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
this.directive = FSImageSerialization.readCacheDirectiveInfo(in);
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeCacheDirectiveInfo(out, directive);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
FSImageSerialization.writeCacheDirectiveInfo(contentHandler, directive);
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.directive = FSImageSerialization.readCacheDirectiveInfo(st);
readRpcIdsFromXml(st);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ModifyCacheDirectiveInfoOp[");
builder.append("id=").append(directive.getId());
if (directive.getPath() != null) {
builder.append(",").append("path=").append(directive.getPath());
}
if (directive.getReplication() != null) {
builder.append(",").append("replication=").
append(directive.getReplication());
}
if (directive.getPool() != null) {
builder.append(",").append("pool=").append(directive.getPool());
}
if (directive.getExpiration() != null) {
builder.append(",").append("expiration=").
append(directive.getExpiration().getMillis());
}
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
return builder.toString();
}
}
/**
* {@literal @AtMostOnce} for
* {@link ClientProtocol#removeCacheDirective}
*/
static class RemoveCacheDirectiveInfoOp extends FSEditLogOp {
long id;
public RemoveCacheDirectiveInfoOp() {
super(OP_REMOVE_CACHE_DIRECTIVE);
}
static RemoveCacheDirectiveInfoOp getInstance(OpInstanceCache cache) {
return (RemoveCacheDirectiveInfoOp) cache
.get(OP_REMOVE_CACHE_DIRECTIVE);
}
@Override
void resetSubFields() {
id = 0L;
}
public RemoveCacheDirectiveInfoOp setId(long id) {
this.id = id;
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
this.id = FSImageSerialization.readLong(in);
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeLong(id, out);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "ID", Long.toString(id));
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.id = Long.parseLong(st.getValue("ID"));
readRpcIdsFromXml(st);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("RemoveCacheDirectiveInfo [");
builder.append("id=" + Long.toString(id));
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
return builder.toString();
}
}
/** {@literal @AtMostOnce} for {@link ClientProtocol#addCachePool} */
static class AddCachePoolOp extends FSEditLogOp {
CachePoolInfo info;
public AddCachePoolOp() {
super(OP_ADD_CACHE_POOL);
}
static AddCachePoolOp getInstance(OpInstanceCache cache) {
return (AddCachePoolOp) cache.get(OP_ADD_CACHE_POOL);
}
@Override
void resetSubFields() {
info = null;
}
public AddCachePoolOp setPool(CachePoolInfo info) {
this.info = info;
assert(info.getPoolName() != null);
assert(info.getOwnerName() != null);
assert(info.getGroupName() != null);
assert(info.getMode() != null);
assert(info.getLimit() != null);
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
info = FSImageSerialization.readCachePoolInfo(in);
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeCachePoolInfo(out, info);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
FSImageSerialization.writeCachePoolInfo(contentHandler, info);
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.info = FSImageSerialization.readCachePoolInfo(st);
readRpcIdsFromXml(st);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("AddCachePoolOp [");
builder.append("poolName=" + info.getPoolName() + ",");
builder.append("ownerName=" + info.getOwnerName() + ",");
builder.append("groupName=" + info.getGroupName() + ",");
builder.append("mode=" + Short.toString(info.getMode().toShort()) + ",");
builder.append("limit=" + Long.toString(info.getLimit()));
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
return builder.toString();
}
}
/** {@literal @AtMostOnce} for {@link ClientProtocol#modifyCachePool} */
static class ModifyCachePoolOp extends FSEditLogOp {
CachePoolInfo info;
public ModifyCachePoolOp() {
super(OP_MODIFY_CACHE_POOL);
}
static ModifyCachePoolOp getInstance(OpInstanceCache cache) {
return (ModifyCachePoolOp) cache.get(OP_MODIFY_CACHE_POOL);
}
@Override
void resetSubFields() {
info = null;
}
public ModifyCachePoolOp setInfo(CachePoolInfo info) {
this.info = info;
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
info = FSImageSerialization.readCachePoolInfo(in);
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeCachePoolInfo(out, info);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
FSImageSerialization.writeCachePoolInfo(contentHandler, info);
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.info = FSImageSerialization.readCachePoolInfo(st);
readRpcIdsFromXml(st);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("ModifyCachePoolOp [");
ArrayList<String> fields = new ArrayList<String>(5);
if (info.getPoolName() != null) {
fields.add("poolName=" + info.getPoolName());
}
if (info.getOwnerName() != null) {
fields.add("ownerName=" + info.getOwnerName());
}
if (info.getGroupName() != null) {
fields.add("groupName=" + info.getGroupName());
}
if (info.getMode() != null) {
fields.add("mode=" + info.getMode().toString());
}
if (info.getLimit() != null) {
fields.add("limit=" + info.getLimit());
}
builder.append(Joiner.on(",").join(fields));
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
return builder.toString();
}
}
/** {@literal @AtMostOnce} for {@link ClientProtocol#removeCachePool} */
static class RemoveCachePoolOp extends FSEditLogOp {
String poolName;
public RemoveCachePoolOp() {
super(OP_REMOVE_CACHE_POOL);
}
static RemoveCachePoolOp getInstance(OpInstanceCache cache) {
return (RemoveCachePoolOp) cache.get(OP_REMOVE_CACHE_POOL);
}
@Override
void resetSubFields() {
poolName = null;
}
public RemoveCachePoolOp setPoolName(String poolName) {
this.poolName = poolName;
return this;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
poolName = FSImageSerialization.readString(in);
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(poolName, out);
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "POOLNAME", poolName);
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.poolName = st.getValue("POOLNAME");
readRpcIdsFromXml(st);
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("RemoveCachePoolOp [");
builder.append("poolName=" + poolName);
appendRpcIdsToString(builder, rpcClientId, rpcCallId);
builder.append("]");
return builder.toString();
}
}
static class RemoveXAttrOp extends FSEditLogOp {
List<XAttr> xAttrs;
String src;
private RemoveXAttrOp() {
super(OP_REMOVE_XATTR);
}
static RemoveXAttrOp getInstance() {
return new RemoveXAttrOp();
}
@Override
void resetSubFields() {
xAttrs = null;
src = null;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
src = p.getSrc();
xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
if (src != null) {
b.setSrc(src);
}
b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
b.build().writeDelimitedTo(out);
// clientId and callId
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
appendXAttrsToXml(contentHandler, xAttrs);
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
src = st.getValue("SRC");
xAttrs = readXAttrsFromXml(st);
readRpcIdsFromXml(st);
}
}
static class SetXAttrOp extends FSEditLogOp {
List<XAttr> xAttrs;
String src;
private SetXAttrOp() {
super(OP_SET_XATTR);
}
static SetXAttrOp getInstance() {
return new SetXAttrOp();
}
@Override
void resetSubFields() {
xAttrs = null;
src = null;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
src = p.getSrc();
xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
readRpcIds(in, logVersion);
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
XAttrEditLogProto.Builder b = XAttrEditLogProto.newBuilder();
if (src != null) {
b.setSrc(src);
}
b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
b.build().writeDelimitedTo(out);
// clientId and callId
writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
appendXAttrsToXml(contentHandler, xAttrs);
appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
src = st.getValue("SRC");
xAttrs = readXAttrsFromXml(st);
readRpcIdsFromXml(st);
}
}
static class SetAclOp extends FSEditLogOp {
List<AclEntry> aclEntries = Lists.newArrayList();
String src;
private SetAclOp() {
super(OP_SET_ACL);
}
static SetAclOp getInstance() {
return new SetAclOp();
}
@Override
void resetSubFields() {
aclEntries = null;
src = null;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
AclEditLogProto p = AclEditLogProto.parseDelimitedFrom(in);
if (p == null) {
throw new IOException("Failed to read fields from SetAclOp");
}
src = p.getSrc();
aclEntries = PBHelper.convertAclEntry(p.getEntriesList());
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
AclEditLogProto.Builder b = AclEditLogProto.newBuilder();
if (src != null)
b.setSrc(src);
b.addAllEntries(PBHelper.convertAclEntryProto(aclEntries));
b.build().writeDelimitedTo(out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
appendAclEntriesToXml(contentHandler, aclEntries);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
src = st.getValue("SRC");
aclEntries = readAclEntriesFromXml(st);
if (aclEntries == null) {
aclEntries = Lists.newArrayList();
}
}
}
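  // Older layouts (before EDITLOG_OP_OPTIMIZATION) serialized numeric fields
  // as strings; the two helpers below parse that legacy encoding (see the
  // else-branches in the delegation token ops above).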
static private short readShort(DataInputStream in) throws IOException {
return Short.parseShort(FSImageSerialization.readString(in));
}
static private long readLong(DataInputStream in) throws IOException {
return Long.parseLong(FSImageSerialization.readString(in));
}
/**
* A class to read in blocks stored in the old format. The only two
* fields in the block were blockid and length.
*/
static class BlockTwo implements Writable {
long blkid;
long len;
static { // register a ctor
WritableFactories.setFactory
(BlockTwo.class,
new WritableFactory() {
@Override
public Writable newInstance() { return new BlockTwo(); }
});
}
BlockTwo() {
blkid = 0;
len = 0;
}
/////////////////////////////////////
// Writable
/////////////////////////////////////
@Override
public void write(DataOutput out) throws IOException {
out.writeLong(blkid);
out.writeLong(len);
}
@Override
public void readFields(DataInput in) throws IOException {
this.blkid = in.readLong();
this.len = in.readLong();
}
}
/**
   * Operation corresponding to a rolling upgrade (start or finalize).
*/
static class RollingUpgradeOp extends FSEditLogOp { // @Idempotent
private final String name;
private long time;
public RollingUpgradeOp(FSEditLogOpCodes code, String name) {
super(code);
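      // the uppercased name (e.g. "START" or "FINALIZE", depending on how the
      // op is registered) becomes the prefix of the <name>TIME element written
      // by toXml/fromXml below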
this.name = StringUtils.toUpperCase(name);
}
static RollingUpgradeOp getStartInstance(OpInstanceCache cache) {
return (RollingUpgradeOp) cache.get(OP_ROLLING_UPGRADE_START);
}
static RollingUpgradeOp getFinalizeInstance(OpInstanceCache cache) {
return (RollingUpgradeOp) cache.get(OP_ROLLING_UPGRADE_FINALIZE);
}
@Override
void resetSubFields() {
time = 0L;
}
long getTime() {
return time;
}
void setTime(long time) {
this.time = time;
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
time = in.readLong();
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeLong(time, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, name + "TIME",
Long.toString(time));
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.time = Long.parseLong(st.getValue(name + "TIME"));
}
@Override
public String toString() {
return new StringBuilder().append("RollingUpgradeOp [").append(name)
.append(", time=").append(time).append("]").toString();
}
static class RollbackException extends IOException {
private static final long serialVersionUID = 1L;
}
}
/** {@literal @Idempotent} for {@link ClientProtocol#setStoragePolicy} */
static class SetStoragePolicyOp extends FSEditLogOp {
String path;
byte policyId;
private SetStoragePolicyOp() {
super(OP_SET_STORAGE_POLICY);
}
static SetStoragePolicyOp getInstance(OpInstanceCache cache) {
return (SetStoragePolicyOp) cache.get(OP_SET_STORAGE_POLICY);
}
@Override
void resetSubFields() {
path = null;
policyId = 0;
}
SetStoragePolicyOp setPath(String path) {
this.path = path;
return this;
}
SetStoragePolicyOp setPolicyId(byte policyId) {
this.policyId = policyId;
return this;
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
FSImageSerialization.writeString(path, out);
out.writeByte(policyId);
}
@Override
void readFields(DataInputStream in, int logVersion)
throws IOException {
this.path = FSImageSerialization.readString(in);
this.policyId = in.readByte();
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("SetStoragePolicyOp [path=");
builder.append(path);
builder.append(", policyId=");
builder.append(policyId);
builder.append(", opCode=");
builder.append(opCode);
builder.append(", txid=");
builder.append(txid);
builder.append("]");
return builder.toString();
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "PATH", path);
XMLUtils.addSaxString(contentHandler, "POLICYID",
Byte.valueOf(policyId).toString());
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
this.path = st.getValue("PATH");
this.policyId = Byte.valueOf(st.getValue("POLICYID"));
}
}
/**
* Class for writing editlog ops
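   *
   * A minimal usage sketch (illustrative only; {@code op} stands for any fully
   * populated edit log op):
   * <pre>
   *   DataOutputBuffer out = new DataOutputBuffer();
   *   FSEditLogOp.Writer writer = new FSEditLogOp.Writer(out);
   *   writer.writeOp(op);
   * </pre>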
*/
public static class Writer {
private final DataOutputBuffer buf;
private final Checksum checksum;
public Writer(DataOutputBuffer out) {
this.buf = out;
this.checksum = DataChecksum.newCrc32();
}
/**
* Write an operation to the output stream
*
* @param op The operation to write
* @throws IOException if an error occurs during writing.
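     *
     * The record written here is laid out as: a 1-byte opcode, a 4-byte length
     * (covering the transaction id, the op-specific fields and the trailing
     * checksum), an 8-byte transaction id, the op-specific fields, and a
     * 4-byte CRC32 checksum computed over everything from the opcode up to,
     * but not including, the checksum itself.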
*/
public void writeOp(FSEditLogOp op) throws IOException {
int start = buf.getLength();
// write the op code first to make padding and terminator verification
// work
buf.writeByte(op.opCode.getOpCode());
buf.writeInt(0); // write 0 for the length first
buf.writeLong(op.txid);
op.writeFields(buf);
int end = buf.getLength();
      // go back and fill in the length: it covers the 8-byte txid, the op
      // fields and the 4-byte checksum (i.e. the whole record minus the
      // opcode byte and the length field itself)
int length = end - start - 1;
buf.writeInt(length, start + 1);
checksum.reset();
checksum.update(buf.getData(), start, end-start);
int sum = (int)checksum.getValue();
buf.writeInt(sum);
}
}
/**
* Class for reading editlog ops from a stream
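   *
   * A minimal usage sketch (illustrative only; {@code in}, {@code limiter} and
   * {@code logVersion} are assumed to be supplied by the caller):
   * <pre>
   *   FSEditLogOp.Reader reader = new FSEditLogOp.Reader(in, limiter, logVersion);
   *   FSEditLogOp op;
   *   while ((op = reader.readOp(false)) != null) {
   *     // note: returned instances may be reused by later readOp() calls
   *   }
   * </pre>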
*/
public static class Reader {
private final DataInputStream in;
private final StreamLimiter limiter;
private final int logVersion;
private final Checksum checksum;
private final OpInstanceCache cache;
private int maxOpSize;
private final boolean supportEditLogLength;
/**
* Construct the reader
     * @param in The stream to read from.
     * @param limiter Limits how many bytes may be consumed while decoding a
     *                single op.
     * @param logVersion The version of the data coming from the stream.
*/
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
this.logVersion = logVersion;
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
this.checksum = DataChecksum.newCrc32();
} else {
this.checksum = null;
}
      // It is possible that the logVersion is actually a future layout version
      // during a rolling upgrade (e.g., the NN gets upgraded first). We assume
      // that future layouts will also support the edit log op length field.
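      // (Layout versions are negative and decrease as features are added, so a
      // logVersion smaller than CURRENT_LAYOUT_VERSION denotes a newer layout.)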
this.supportEditLogLength = NameNodeLayoutVersion.supports(
NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
|| logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
if (this.checksum != null) {
this.in = new DataInputStream(
new CheckedInputStream(in, this.checksum));
} else {
this.in = in;
}
this.limiter = limiter;
this.cache = new OpInstanceCache();
this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
public void setMaxOpSize(int maxOpSize) {
this.maxOpSize = maxOpSize;
}
/**
* Read an operation from the input stream.
*
* Note that the objects returned from this method may be re-used by future
* calls to the same method.
*
* @param skipBrokenEdits If true, attempt to skip over damaged parts of
* the input stream, rather than throwing an IOException
* @return the operation read from the stream, or null at the end of the
* file
* @throws IOException on error. This function should only throw an
* exception when skipBrokenEdits is false.
*/
public FSEditLogOp readOp(boolean skipBrokenEdits) throws IOException {
while (true) {
try {
return decodeOp();
} catch (IOException e) {
in.reset();
if (!skipBrokenEdits) {
throw e;
}
} catch (RuntimeException e) {
// FSEditLogOp#decodeOp is not supposed to throw RuntimeException.
// However, we handle it here for recovery mode, just to be more
// robust.
in.reset();
if (!skipBrokenEdits) {
throw e;
}
} catch (Throwable e) {
in.reset();
if (!skipBrokenEdits) {
throw new IOException("got unexpected exception " +
e.getMessage(), e);
}
}
// Move ahead one byte and re-try the decode process.
if (in.skip(1) < 1) {
return null;
}
}
}
private void verifyTerminator() throws IOException {
/** The end of the edit log should contain only 0x00 or 0xff bytes.
* If it contains other bytes, the log itself may be corrupt.
* It is important to check this; if we don't, a stray OP_INVALID byte
* could make us stop reading the edit log halfway through, and we'd never
* know that we had lost data.
*/
byte[] buf = new byte[4096];
limiter.clearLimit();
int numRead = -1, idx = 0;
while (true) {
try {
numRead = -1;
idx = 0;
numRead = in.read(buf);
if (numRead == -1) {
return;
}
while (idx < numRead) {
if ((buf[idx] != (byte)0) && (buf[idx] != (byte)-1)) {
throw new IOException("Read extra bytes after " +
"the terminator!");
}
idx++;
}
} finally {
// After reading each group of bytes, we reposition the mark one
// byte before the next group. Similarly, if there is an error, we
// want to reposition the mark one byte before the error
if (numRead != -1) {
in.reset();
IOUtils.skipFully(in, idx);
in.mark(buf.length + 1);
IOUtils.skipFully(in, 1);
}
}
}
}
/**
     * Decode a single operation from the input stream.
     *
     * @return the decoded op, or null at EOF or at the log terminator.
*
* If an exception is thrown, the stream's mark will be set to the first
* problematic byte. This usually means the beginning of the opcode.
*/
private FSEditLogOp decodeOp() throws IOException {
limiter.setLimit(maxOpSize);
in.mark(maxOpSize);
if (checksum != null) {
checksum.reset();
}
byte opCodeByte;
try {
opCodeByte = in.readByte();
} catch (EOFException eof) {
// EOF at an opcode boundary is expected.
return null;
}
FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
if (opCode == OP_INVALID) {
verifyTerminator();
return null;
}
FSEditLogOp op = cache.get(opCode);
if (op == null) {
throw new IOException("Read invalid opcode " + opCode);
}
if (supportEditLogLength) {
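        // skip the 4-byte length field; decodeOp reads the op fields directly
        // instead of seeking past them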
in.readInt();
}
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.STORED_TXIDS, logVersion)) {
// Read the txid
op.setTransactionId(in.readLong());
} else {
op.setTransactionId(HdfsServerConstants.INVALID_TXID);
}
op.readFields(in, logVersion);
validateChecksum(in, checksum, op.txid);
return op;
}
/**
     * Similar to decodeOp(), but instead of fully decoding the op it skips
     * over the op body when the edit log op length field is supported.
     * @return the txid of the scanned op, or INVALID_TXID at the end of the
     *         segment
*/
public long scanOp() throws IOException {
if (supportEditLogLength) {
limiter.setLimit(maxOpSize);
in.mark(maxOpSize);
final byte opCodeByte;
try {
opCodeByte = in.readByte(); // op code
} catch (EOFException e) {
return HdfsServerConstants.INVALID_TXID;
}
FSEditLogOpCodes opCode = FSEditLogOpCodes.fromByte(opCodeByte);
if (opCode == OP_INVALID) {
verifyTerminator();
return HdfsServerConstants.INVALID_TXID;
}
int length = in.readInt(); // read the length of the op
long txid = in.readLong(); // read the txid
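        // 'length' counts the 8-byte txid, the op fields and the 4-byte
        // checksum; the txid was just read, so length - 8 bytes remain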
// skip the remaining content
IOUtils.skipFully(in, length - 8);
// TODO: do we want to verify checksum for JN? For now we don't.
return txid;
} else {
FSEditLogOp op = decodeOp();
return op == null ? HdfsServerConstants.INVALID_TXID : op.getTransactionId();
}
}
/**
* Validate a transaction's checksum
*/
private void validateChecksum(DataInputStream in,
Checksum checksum,
long txid)
throws IOException {
if (checksum != null) {
int calculatedChecksum = (int)checksum.getValue();
int readChecksum = in.readInt(); // read in checksum
if (readChecksum != calculatedChecksum) {
throw new ChecksumException(
"Transaction is corrupt. Calculated checksum is " +
calculatedChecksum + " but read checksum " + readChecksum, txid);
}
}
}
}
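  // Serializes this op as an XML record of the form:
  //   <RECORD><OPCODE>...</OPCODE><DATA><TXID>...</TXID>{op-specific fields}</DATA></RECORD>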
public void outputToXml(ContentHandler contentHandler) throws SAXException {
contentHandler.startElement("", "", "RECORD", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "OPCODE", opCode.toString());
contentHandler.startElement("", "", "DATA", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "TXID", "" + txid);
toXml(contentHandler);
contentHandler.endElement("", "", "DATA");
contentHandler.endElement("", "", "RECORD");
}
protected abstract void toXml(ContentHandler contentHandler)
throws SAXException;
abstract void fromXml(Stanza st) throws InvalidXmlException;
public void decodeXml(Stanza st) throws InvalidXmlException {
this.txid = Long.parseLong(st.getValue("TXID"));
fromXml(st);
}
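  // The pair of helpers below write and read a block as:
  //   <BLOCK><BLOCK_ID>...</BLOCK_ID><NUM_BYTES>...</NUM_BYTES><GENSTAMP>...</GENSTAMP></BLOCK>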
public static void blockToXml(ContentHandler contentHandler, Block block)
throws SAXException {
contentHandler.startElement("", "", "BLOCK", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "BLOCK_ID",
Long.toString(block.getBlockId()));
XMLUtils.addSaxString(contentHandler, "NUM_BYTES",
Long.toString(block.getNumBytes()));
XMLUtils.addSaxString(contentHandler, "GENSTAMP",
Long.toString(block.getGenerationStamp()));
contentHandler.endElement("", "", "BLOCK");
}
public static Block blockFromXml(Stanza st)
throws InvalidXmlException {
long blockId = Long.parseLong(st.getValue("BLOCK_ID"));
long numBytes = Long.parseLong(st.getValue("NUM_BYTES"));
long generationStamp = Long.parseLong(st.getValue("GENSTAMP"));
return new Block(blockId, numBytes, generationStamp);
}
public static void delegationTokenToXml(ContentHandler contentHandler,
DelegationTokenIdentifier token) throws SAXException {
contentHandler.startElement("", "", "DELEGATION_TOKEN_IDENTIFIER", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "KIND", token.getKind().toString());
XMLUtils.addSaxString(contentHandler, "SEQUENCE_NUMBER",
Integer.toString(token.getSequenceNumber()));
XMLUtils.addSaxString(contentHandler, "OWNER",
token.getOwner().toString());
XMLUtils.addSaxString(contentHandler, "RENEWER",
token.getRenewer().toString());
XMLUtils.addSaxString(contentHandler, "REALUSER",
token.getRealUser().toString());
XMLUtils.addSaxString(contentHandler, "ISSUE_DATE",
Long.toString(token.getIssueDate()));
XMLUtils.addSaxString(contentHandler, "MAX_DATE",
Long.toString(token.getMaxDate()));
XMLUtils.addSaxString(contentHandler, "MASTER_KEY_ID",
Integer.toString(token.getMasterKeyId()));
contentHandler.endElement("", "", "DELEGATION_TOKEN_IDENTIFIER");
}
public static DelegationTokenIdentifier delegationTokenFromXml(Stanza st)
throws InvalidXmlException {
String kind = st.getValue("KIND");
if (!kind.equals(DelegationTokenIdentifier.
HDFS_DELEGATION_KIND.toString())) {
throw new InvalidXmlException("can't understand " +
"DelegationTokenIdentifier KIND " + kind);
}
int seqNum = Integer.parseInt(st.getValue("SEQUENCE_NUMBER"));
String owner = st.getValue("OWNER");
String renewer = st.getValue("RENEWER");
String realuser = st.getValue("REALUSER");
long issueDate = Long.parseLong(st.getValue("ISSUE_DATE"));
long maxDate = Long.parseLong(st.getValue("MAX_DATE"));
int masterKeyId = Integer.parseInt(st.getValue("MASTER_KEY_ID"));
DelegationTokenIdentifier token =
new DelegationTokenIdentifier(new Text(owner),
new Text(renewer), new Text(realuser));
token.setSequenceNumber(seqNum);
token.setIssueDate(issueDate);
token.setMaxDate(maxDate);
token.setMasterKeyId(masterKeyId);
return token;
}
public static void delegationKeyToXml(ContentHandler contentHandler,
DelegationKey key) throws SAXException {
contentHandler.startElement("", "", "DELEGATION_KEY", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "KEY_ID",
Integer.toString(key.getKeyId()));
XMLUtils.addSaxString(contentHandler, "EXPIRY_DATE",
Long.toString(key.getExpiryDate()));
if (key.getEncodedKey() != null) {
XMLUtils.addSaxString(contentHandler, "KEY",
Hex.encodeHexString(key.getEncodedKey()));
}
contentHandler.endElement("", "", "DELEGATION_KEY");
}
public static DelegationKey delegationKeyFromXml(Stanza st)
throws InvalidXmlException {
int keyId = Integer.parseInt(st.getValue("KEY_ID"));
long expiryDate = Long.parseLong(st.getValue("EXPIRY_DATE"));
    byte[] key = null;
try {
key = Hex.decodeHex(st.getValue("KEY").toCharArray());
} catch (DecoderException e) {
throw new InvalidXmlException(e.toString());
} catch (InvalidXmlException e) {
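      // the KEY element is optional (delegationKeyToXml only writes it when
      // the encoded key is non-null), so a missing KEY simply leaves 'key' null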
}
return new DelegationKey(keyId, expiryDate, key);
}
public static void permissionStatusToXml(ContentHandler contentHandler,
PermissionStatus perm) throws SAXException {
contentHandler.startElement("", "", "PERMISSION_STATUS", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "USERNAME", perm.getUserName());
XMLUtils.addSaxString(contentHandler, "GROUPNAME", perm.getGroupName());
fsPermissionToXml(contentHandler, perm.getPermission());
contentHandler.endElement("", "", "PERMISSION_STATUS");
}
public static PermissionStatus permissionStatusFromXml(Stanza st)
throws InvalidXmlException {
Stanza status = st.getChildren("PERMISSION_STATUS").get(0);
String username = status.getValue("USERNAME");
String groupname = status.getValue("GROUPNAME");
FsPermission mode = fsPermissionFromXml(status);
return new PermissionStatus(username, groupname, mode);
}
public static void fsPermissionToXml(ContentHandler contentHandler,
FsPermission mode) throws SAXException {
XMLUtils.addSaxString(contentHandler, "MODE", Short.valueOf(mode.toShort())
.toString());
}
public static FsPermission fsPermissionFromXml(Stanza st)
throws InvalidXmlException {
short mode = Short.valueOf(st.getValue("MODE"));
return new FsPermission(mode);
}
private static void fsActionToXml(ContentHandler contentHandler, FsAction v)
throws SAXException {
XMLUtils.addSaxString(contentHandler, "PERM", v.SYMBOL);
}
private static FsAction fsActionFromXml(Stanza st) throws InvalidXmlException {
FsAction v = FSACTION_SYMBOL_MAP.get(st.getValue("PERM"));
if (v == null)
throw new InvalidXmlException("Invalid value for FsAction");
return v;
}
private static void appendAclEntriesToXml(ContentHandler contentHandler,
List<AclEntry> aclEntries) throws SAXException {
for (AclEntry e : aclEntries) {
contentHandler.startElement("", "", "ENTRY", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "SCOPE", e.getScope().name());
XMLUtils.addSaxString(contentHandler, "TYPE", e.getType().name());
if (e.getName() != null) {
XMLUtils.addSaxString(contentHandler, "NAME", e.getName());
}
fsActionToXml(contentHandler, e.getPermission());
contentHandler.endElement("", "", "ENTRY");
}
}
private static List<AclEntry> readAclEntriesFromXml(Stanza st) {
List<AclEntry> aclEntries = Lists.newArrayList();
if (!st.hasChildren("ENTRY"))
return null;
List<Stanza> stanzas = st.getChildren("ENTRY");
for (Stanza s : stanzas) {
AclEntry e = new AclEntry.Builder()
.setScope(AclEntryScope.valueOf(s.getValue("SCOPE")))
.setType(AclEntryType.valueOf(s.getValue("TYPE")))
.setName(s.getValueOrNull("NAME"))
.setPermission(fsActionFromXml(s)).build();
aclEntries.add(e);
}
return aclEntries;
}
private static void appendXAttrsToXml(ContentHandler contentHandler,
List<XAttr> xAttrs) throws SAXException {
for (XAttr xAttr: xAttrs) {
contentHandler.startElement("", "", "XATTR", new AttributesImpl());
XMLUtils.addSaxString(contentHandler, "NAMESPACE",
xAttr.getNameSpace().toString());
XMLUtils.addSaxString(contentHandler, "NAME", xAttr.getName());
if (xAttr.getValue() != null) {
try {
XMLUtils.addSaxString(contentHandler, "VALUE",
XAttrCodec.encodeValue(xAttr.getValue(), XAttrCodec.HEX));
} catch (IOException e) {
throw new SAXException(e);
}
}
contentHandler.endElement("", "", "XATTR");
}
}
private static List<XAttr> readXAttrsFromXml(Stanza st)
throws InvalidXmlException {
if (!st.hasChildren("XATTR")) {
return null;
}
List<Stanza> stanzas = st.getChildren("XATTR");
List<XAttr> xattrs = Lists.newArrayListWithCapacity(stanzas.size());
for (Stanza a: stanzas) {
XAttr.Builder builder = new XAttr.Builder();
builder.setNameSpace(XAttr.NameSpace.valueOf(a.getValue("NAMESPACE"))).
setName(a.getValue("NAME"));
String v = a.getValueOrNull("VALUE");
if (v != null) {
try {
builder.setValue(XAttrCodec.decodeValue(v));
} catch (IOException e) {
throw new InvalidXmlException(e.toString());
}
}
xattrs.add(builder.build());
}
return xattrs;
}
}
| 156,640 | 30.397274 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.util.Time.monotonicNow;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.security.DigestInputStream;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutFlags;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* Contains inner classes for reading or writing the on-disk format for
* FSImages.
*
* In particular, the format of the FSImage looks like:
* <pre>
* FSImage {
* layoutVersion: int, namespaceID: int, numberItemsInFSDirectoryTree: long,
* namesystemGenerationStampV1: long, namesystemGenerationStampV2: long,
 * generationStampAtBlockIdSwitch: long, lastAllocatedBlockId: long,
 * transactionID: long, snapshotCounter: int, numberOfSnapshots: int,
* numOfSnapshottableDirs: int,
* {FSDirectoryTree, FilesUnderConstruction, SecretManagerState} (can be compressed)
* }
*
* FSDirectoryTree (if {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is supported) {
* INodeInfo of root, numberOfChildren of root: int
* [list of INodeInfo of root's children],
* [list of INodeDirectoryInfo of root's directory children]
* }
*
* FSDirectoryTree (if {@link Feature#FSIMAGE_NAME_OPTIMIZATION} not supported){
* [list of INodeInfo of INodes in topological order]
* }
*
* INodeInfo {
* {
* localName: short + byte[]
* } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is supported
* or
* {
* fullPath: byte[]
* } when {@link Feature#FSIMAGE_NAME_OPTIMIZATION} is not supported
* replicationFactor: short, modificationTime: long,
* accessTime: long, preferredBlockSize: long,
 * numberOfBlocks: int (-1 for INodeDirectory, -2 for INodeSymlink, -3 for INodeReference),
* {
* nsQuota: long, dsQuota: long,
* {
* isINodeSnapshottable: byte,
* isINodeWithSnapshot: byte (if isINodeSnapshottable is false)
* } (when {@link Feature#SNAPSHOT} is supported),
* fsPermission: short, PermissionStatus
* } for INodeDirectory
* or
* {
* symlinkString, fsPermission: short, PermissionStatus
* } for INodeSymlink
* or
* {
* [list of BlockInfo]
* [list of FileDiff]
* {
* isINodeFileUnderConstructionSnapshot: byte,
* {clientName: short + byte[], clientMachine: short + byte[]} (when
* isINodeFileUnderConstructionSnapshot is true),
* } (when {@link Feature#SNAPSHOT} is supported and writing snapshotINode),
* fsPermission: short, PermissionStatus
* } for INodeFile
* }
*
* INodeDirectoryInfo {
* fullPath of the directory: short + byte[],
* numberOfChildren: int, [list of INodeInfo of children INode],
* {
* numberOfSnapshots: int,
* [list of Snapshot] (when NumberOfSnapshots is positive),
* numberOfDirectoryDiffs: int,
 * [list of DirectoryDiff] (when NumberOfDirectoryDiffs is positive),
* number of children that are directories,
* [list of INodeDirectoryInfo of the directory children] (includes
* snapshot copies of deleted sub-directories)
* } (when {@link Feature#SNAPSHOT} is supported),
* }
*
* Snapshot {
* snapshotID: int, root of Snapshot: INodeDirectoryInfo (its local name is
* the name of the snapshot)
* }
*
* DirectoryDiff {
* full path of the root of the associated Snapshot: short + byte[],
* childrenSize: int,
* isSnapshotRoot: byte,
* snapshotINodeIsNotNull: byte (when isSnapshotRoot is false),
* snapshotINode: INodeDirectory (when SnapshotINodeIsNotNull is true), Diff
* }
*
* Diff {
* createdListSize: int, [Local name of INode in created list],
* deletedListSize: int, [INode in deleted list: INodeInfo]
* }
*
* FileDiff {
* full path of the root of the associated Snapshot: short + byte[],
* fileSize: long,
* snapshotINodeIsNotNull: byte,
* snapshotINode: INodeFile (when SnapshotINodeIsNotNull is true), Diff
* }
* </pre>
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class FSImageFormat {
private static final Log LOG = FSImage.LOG;
// Static-only class
private FSImageFormat() {}
interface AbstractLoader {
MD5Hash getLoadedImageMd5();
long getLoadedImageTxId();
}
static class LoaderDelegator implements AbstractLoader {
private AbstractLoader impl;
private final Configuration conf;
private final FSNamesystem fsn;
LoaderDelegator(Configuration conf, FSNamesystem fsn) {
this.conf = conf;
this.fsn = fsn;
}
@Override
public MD5Hash getLoadedImageMd5() {
return impl.getLoadedImageMd5();
}
@Override
public long getLoadedImageTxId() {
return impl.getLoadedImageTxId();
}
public void load(File file, boolean requireSameLayoutVersion)
throws IOException {
Preconditions.checkState(impl == null, "Image already loaded!");
FileInputStream is = null;
try {
is = new FileInputStream(file);
byte[] magic = new byte[FSImageUtil.MAGIC_HEADER.length];
IOUtils.readFully(is, magic, 0, magic.length);
if (Arrays.equals(magic, FSImageUtil.MAGIC_HEADER)) {
FSImageFormatProtobuf.Loader loader = new FSImageFormatProtobuf.Loader(
conf, fsn, requireSameLayoutVersion);
impl = loader;
loader.load(file);
} else {
Loader loader = new Loader(conf, fsn);
impl = loader;
loader.load(file);
}
} finally {
IOUtils.cleanup(LOG, is);
}
}
}
/**
* Construct a loader class to load the image. It chooses the loader based on
* the layout version.
*/
public static LoaderDelegator newLoader(Configuration conf, FSNamesystem fsn) {
return new LoaderDelegator(conf, fsn);
}
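  // Illustrative usage sketch (not part of the original class; the variable
  // names conf, fsn and imageFile are assumptions): callers obtain a
  // LoaderDelegator from newLoader() and call load() exactly once, after
  // which the getters become valid.
  //
  //   FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, fsn);
  //   loader.load(imageFile, false);             // requireSameLayoutVersion = false
  //   MD5Hash md5 = loader.getLoadedImageMd5();  // checksum of the loaded file
  //   long txId = loader.getLoadedImageTxId();   // last txid covered by the image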
/**
* A one-shot class responsible for loading an image. The load() function
* should be called once, after which the getter methods may be used to retrieve
* information about the image that was loaded, if loading was successful.
*/
public static class Loader implements AbstractLoader {
private final Configuration conf;
/** which namesystem this loader is working for */
private final FSNamesystem namesystem;
/** Set to true once a file has been loaded using this loader. */
private boolean loaded = false;
/** The transaction ID of the last edit represented by the loaded file */
private long imgTxId;
/** The MD5 sum of the loaded file */
private MD5Hash imgDigest;
private Map<Integer, Snapshot> snapshotMap = null;
private final ReferenceMap referenceMap = new ReferenceMap();
Loader(Configuration conf, FSNamesystem namesystem) {
this.conf = conf;
this.namesystem = namesystem;
}
/**
* Return the MD5 checksum of the image that has been loaded.
* @throws IllegalStateException if load() has not yet been called.
*/
@Override
public MD5Hash getLoadedImageMd5() {
checkLoaded();
return imgDigest;
}
@Override
public long getLoadedImageTxId() {
checkLoaded();
return imgTxId;
}
/**
* Throw IllegalStateException if load() has not yet been called.
*/
private void checkLoaded() {
if (!loaded) {
throw new IllegalStateException("Image not yet loaded!");
}
}
/**
* Throw IllegalStateException if load() has already been called.
*/
private void checkNotLoaded() {
if (loaded) {
throw new IllegalStateException("Image already loaded!");
}
}
public void load(File curFile) throws IOException {
checkNotLoaded();
assert curFile != null : "curFile is null";
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.INODES);
prog.beginStep(Phase.LOADING_FSIMAGE, step);
long startTime = monotonicNow();
//
// Load in bits
//
MessageDigest digester = MD5Hash.getDigester();
DigestInputStream fin = new DigestInputStream(
new FileInputStream(curFile), digester);
DataInputStream in = new DataInputStream(fin);
try {
// read image version: first appeared in version -1
int imgVersion = in.readInt();
if (getLayoutVersion() != imgVersion) {
throw new InconsistentFSStateException(curFile,
"imgVersion " + imgVersion +
" expected to be " + getLayoutVersion());
}
boolean supportSnapshot = NameNodeLayoutVersion.supports(
LayoutVersion.Feature.SNAPSHOT, imgVersion);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.ADD_LAYOUT_FLAGS, imgVersion)) {
LayoutFlags.read(in);
}
// read namespaceID: first appeared in version -2
in.readInt();
long numFiles = in.readLong();
// read in the last generation stamp for legacy blocks.
long genstamp = in.readLong();
namesystem.getBlockIdManager().setGenerationStampV1(genstamp);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.SEQUENTIAL_BLOCK_ID, imgVersion)) {
// read the starting generation stamp for sequential block IDs
genstamp = in.readLong();
namesystem.getBlockIdManager().setGenerationStampV2(genstamp);
// read the last generation stamp for blocks created after
// the switch to sequential block IDs.
long stampAtIdSwitch = in.readLong();
namesystem.getBlockIdManager().setGenerationStampV1Limit(stampAtIdSwitch);
// read the max sequential block ID.
long maxSequentialBlockId = in.readLong();
namesystem.getBlockIdManager().setLastAllocatedBlockId(maxSequentialBlockId);
} else {
long startingGenStamp = namesystem.getBlockIdManager()
.upgradeGenerationStampToV2();
// This is an upgrade.
LOG.info("Upgrading to sequential block IDs. Generation stamp " +
"for new blocks set to " + startingGenStamp);
}
// read the transaction ID of the last edit represented by
// this image
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.STORED_TXIDS, imgVersion)) {
imgTxId = in.readLong();
} else {
imgTxId = 0;
}
// read the last allocated inode id in the fsimage
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.ADD_INODE_ID, imgVersion)) {
long lastInodeId = in.readLong();
namesystem.dir.resetLastInodeId(lastInodeId);
if (LOG.isDebugEnabled()) {
LOG.debug("load last allocated InodeId from fsimage:" + lastInodeId);
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Old layout version doesn't have inode id."
+ " Will assign new id for each inode.");
}
}
if (supportSnapshot) {
snapshotMap = namesystem.getSnapshotManager().read(in, this);
}
// read compression related info
FSImageCompression compression;
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.FSIMAGE_COMPRESSION, imgVersion)) {
compression = FSImageCompression.readCompressionHeader(conf, in);
} else {
compression = FSImageCompression.createNoopCompression();
}
in = compression.unwrapInputStream(fin);
LOG.info("Loading image file " + curFile + " using " + compression);
// load all inodes
LOG.info("Number of files = " + numFiles);
prog.setTotal(Phase.LOADING_FSIMAGE, step, numFiles);
Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.FSIMAGE_NAME_OPTIMIZATION, imgVersion)) {
if (supportSnapshot) {
loadLocalNameINodesWithSnapshot(numFiles, in, counter);
} else {
loadLocalNameINodes(numFiles, in, counter);
}
} else {
loadFullNameINodes(numFiles, in, counter);
}
loadFilesUnderConstruction(in, supportSnapshot, counter);
prog.endStep(Phase.LOADING_FSIMAGE, step);
// Now that the step is finished, set counter equal to total to adjust
// for possible under-counting due to reference inodes.
prog.setCount(Phase.LOADING_FSIMAGE, step, numFiles);
loadSecretManagerState(in);
loadCacheManagerState(in);
// make sure to read to the end of file
boolean eof = (in.read() == -1);
assert eof : "Should have reached the end of image file " + curFile;
} finally {
in.close();
}
imgDigest = new MD5Hash(digester.digest());
loaded = true;
LOG.info("Image file " + curFile + " of size " + curFile.length()
+ " bytes loaded in " + (monotonicNow() - startTime) / 1000
+ " seconds.");
}
/** Update the root node's attributes */
private void updateRootAttr(INodeWithAdditionalFields root) {
final QuotaCounts q = root.getQuotaCounts();
final long nsQuota = q.getNameSpace();
final long dsQuota = q.getStorageSpace();
FSDirectory fsDir = namesystem.dir;
if (nsQuota != -1 || dsQuota != -1) {
fsDir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
}
fsDir.rootDir.cloneModificationTime(root);
fsDir.rootDir.clonePermissionStatus(root);
}
/**
* Load fsimage files when 1) only local names are stored,
* and 2) snapshot is supported.
*
* @param numFiles number of files expected to be read
* @param in Image input stream
* @param counter Counter to increment for namenode startup progress
*/
private void loadLocalNameINodesWithSnapshot(long numFiles, DataInput in,
Counter counter) throws IOException {
assert NameNodeLayoutVersion.supports(
LayoutVersion.Feature.FSIMAGE_NAME_OPTIMIZATION, getLayoutVersion());
assert NameNodeLayoutVersion.supports(
LayoutVersion.Feature.SNAPSHOT, getLayoutVersion());
// load root
loadRoot(in, counter);
// load rest of the nodes recursively
loadDirectoryWithSnapshot(in, counter);
}
/**
* load fsimage files assuming only local names are stored. Used when
* snapshots are not supported by the layout version.
*
* @param numFiles number of files expected to be read
* @param in image input stream
* @param counter Counter to increment for namenode startup progress
* @throws IOException
*/
private void loadLocalNameINodes(long numFiles, DataInput in, Counter counter)
throws IOException {
assert NameNodeLayoutVersion.supports(
LayoutVersion.Feature.FSIMAGE_NAME_OPTIMIZATION, getLayoutVersion());
assert numFiles > 0;
// load root
loadRoot(in, counter);
// have loaded the first file (the root)
numFiles--;
// load rest of the nodes directory by directory
while (numFiles > 0) {
numFiles -= loadDirectory(in, counter);
}
if (numFiles != 0) {
throw new IOException("Read unexpect number of files: " + -numFiles);
}
}
/**
* Load information about root, and use the information to update the root
* directory of NameSystem.
* @param in The {@link DataInput} instance to read.
* @param counter Counter to increment for namenode startup progress
*/
private void loadRoot(DataInput in, Counter counter)
throws IOException {
// load root
if (in.readShort() != 0) {
throw new IOException("First node is not root");
}
final INodeDirectory root = loadINode(null, false, in, counter)
.asDirectory();
// update the root's attributes
updateRootAttr(root);
}
/** Load children nodes for the parent directory. */
private int loadChildren(INodeDirectory parent, DataInput in,
Counter counter) throws IOException {
int numChildren = in.readInt();
for (int i = 0; i < numChildren; i++) {
// load single inode
INode newNode = loadINodeWithLocalName(false, in, true, counter);
addToParent(parent, newNode);
}
return numChildren;
}
/**
* Load a directory when snapshot is supported.
* @param in The {@link DataInput} instance to read.
* @param counter Counter to increment for namenode startup progress
*/
private void loadDirectoryWithSnapshot(DataInput in, Counter counter)
throws IOException {
// Step 1. Identify the parent INode
long inodeId = in.readLong();
final INodeDirectory parent = this.namesystem.dir.getInode(inodeId)
.asDirectory();
// Check if the whole subtree has been saved (for reference nodes)
boolean toLoadSubtree = referenceMap.toProcessSubtree(parent.getId());
if (!toLoadSubtree) {
return;
}
// Step 2. Load snapshots if parent is snapshottable
int numSnapshots = in.readInt();
if (numSnapshots >= 0) {
// load snapshots and snapshotQuota
SnapshotFSImageFormat.loadSnapshotList(parent, numSnapshots, in, this);
if (parent.getDirectorySnapshottableFeature().getSnapshotQuota() > 0) {
// add the directory to the snapshottable directory list in
// SnapshotManager. Note that we only add root when its snapshot quota
// is positive.
this.namesystem.getSnapshotManager().addSnapshottable(parent);
}
}
// Step 3. Load children nodes under parent
loadChildren(parent, in, counter);
// Step 4. load Directory Diff List
SnapshotFSImageFormat.loadDirectoryDiffList(parent, in, this);
// Recursively load sub-directories, including snapshot copies of deleted
// directories
int numSubTree = in.readInt();
for (int i = 0; i < numSubTree; i++) {
loadDirectoryWithSnapshot(in, counter);
}
}
/**
* Load all children of a directory
*
* @param in input to load from
* @param counter Counter to increment for namenode startup progress
* @return number of child inodes read
* @throws IOException
*/
private int loadDirectory(DataInput in, Counter counter) throws IOException {
String parentPath = FSImageSerialization.readString(in);
// Rename .snapshot paths if we're doing an upgrade
parentPath = renameReservedPathsOnUpgrade(parentPath, getLayoutVersion());
final INodeDirectory parent = INodeDirectory.valueOf(
namesystem.dir.getINode(parentPath, true), parentPath);
return loadChildren(parent, in, counter);
}
/**
* load fsimage files assuming full path names are stored
*
* @param numFiles total number of files to load
* @param in data input stream
* @param counter Counter to increment for namenode startup progress
* @throws IOException if any error occurs
*/
private void loadFullNameINodes(long numFiles, DataInput in, Counter counter)
throws IOException {
byte[][] pathComponents;
byte[][] parentPath = {{}};
FSDirectory fsDir = namesystem.dir;
INodeDirectory parentINode = fsDir.rootDir;
for (long i = 0; i < numFiles; i++) {
pathComponents = FSImageSerialization.readPathComponents(in);
for (int j=0; j < pathComponents.length; j++) {
byte[] newComponent = renameReservedComponentOnUpgrade
(pathComponents[j], getLayoutVersion());
if (!Arrays.equals(newComponent, pathComponents[j])) {
String oldPath = DFSUtil.byteArray2PathString(pathComponents);
pathComponents[j] = newComponent;
String newPath = DFSUtil.byteArray2PathString(pathComponents);
LOG.info("Renaming reserved path " + oldPath + " to " + newPath);
}
}
final INode newNode = loadINode(
pathComponents[pathComponents.length-1], false, in, counter);
if (isRoot(pathComponents)) { // it is the root
// update the root's attributes
updateRootAttr(newNode.asDirectory());
continue;
}
namesystem.dir.addToInodeMap(newNode);
// check if the new inode belongs to the same parent
if(!isParent(pathComponents, parentPath)) {
parentINode = getParentINodeDirectory(pathComponents);
parentPath = getParent(pathComponents);
}
// add new inode
addToParent(parentINode, newNode);
}
}
private INodeDirectory getParentINodeDirectory(byte[][] pathComponents
) throws FileNotFoundException, PathIsNotDirectoryException,
UnresolvedLinkException {
if (pathComponents.length < 2) { // root
return null;
}
// Gets the parent INode
final INodesInPath inodes = namesystem.dir.getExistingPathINodes(
pathComponents);
return INodeDirectory.valueOf(inodes.getINode(-2), pathComponents);
}
/**
* Add the child node to parent and, if child is a file, update block map.
* This method is only used for image loading so that synchronization,
* modification time update and space count update are not needed.
*/
private void addToParent(INodeDirectory parent, INode child) {
FSDirectory fsDir = namesystem.dir;
if (parent == fsDir.rootDir) {
child.setLocalName(renameReservedRootComponentOnUpgrade(
child.getLocalNameBytes(), getLayoutVersion()));
}
// NOTE: This does not update space counts for parents
if (!parent.addChild(child)) {
return;
}
namesystem.dir.cacheName(child);
if (child.isFile()) {
updateBlocksMap(child.asFile());
}
}
public void updateBlocksMap(INodeFile file) {
// Add file->block mapping
final BlockInfo[] blocks = file.getBlocks();
if (blocks != null) {
final BlockManager bm = namesystem.getBlockManager();
for (int i = 0; i < blocks.length; i++) {
file.setBlock(i, bm.addBlockCollection(blocks[i], file));
}
}
}
/** @return The FSDirectory of the namesystem where the fsimage is loaded */
public FSDirectory getFSDirectoryInLoading() {
return namesystem.dir;
}
public INode loadINodeWithLocalName(boolean isSnapshotINode, DataInput in,
boolean updateINodeMap) throws IOException {
return loadINodeWithLocalName(isSnapshotINode, in, updateINodeMap, null);
}
public INode loadINodeWithLocalName(boolean isSnapshotINode,
DataInput in, boolean updateINodeMap, Counter counter)
throws IOException {
byte[] localName = FSImageSerialization.readLocalName(in);
localName =
renameReservedComponentOnUpgrade(localName, getLayoutVersion());
INode inode = loadINode(localName, isSnapshotINode, in, counter);
if (updateINodeMap) {
namesystem.dir.addToInodeMap(inode);
}
return inode;
}
/**
* load an inode from fsimage except for its name
*
* @param in data input stream from which image is read
* @param counter Counter to increment for namenode startup progress
* @return an inode
*/
@SuppressWarnings("deprecation")
INode loadINode(final byte[] localName, boolean isSnapshotINode,
DataInput in, Counter counter) throws IOException {
final int imgVersion = getLayoutVersion();
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.SNAPSHOT, imgVersion)) {
namesystem.getFSDirectory().verifyINodeName(localName);
}
long inodeId = NameNodeLayoutVersion.supports(
LayoutVersion.Feature.ADD_INODE_ID, imgVersion) ? in.readLong()
: namesystem.dir.allocateNewInodeId();
final short replication = namesystem.getBlockManager().adjustReplication(
in.readShort());
final long modificationTime = in.readLong();
long atime = 0;
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.FILE_ACCESS_TIME, imgVersion)) {
atime = in.readLong();
}
final long blockSize = in.readLong();
final int numBlocks = in.readInt();
if (numBlocks >= 0) {
// file
// read blocks
BlockInfo[] blocks = new BlockInfo[numBlocks];
for (int j = 0; j < numBlocks; j++) {
blocks[j] = new BlockInfoContiguous(replication);
blocks[j].readFields(in);
}
String clientName = "";
String clientMachine = "";
boolean underConstruction = false;
FileDiffList fileDiffs = null;
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.SNAPSHOT, imgVersion)) {
// read diffs
fileDiffs = SnapshotFSImageFormat.loadFileDiffList(in, this);
if (isSnapshotINode) {
underConstruction = in.readBoolean();
if (underConstruction) {
clientName = FSImageSerialization.readString(in);
clientMachine = FSImageSerialization.readString(in);
// convert the last block to BlockUC
if (blocks.length > 0) {
BlockInfo lastBlk = blocks[blocks.length - 1];
blocks[blocks.length - 1] = new BlockInfoContiguousUnderConstruction(
lastBlk, replication);
}
}
}
}
final PermissionStatus permissions = PermissionStatus.read(in);
// return
if (counter != null) {
counter.increment();
}
final INodeFile file = new INodeFile(inodeId, localName, permissions,
modificationTime, atime, blocks, replication, blockSize, (byte)0);
if (underConstruction) {
file.toUnderConstruction(clientName, clientMachine);
}
return fileDiffs == null ? file : new INodeFile(file, fileDiffs);
} else if (numBlocks == -1) {
//directory
//read quotas
final long nsQuota = in.readLong();
long dsQuota = -1L;
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.DISKSPACE_QUOTA, imgVersion)) {
dsQuota = in.readLong();
}
//read snapshot info
boolean snapshottable = false;
boolean withSnapshot = false;
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.SNAPSHOT, imgVersion)) {
snapshottable = in.readBoolean();
if (!snapshottable) {
withSnapshot = in.readBoolean();
}
}
final PermissionStatus permissions = PermissionStatus.read(in);
//return
if (counter != null) {
counter.increment();
}
final INodeDirectory dir = new INodeDirectory(inodeId, localName,
permissions, modificationTime);
if (nsQuota >= 0 || dsQuota >= 0) {
dir.addDirectoryWithQuotaFeature(new DirectoryWithQuotaFeature.Builder().
nameSpaceQuota(nsQuota).storageSpaceQuota(dsQuota).build());
}
if (withSnapshot) {
dir.addSnapshotFeature(null);
}
if (snapshottable) {
dir.addSnapshottableFeature();
}
return dir;
} else if (numBlocks == -2) {
//symlink
if (!FileSystem.areSymlinksEnabled()) {
throw new IOException("Symlinks not supported - please remove symlink before upgrading to this version of HDFS");
}
final String symlink = Text.readString(in);
final PermissionStatus permissions = PermissionStatus.read(in);
if (counter != null) {
counter.increment();
}
return new INodeSymlink(inodeId, localName, permissions,
modificationTime, atime, symlink);
} else if (numBlocks == -3) {
//reference
// Intentionally do not increment counter, because it is too difficult at
// this point to assess whether or not this is a reference that counts
// toward quota.
final boolean isWithName = in.readBoolean();
// lastSnapshotId for WithName node, dstSnapshotId for DstReference node
int snapshotId = in.readInt();
final INodeReference.WithCount withCount
= referenceMap.loadINodeReferenceWithCount(isSnapshotINode, in, this);
if (isWithName) {
return new INodeReference.WithName(null, withCount, localName,
snapshotId);
} else {
final INodeReference ref = new INodeReference.DstReference(null,
withCount, snapshotId);
return ref;
}
}
throw new IOException("Unknown inode type: numBlocks=" + numBlocks);
}
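    // Added note (derived from the branches above): the numberOfBlocks value
    // read for each inode acts as a type tag. A non-negative count means an
    // INodeFile with that many blocks, -1 means an INodeDirectory, -2 an
    // INodeSymlink, and -3 an INodeReference (WithName or DstReference).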
/** Load {@link INodeFileAttributes}. */
public INodeFileAttributes loadINodeFileAttributes(DataInput in)
throws IOException {
final int layoutVersion = getLayoutVersion();
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
return loadINodeWithLocalName(true, in, false).asFile();
}
final byte[] name = FSImageSerialization.readLocalName(in);
final PermissionStatus permissions = PermissionStatus.read(in);
final long modificationTime = in.readLong();
final long accessTime = in.readLong();
final short replication = namesystem.getBlockManager().adjustReplication(
in.readShort());
final long preferredBlockSize = in.readLong();
return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
accessTime, replication, preferredBlockSize, (byte) 0, null);
}
public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)
throws IOException {
final int layoutVersion = getLayoutVersion();
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.OPTIMIZE_SNAPSHOT_INODES, layoutVersion)) {
return loadINodeWithLocalName(true, in, false).asDirectory();
}
final byte[] name = FSImageSerialization.readLocalName(in);
final PermissionStatus permissions = PermissionStatus.read(in);
final long modificationTime = in.readLong();
// Read quotas: quota by storage type does not need to be processed below.
// It is handled only in protobuf based FsImagePBINode class for newer
// fsImages. Tools using this class such as legacy-mode of offline image viewer
// should only load legacy FSImages without newer features.
final long nsQuota = in.readLong();
final long dsQuota = in.readLong();
return nsQuota == -1L && dsQuota == -1L ? new INodeDirectoryAttributes.SnapshotCopy(
name, permissions, null, modificationTime, null)
: new INodeDirectoryAttributes.CopyWithQuota(name, permissions,
null, modificationTime, nsQuota, dsQuota, null, null);
}
private void loadFilesUnderConstruction(DataInput in,
boolean supportSnapshot, Counter counter) throws IOException {
FSDirectory fsDir = namesystem.dir;
int size = in.readInt();
LOG.info("Number of files under construction = " + size);
for (int i = 0; i < size; i++) {
INodeFile cons = FSImageSerialization.readINodeUnderConstruction(in,
namesystem, getLayoutVersion());
counter.increment();
// verify that file exists in namespace
String path = cons.getLocalName();
INodeFile oldnode = null;
boolean inSnapshot = false;
if (path != null && FSDirectory.isReservedName(path) &&
NameNodeLayoutVersion.supports(
LayoutVersion.Feature.ADD_INODE_ID, getLayoutVersion())) {
// TODO: for HDFS-5428, we use reserved path for those INodeFileUC in
// snapshot. If we support INode ID in the layout version, we can use
// the inode id to find the oldnode.
oldnode = namesystem.dir.getInode(cons.getId()).asFile();
inSnapshot = true;
} else {
path = renameReservedPathsOnUpgrade(path, getLayoutVersion());
final INodesInPath iip = fsDir.getINodesInPath(path, true);
oldnode = INodeFile.valueOf(iip.getLastINode(), path);
}
FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature();
oldnode.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
if (oldnode.numBlocks() > 0) {
BlockInfo ucBlock = cons.getLastBlock();
// we do not replace the inode, just replace the last block of oldnode
BlockInfo info = namesystem.getBlockManager().addBlockCollection(
ucBlock, oldnode);
oldnode.setBlock(oldnode.numBlocks() - 1, info);
}
if (!inSnapshot) {
namesystem.leaseManager.addLease(uc.getClientName(), oldnode.getId());
}
}
}
private void loadSecretManagerState(DataInput in)
throws IOException {
int imgVersion = getLayoutVersion();
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.DELEGATION_TOKEN, imgVersion)) {
//SecretManagerState is not available.
//This must not happen if security is turned on.
return;
}
namesystem.loadSecretManagerStateCompat(in);
}
private void loadCacheManagerState(DataInput in) throws IOException {
int imgVersion = getLayoutVersion();
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.CACHING, imgVersion)) {
return;
}
namesystem.getCacheManager().loadStateCompat(in);
}
private int getLayoutVersion() {
return namesystem.getFSImage().getStorage().getLayoutVersion();
}
private boolean isRoot(byte[][] path) {
return path.length == 1 &&
path[0] == null;
}
private boolean isParent(byte[][] path, byte[][] parent) {
if (path == null || parent == null)
return false;
if (parent.length == 0 || path.length != parent.length + 1)
return false;
boolean isParent = true;
for (int i = 0; i < parent.length; i++) {
isParent = isParent && Arrays.equals(path[i], parent[i]);
}
return isParent;
}
/**
* Return string representing the parent of the given path.
*/
String getParent(String path) {
return path.substring(0, path.lastIndexOf(Path.SEPARATOR));
}
byte[][] getParent(byte[][] path) {
byte[][] result = new byte[path.length - 1][];
for (int i = 0; i < result.length; i++) {
result[i] = new byte[path[i].length];
System.arraycopy(path[i], 0, result[i], 0, path[i].length);
}
return result;
}
public Snapshot getSnapshot(DataInput in) throws IOException {
return snapshotMap.get(in.readInt());
}
}
@VisibleForTesting
public static final TreeMap<String, String> renameReservedMap =
new TreeMap<String, String>();
/**
* Use the default key-value pairs that will be used to determine how to
* rename reserved paths on upgrade.
*/
@VisibleForTesting
public static void useDefaultRenameReservedPairs() {
renameReservedMap.clear();
for (String key: HdfsServerConstants.RESERVED_PATH_COMPONENTS) {
renameReservedMap.put(
key,
key + "." + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + "."
+ "UPGRADE_RENAMED");
}
}
/**
* Set the key-value pairs that will be used to determine how to rename
* reserved paths on upgrade.
*/
@VisibleForTesting
public static void setRenameReservedPairs(String renameReserved) {
// Clear and set the default values
useDefaultRenameReservedPairs();
// Overwrite with provided values
setRenameReservedMapInternal(renameReserved);
}
private static void setRenameReservedMapInternal(String renameReserved) {
Collection<String> pairs =
StringUtils.getTrimmedStringCollection(renameReserved);
for (String p : pairs) {
String[] pair = StringUtils.split(p, '/', '=');
Preconditions.checkArgument(pair.length == 2,
"Could not parse key-value pair " + p);
String key = pair[0];
String value = pair[1];
Preconditions.checkArgument(DFSUtil.isReservedPathComponent(key),
"Unknown reserved path " + key);
Preconditions.checkArgument(DFSUtil.isValidNameForComponent(value),
"Invalid rename path for " + key + ": " + value);
LOG.info("Will rename reserved path " + key + " to " + value);
renameReservedMap.put(key, value);
}
}
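  // Illustrative sketch (values are hypothetical; the format follows the
  // parsing above): entries are comma-separated "reservedName=replacement"
  // pairs, and each key must be a known reserved component such as
  // ".snapshot" or ".reserved".
  //
  //   FSImageFormat.setRenameReservedPairs(
  //       ".snapshot=.user-snapshot,.reserved=.user-reserved");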
/**
* When upgrading from an old version, the filesystem could contain paths
* that are now reserved in the new version (e.g. .snapshot). This renames
* these new reserved paths to a user-specified value to avoid collisions
* with the reserved name.
*
* @param path Old path potentially containing a reserved path
* @return New path with reserved path components renamed to user value
*/
static String renameReservedPathsOnUpgrade(String path,
final int layoutVersion) {
final String oldPath = path;
// If any known LVs aren't supported, we're doing an upgrade
if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
String[] components = INode.getPathNames(path);
// Only need to worry about the root directory
if (components.length > 1) {
components[1] = DFSUtil.bytes2String(
renameReservedRootComponentOnUpgrade(
DFSUtil.string2Bytes(components[1]),
layoutVersion));
path = DFSUtil.strings2PathString(components);
}
}
if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
String[] components = INode.getPathNames(path);
// Special case the root path
if (components.length == 0) {
return path;
}
for (int i=0; i<components.length; i++) {
components[i] = DFSUtil.bytes2String(
renameReservedComponentOnUpgrade(
DFSUtil.string2Bytes(components[i]),
layoutVersion));
}
path = DFSUtil.strings2PathString(components);
}
if (!path.equals(oldPath)) {
LOG.info("Upgrade process renamed reserved path " + oldPath + " to "
+ path);
}
return path;
}
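  // Illustrative example (hypothetical path; assumes the default rename map
  // installed by useDefaultRenameReservedPairs()):
  //
  //   renameReservedPathsOnUpgrade("/data/.snapshot/jan", oldLayoutVersion)
  //     returns "/data/.snapshot.<NAMENODE_LAYOUT_VERSION>.UPGRADE_RENAMED/jan"
  //     when the old layout predates the SNAPSHOT feature.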
private final static String RESERVED_ERROR_MSG =
FSDirectory.DOT_RESERVED_PATH_PREFIX + " is a reserved path and "
+ HdfsConstants.DOT_SNAPSHOT_DIR + " is a reserved path component in"
+ " this version of HDFS. Please rollback and delete or rename"
+ " this path, or upgrade with the "
+ StartupOption.RENAMERESERVED.getName()
+ " [key-value pairs]"
+ " option to automatically rename these paths during upgrade.";
/**
   * Same as {@link #renameReservedPathsOnUpgrade(String, int)}, but for a single
* byte array path component.
*/
private static byte[] renameReservedComponentOnUpgrade(byte[] component,
final int layoutVersion) {
// If the LV doesn't support snapshots, we're doing an upgrade
if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
if (Arrays.equals(component, HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES)) {
Preconditions.checkArgument(
renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
RESERVED_ERROR_MSG);
component =
DFSUtil.string2Bytes(renameReservedMap
.get(HdfsConstants.DOT_SNAPSHOT_DIR));
}
}
return component;
}
/**
   * Same as {@link #renameReservedPathsOnUpgrade(String, int)}, but for a
   * single byte array path component that is a direct child of the root.
*/
private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
final int layoutVersion) {
// If the LV doesn't support inode IDs, we're doing an upgrade
if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
if (Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
Preconditions.checkArgument(
renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
RESERVED_ERROR_MSG);
final String renameString = renameReservedMap
.get(FSDirectory.DOT_RESERVED_STRING);
component =
DFSUtil.string2Bytes(renameString);
LOG.info("Renamed root path " + FSDirectory.DOT_RESERVED_STRING
+ " to " + renameString);
}
}
return component;
}
/**
* A one-shot class responsible for writing an image file.
* The write() function should be called once, after which the getter
* functions may be used to retrieve information about the file that was written.
*
   * This format has been replaced by the PB-based FSImage; the class is kept
   * only to maintain compatibility with the external fsimage tool.
*/
@Deprecated
static class Saver {
private static final int LAYOUT_VERSION = -51;
public static final int CHECK_CANCEL_INTERVAL = 4096;
private final SaveNamespaceContext context;
/** Set to true once an image has been written */
private boolean saved = false;
private long checkCancelCounter = 0;
/** The MD5 checksum of the file that was written */
private MD5Hash savedDigest;
private final ReferenceMap referenceMap = new ReferenceMap();
private final Map<Long, INodeFile> snapshotUCMap =
new HashMap<Long, INodeFile>();
/** @throws IllegalStateException if the instance has not yet saved an image */
private void checkSaved() {
if (!saved) {
throw new IllegalStateException("FSImageSaver has not saved an image");
}
}
/** @throws IllegalStateException if the instance has already saved an image */
private void checkNotSaved() {
if (saved) {
throw new IllegalStateException("FSImageSaver has already saved an image");
}
}
Saver(SaveNamespaceContext context) {
this.context = context;
}
/**
* Return the MD5 checksum of the image file that was saved.
*/
MD5Hash getSavedDigest() {
checkSaved();
return savedDigest;
}
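    // Illustrative usage sketch (not from the original source; the context and
    // compression factory call are assumptions): checkpointing code creates one
    // Saver per output file and calls save() exactly once.
    //
    //   Saver saver = new Saver(saveNamespaceContext);
    //   saver.save(newImageFile, FSImageCompression.createCompression(conf));
    //   MD5Hash digest = saver.getSavedDigest();  // valid only after save()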
void save(File newFile, FSImageCompression compression) throws IOException {
checkNotSaved();
final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
final INodeDirectory rootDir = sourceNamesystem.dir.rootDir;
final long numINodes = rootDir.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getNameSpace();
String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
Step step = new Step(StepType.INODES, sdPath);
StartupProgress prog = NameNode.getStartupProgress();
prog.beginStep(Phase.SAVING_CHECKPOINT, step);
prog.setTotal(Phase.SAVING_CHECKPOINT, step, numINodes);
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
long startTime = monotonicNow();
//
// Write out data
//
MessageDigest digester = MD5Hash.getDigester();
FileOutputStream fout = new FileOutputStream(newFile);
DigestOutputStream fos = new DigestOutputStream(fout, digester);
DataOutputStream out = new DataOutputStream(fos);
try {
out.writeInt(LAYOUT_VERSION);
LayoutFlags.write(out);
// We use the non-locked version of getNamespaceInfo here since
// the coordinating thread of saveNamespace already has read-locked
// the namespace for us. If we attempt to take another readlock
// from the actual saver thread, there's a potential of a
// fairness-related deadlock. See the comments on HDFS-2223.
out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
.getNamespaceID());
out.writeLong(numINodes);
out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV1());
out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampV2());
out.writeLong(sourceNamesystem.getBlockIdManager().getGenerationStampAtblockIdSwitch());
out.writeLong(sourceNamesystem.getBlockIdManager().getLastAllocatedBlockId());
out.writeLong(context.getTxId());
out.writeLong(sourceNamesystem.dir.getLastInodeId());
sourceNamesystem.getSnapshotManager().write(out);
// write compression info and set up compressed stream
out = compression.writeHeaderAndWrapStream(fos);
LOG.info("Saving image file " + newFile +
" using " + compression);
// save the root
saveINode2Image(rootDir, out, false, referenceMap, counter);
// save the rest of the nodes
saveImage(rootDir, out, true, false, counter);
prog.endStep(Phase.SAVING_CHECKPOINT, step);
// Now that the step is finished, set counter equal to total to adjust
// for possible under-counting due to reference inodes.
prog.setCount(Phase.SAVING_CHECKPOINT, step, numINodes);
// save files under construction
// TODO: for HDFS-5428, since we cannot break the compatibility of
// fsimage, we store part of the under-construction files that are only
// in snapshots in this "under-construction-file" section. As a
// temporary solution, we use "/.reserved/.inodes/<inodeid>" as their
// paths, so that when loading fsimage we do not put them into the lease
// map. In the future, we can remove this hack when we can bump the
// layout version.
saveFilesUnderConstruction(sourceNamesystem, out, snapshotUCMap);
context.checkCancelled();
sourceNamesystem.saveSecretManagerStateCompat(out, sdPath);
context.checkCancelled();
sourceNamesystem.getCacheManager().saveStateCompat(out, sdPath);
context.checkCancelled();
out.flush();
context.checkCancelled();
fout.getChannel().force(true);
} finally {
out.close();
}
saved = true;
// set md5 of the saved image
savedDigest = new MD5Hash(digester.digest());
LOG.info("Image file " + newFile + " of size " + newFile.length()
+ " bytes saved in " + (monotonicNow() - startTime) / 1000
+ " seconds.");
}
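    // Added summary (derived from the writes above): the legacy header is
    // written in this order before the stream is wrapped for compression:
    // layout version, layout flags, namespaceID, inode count, generation
    // stamps (v1, v2, stamp at the block-id switch), last allocated block id,
    // transaction id, last inode id, and snapshot manager state.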
/**
* Save children INodes.
* @param children The list of children INodes
* @param out The DataOutputStream to write
* @param inSnapshot Whether the parent directory or its ancestor is in
* the deleted list of some snapshot (caused by rename or
* deletion)
* @param counter Counter to increment for namenode startup progress
* @return Number of children that are directory
*/
private int saveChildren(ReadOnlyList<INode> children,
DataOutputStream out, boolean inSnapshot, Counter counter)
throws IOException {
// Write normal children INode.
out.writeInt(children.size());
int dirNum = 0;
for(INode child : children) {
// print all children first
// TODO: for HDFS-5428, we cannot change the format/content of fsimage
// here, thus even if the parent directory is in snapshot, we still
// do not handle INodeUC as those stored in deleted list
saveINode2Image(child, out, false, referenceMap, counter);
if (child.isDirectory()) {
dirNum++;
} else if (inSnapshot && child.isFile()
&& child.asFile().isUnderConstruction()) {
this.snapshotUCMap.put(child.getId(), child.asFile());
}
if (checkCancelCounter++ % CHECK_CANCEL_INTERVAL == 0) {
context.checkCancelled();
}
}
return dirNum;
}
/**
* Save file tree image starting from the given root.
* This is a recursive procedure, which first saves all children and
* snapshot diffs of a current directory and then moves inside the
* sub-directories.
*
* @param current The current node
     * @param out The DataOutputStream to write the image
* @param toSaveSubtree Whether or not to save the subtree to fsimage. For
* reference node, its subtree may already have been
* saved before.
* @param inSnapshot Whether the current directory is in snapshot
* @param counter Counter to increment for namenode startup progress
*/
private void saveImage(INodeDirectory current, DataOutputStream out,
boolean toSaveSubtree, boolean inSnapshot, Counter counter)
throws IOException {
      // 1. Write the inode id of the directory
out.writeLong(current.getId());
if (!toSaveSubtree) {
return;
}
final ReadOnlyList<INode> children = current
.getChildrenList(Snapshot.CURRENT_STATE_ID);
int dirNum = 0;
List<INodeDirectory> snapshotDirs = null;
DirectoryWithSnapshotFeature sf = current.getDirectoryWithSnapshotFeature();
if (sf != null) {
snapshotDirs = new ArrayList<INodeDirectory>();
sf.getSnapshotDirectory(snapshotDirs);
dirNum += snapshotDirs.size();
}
// 2. Write INodeDirectorySnapshottable#snapshotsByNames to record all
// Snapshots
if (current.isDirectory() && current.asDirectory().isSnapshottable()) {
SnapshotFSImageFormat.saveSnapshots(current.asDirectory(), out);
} else {
out.writeInt(-1); // # of snapshots
}
// 3. Write children INode
dirNum += saveChildren(children, out, inSnapshot, counter);
// 4. Write DirectoryDiff lists, if there is any.
SnapshotFSImageFormat.saveDirectoryDiffList(current, out, referenceMap);
// Write sub-tree of sub-directories, including possible snapshots of
// deleted sub-directories
out.writeInt(dirNum); // the number of sub-directories
for(INode child : children) {
if(!child.isDirectory()) {
continue;
}
// make sure we only save the subtree under a reference node once
boolean toSave = child.isReference() ?
referenceMap.toProcessSubtree(child.getId()) : true;
saveImage(child.asDirectory(), out, toSave, inSnapshot, counter);
}
if (snapshotDirs != null) {
for (INodeDirectory subDir : snapshotDirs) {
// make sure we only save the subtree under a reference node once
boolean toSave = subDir.getParentReference() != null ?
referenceMap.toProcessSubtree(subDir.getId()) : true;
saveImage(subDir, out, toSave, true, counter);
}
}
}
/**
* Saves inode and increments progress counter.
*
* @param inode INode to save
* @param out DataOutputStream to receive inode
* @param writeUnderConstruction boolean true if this is under construction
* @param referenceMap ReferenceMap containing reference inodes
* @param counter Counter to increment for namenode startup progress
* @throws IOException thrown if there is an I/O error
*/
private void saveINode2Image(INode inode, DataOutputStream out,
boolean writeUnderConstruction, ReferenceMap referenceMap,
Counter counter) throws IOException {
FSImageSerialization.saveINode2Image(inode, out, writeUnderConstruction,
referenceMap);
// Intentionally do not increment counter for reference inodes, because it
// is too difficult at this point to assess whether or not this is a
// reference that counts toward quota.
if (!(inode instanceof INodeReference)) {
counter.increment();
}
}
/**
* Serializes leases.
*/
void saveFilesUnderConstruction(FSNamesystem fsn, DataOutputStream out,
Map<Long, INodeFile> snapshotUCMap) throws IOException {
// This is run by an inferior thread of saveNamespace, which holds a read
// lock on our behalf. If we took the read lock here, we could block
// for fairness if a writer is waiting on the lock.
final LeaseManager leaseManager = fsn.getLeaseManager();
final FSDirectory dir = fsn.getFSDirectory();
synchronized (leaseManager) {
Collection<Long> filesWithUC = leaseManager.getINodeIdWithLeases();
for (Long id : filesWithUC) {
// TODO: for HDFS-5428, because of rename operations, some
// under-construction files that are
// in the current fs directory can also be captured in the
// snapshotUCMap. We should remove them from the snapshotUCMap.
snapshotUCMap.remove(id);
}
out.writeInt(filesWithUC.size() + snapshotUCMap.size()); // write the size
for (Long id : filesWithUC) {
INodeFile file = dir.getInode(id).asFile();
String path = file.getFullPathName();
FSImageSerialization.writeINodeUnderConstruction(
out, file, path);
}
for (Map.Entry<Long, INodeFile> entry : snapshotUCMap.entrySet()) {
// for those snapshot INodeFileUC, we use "/.reserved/.inodes/<inodeid>"
// as their paths
StringBuilder b = new StringBuilder();
b.append(FSDirectory.DOT_RESERVED_PATH_PREFIX)
.append(Path.SEPARATOR).append(FSDirectory.DOT_INODES_STRING)
.append(Path.SEPARATOR).append(entry.getValue().getId());
FSImageSerialization.writeINodeUnderConstruction(
out, entry.getValue(), b.toString());
}
}
}
}
}
| 56,859 | 37.058902 | 122 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.io.IOUtils;
/**
* Inspects a FSImage storage directory in the "old" (pre-HDFS-1073) format.
* This format has the following data files:
* - fsimage
* - fsimage.ckpt (when checkpoint is being uploaded)
* - edits
* - edits.new (when logs are "rolled")
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
class FSImagePreTransactionalStorageInspector extends FSImageStorageInspector {
private static final Log LOG =
LogFactory.getLog(FSImagePreTransactionalStorageInspector.class);
  /* Flag set true if at least one storage dir does not contain the newest
   * fstime */
private boolean hasOutOfDateStorageDirs = false;
/* Flag set false if there are any "previous" directories found */
private boolean isUpgradeFinalized = true;
private boolean needToSaveAfterRecovery = false;
// Track the name and edits dir with the latest times
private long latestNameCheckpointTime = Long.MIN_VALUE;
private long latestEditsCheckpointTime = Long.MIN_VALUE;
private StorageDirectory latestNameSD = null;
private StorageDirectory latestEditsSD = null;
  /** Set used to determine whether all storage directories share the same checkpoint time. */
final Set<Long> checkpointTimes = new HashSet<Long>();
private final List<String> imageDirs = new ArrayList<String>();
private final List<String> editsDirs = new ArrayList<String>();
@Override
void inspectDirectory(StorageDirectory sd) throws IOException {
// Was the file just formatted?
if (!sd.getVersionFile().exists()) {
hasOutOfDateStorageDirs = true;
return;
}
boolean imageExists = false;
boolean editsExists = false;
// Determine if sd is image, edits or both
if (sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
imageExists = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE).exists();
imageDirs.add(sd.getRoot().getCanonicalPath());
}
if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
editsExists = NNStorage.getStorageFile(sd, NameNodeFile.EDITS).exists();
editsDirs.add(sd.getRoot().getCanonicalPath());
}
long checkpointTime = readCheckpointTime(sd);
checkpointTimes.add(checkpointTime);
if (sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE) &&
(latestNameCheckpointTime < checkpointTime) && imageExists) {
latestNameCheckpointTime = checkpointTime;
latestNameSD = sd;
}
if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS) &&
(latestEditsCheckpointTime < checkpointTime) && editsExists) {
latestEditsCheckpointTime = checkpointTime;
latestEditsSD = sd;
}
// check that we have a valid, non-default checkpointTime
if (checkpointTime <= 0L)
hasOutOfDateStorageDirs = true;
// set finalized flag
isUpgradeFinalized = isUpgradeFinalized && !sd.getPreviousDir().exists();
}
/**
* Determine the checkpoint time of the specified StorageDirectory
*
* @param sd StorageDirectory to check
* @return If file exists and can be read, last checkpoint time. If not, 0L.
* @throws IOException On errors processing file pointed to by sd
*/
static long readCheckpointTime(StorageDirectory sd) throws IOException {
File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
long timeStamp = 0L;
if (timeFile.exists() && FileUtil.canRead(timeFile)) {
DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
try {
timeStamp = in.readLong();
in.close();
in = null;
} finally {
IOUtils.cleanup(LOG, in);
}
}
return timeStamp;
}
@Override
boolean isUpgradeFinalized() {
return isUpgradeFinalized;
}
@Override
List<FSImageFile> getLatestImages() throws IOException {
// We should have at least one image and one edits dirs
if (latestNameSD == null)
throw new IOException("Image file is not found in " + imageDirs);
if (latestEditsSD == null)
throw new IOException("Edits file is not found in " + editsDirs);
// Make sure we are loading image and edits from same checkpoint
if (latestNameCheckpointTime > latestEditsCheckpointTime
&& latestNameSD != latestEditsSD
&& latestNameSD.getStorageDirType() == NameNodeDirType.IMAGE
&& latestEditsSD.getStorageDirType() == NameNodeDirType.EDITS) {
// This is a rare failure when NN has image-only and edits-only
// storage directories, and fails right after saving images,
// in some of the storage directories, but before purging edits.
// See -NOTE- in saveNamespace().
LOG.error("This is a rare failure scenario!!!");
LOG.error("Image checkpoint time " + latestNameCheckpointTime +
" > edits checkpoint time " + latestEditsCheckpointTime);
LOG.error("Name-node will treat the image as the latest state of " +
"the namespace. Old edits will be discarded.");
} else if (latestNameCheckpointTime != latestEditsCheckpointTime) {
throw new IOException("Inconsistent storage detected, " +
"image and edits checkpoint times do not match. " +
"image checkpoint time = " + latestNameCheckpointTime +
"edits checkpoint time = " + latestEditsCheckpointTime);
}
needToSaveAfterRecovery = doRecovery();
FSImageFile file = new FSImageFile(latestNameSD,
NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE),
HdfsServerConstants.INVALID_TXID);
LinkedList<FSImageFile> ret = new LinkedList<FSImageFile>();
ret.add(file);
return ret;
}
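  // Illustrative usage sketch (assumed caller, not in the original file):
  // recovery code runs the inspector over every storage directory and then
  // loads the newest image it reports.
  //
  //   FSImagePreTransactionalStorageInspector inspector =
  //       new FSImagePreTransactionalStorageInspector();
  //   storage.inspectStorageDirs(inspector);   // "storage" is an NNStorage
  //   FSImageFile latest = inspector.getLatestImages().get(0);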
@Override
boolean needToSave() {
return hasOutOfDateStorageDirs ||
checkpointTimes.size() != 1 ||
latestNameCheckpointTime > latestEditsCheckpointTime ||
needToSaveAfterRecovery;
}
boolean doRecovery() throws IOException {
LOG.debug(
"Performing recovery in "+ latestNameSD + " and " + latestEditsSD);
boolean needToSave = false;
File curFile =
NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE);
File ckptFile =
NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE_NEW);
//
// If we were in the midst of a checkpoint
//
if (ckptFile.exists()) {
needToSave = true;
if (NNStorage.getStorageFile(latestEditsSD, NameNodeFile.EDITS_NEW)
.exists()) {
//
        // checkpointing might have uploaded a new
// merged image, but we discard it here because we are
// not sure whether the entire merged image was uploaded
// before the namenode crashed.
//
if (!ckptFile.delete()) {
throw new IOException("Unable to delete " + ckptFile);
}
} else {
//
// checkpointing was in progress when the namenode
// shutdown. The fsimage.ckpt was created and the edits.new
// file was moved to edits. We complete that checkpoint by
// moving fsimage.new to fsimage. There is no need to
// update the fstime file here. renameTo fails on Windows
// if the destination file already exists.
//
if (!ckptFile.renameTo(curFile)) {
if (!curFile.delete())
LOG.warn("Unable to delete dir " + curFile + " before rename");
if (!ckptFile.renameTo(curFile)) {
throw new IOException("Unable to rename " + ckptFile +
" to " + curFile);
}
}
}
}
return needToSave;
}
/**
* @return a list with the paths to EDITS and EDITS_NEW (if it exists)
* in a given storage directory.
*/
static List<File> getEditsInStorageDir(StorageDirectory sd) {
ArrayList<File> files = new ArrayList<File>();
File edits = NNStorage.getStorageFile(sd, NameNodeFile.EDITS);
assert edits.exists() : "Expected edits file at " + edits;
files.add(edits);
File editsNew = NNStorage.getStorageFile(sd, NameNodeFile.EDITS_NEW);
if (editsNew.exists()) {
files.add(editsNew);
}
return files;
}
private List<File> getLatestEditsFiles() {
if (latestNameCheckpointTime > latestEditsCheckpointTime) {
// the image is already current, discard edits
LOG.debug(
"Name checkpoint time is newer than edits, not loading edits.");
return Collections.emptyList();
}
return getEditsInStorageDir(latestEditsSD);
}
@Override
long getMaxSeenTxId() {
return 0L;
}
static Iterable<EditLogInputStream> getEditLogStreams(NNStorage storage)
throws IOException {
FSImagePreTransactionalStorageInspector inspector
= new FSImagePreTransactionalStorageInspector();
storage.inspectStorageDirs(inspector);
List<EditLogInputStream> editStreams = new ArrayList<EditLogInputStream>();
for (File f : inspector.getLatestEditsFiles()) {
editStreams.add(new EditLogFileInputStream(f));
}
return editStreams;
}
}
| 10,742 | 36.044828 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StartupProgressServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressView;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
import org.apache.hadoop.io.IOUtils;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Servlet that provides a JSON representation of the namenode's current startup
* progress.
*/
@InterfaceAudience.Private
@SuppressWarnings("serial")
public class StartupProgressServlet extends DfsServlet {
private static final String COUNT = "count";
private static final String ELAPSED_TIME = "elapsedTime";
private static final String FILE = "file";
private static final String NAME = "name";
private static final String DESC = "desc";
private static final String PERCENT_COMPLETE = "percentComplete";
private static final String PHASES = "phases";
private static final String SIZE = "size";
private static final String STATUS = "status";
private static final String STEPS = "steps";
private static final String TOTAL = "total";
public static final String PATH_SPEC = "/startupProgress";
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws IOException {
resp.setContentType("application/json; charset=UTF-8");
StartupProgress prog = NameNodeHttpServer.getStartupProgressFromContext(
getServletContext());
StartupProgressView view = prog.createView();
JsonGenerator json = new JsonFactory().createJsonGenerator(resp.getWriter());
try {
json.writeStartObject();
json.writeNumberField(ELAPSED_TIME, view.getElapsedTime());
json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete());
json.writeArrayFieldStart(PHASES);
for (Phase phase: view.getPhases()) {
json.writeStartObject();
json.writeStringField(NAME, phase.getName());
json.writeStringField(DESC, phase.getDescription());
json.writeStringField(STATUS, view.getStatus(phase).toString());
json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete(phase));
json.writeNumberField(ELAPSED_TIME, view.getElapsedTime(phase));
writeStringFieldIfNotNull(json, FILE, view.getFile(phase));
writeNumberFieldIfDefined(json, SIZE, view.getSize(phase));
json.writeArrayFieldStart(STEPS);
for (Step step: view.getSteps(phase)) {
json.writeStartObject();
StepType type = step.getType();
if (type != null) {
json.writeStringField(NAME, type.getName());
json.writeStringField(DESC, type.getDescription());
}
json.writeNumberField(COUNT, view.getCount(phase, step));
writeStringFieldIfNotNull(json, FILE, step.getFile());
writeNumberFieldIfDefined(json, SIZE, step.getSize());
json.writeNumberField(TOTAL, view.getTotal(phase, step));
json.writeNumberField(PERCENT_COMPLETE, view.getPercentComplete(phase,
step));
json.writeNumberField(ELAPSED_TIME, view.getElapsedTime(phase, step));
json.writeEndObject();
}
json.writeEndArray();
json.writeEndObject();
}
json.writeEndArray();
json.writeEndObject();
} finally {
IOUtils.cleanup(LOG, json);
}
}
/**
* Writes a JSON number field only if the value is defined.
*
* @param json JsonGenerator to receive output
* @param key String key to put
* @param value long value to put
* @throws IOException if there is an I/O error
*/
private static void writeNumberFieldIfDefined(JsonGenerator json, String key,
long value) throws IOException {
if (value != Long.MIN_VALUE) {
json.writeNumberField(key, value);
}
}
/**
* Writes a JSON string field only if the value is non-null.
*
* @param json JsonGenerator to receive output
* @param key String key to put
* @param value String value to put
* @throws IOException if there is an I/O error
*/
private static void writeStringFieldIfNotNull(JsonGenerator json, String key,
String value) throws IOException {
if (value != null) {
json.writeStringField(key, value);
}
}
}
| 5,453 | 37.957143 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySummary.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.text.NumberFormat;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumMap;
import java.util.Formatter;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
/**
* Aggregates the storage type information for a set of blocks.
*
*/
public class StoragePolicySummary {
Map<StorageTypeAllocation, Long> storageComboCounts = new HashMap<>();
final BlockStoragePolicy[] storagePolicies;
int totalBlocks;
StoragePolicySummary(BlockStoragePolicy[] storagePolicies) {
this.storagePolicies = storagePolicies;
}
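// Illustrative usage sketch (names such as "policySuite" and the block
// iteration below are placeholders, not part of this class):
//
//   StoragePolicySummary summary = new StoragePolicySummary(policySuite);
//   for each block:
//     summary.add(typesOfItsReplicaStorages, policySpecifiedForItsFile);
//   System.out.println(summary);  // prints the compliance report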
// Add a storage type combination
void add(StorageType[] storageTypes, BlockStoragePolicy policy) {
StorageTypeAllocation storageCombo =
new StorageTypeAllocation(storageTypes, policy);
Long count = storageComboCounts.get(storageCombo);
if (count == null) {
storageComboCounts.put(storageCombo, 1L);
storageCombo.setActualStoragePolicy(
getStoragePolicy(storageCombo.getStorageTypes()));
} else {
storageComboCounts.put(storageCombo, count.longValue()+1);
}
totalBlocks++;
}
// sort the storageType combinations based on the total blocks counts
// in descending order
static List<Entry<StorageTypeAllocation, Long>> sortByComparator(
Map<StorageTypeAllocation, Long> unsortMap) {
List<Entry<StorageTypeAllocation, Long>> storageAllocations =
new LinkedList<>(unsortMap.entrySet());
// Sorting the list based on values
Collections.sort(storageAllocations,
new Comparator<Entry<StorageTypeAllocation, Long>>() {
public int compare(Entry<StorageTypeAllocation, Long> o1,
Entry<StorageTypeAllocation, Long> o2)
{
return o2.getValue().compareTo(o1.getValue());
}
});
return storageAllocations;
}
public String toString() {
StringBuilder compliantBlocksSB = new StringBuilder();
compliantBlocksSB.append("\nBlocks satisfying the specified storage policy:");
compliantBlocksSB.append("\nStorage Policy # of blocks % of blocks\n");
StringBuilder nonCompliantBlocksSB = new StringBuilder();
Formatter compliantFormatter = new Formatter(compliantBlocksSB);
Formatter nonCompliantFormatter = new Formatter(nonCompliantBlocksSB);
NumberFormat percentFormat = NumberFormat.getPercentInstance();
percentFormat.setMinimumFractionDigits(4);
percentFormat.setMaximumFractionDigits(4);
for (Map.Entry<StorageTypeAllocation, Long> storageComboCount:
sortByComparator(storageComboCounts)) {
double percent = (double) storageComboCount.getValue() /
(double) totalBlocks;
StorageTypeAllocation sta = storageComboCount.getKey();
if (sta.policyMatches()) {
compliantFormatter.format("%-25s %10d %20s%n",
sta.getStoragePolicyDescriptor(),
storageComboCount.getValue(),
percentFormat.format(percent));
} else {
if (nonCompliantBlocksSB.length() == 0) {
nonCompliantBlocksSB.append("\nBlocks NOT satisfying the specified storage policy:");
nonCompliantBlocksSB.append("\nStorage Policy ");
nonCompliantBlocksSB.append(
"Specified Storage Policy # of blocks % of blocks\n");
}
nonCompliantFormatter.format("%-35s %-20s %10d %20s%n",
sta.getStoragePolicyDescriptor(),
sta.getSpecifiedStoragePolicy().getName(),
storageComboCount.getValue(),
percentFormat.format(percent));
}
}
if (nonCompliantBlocksSB.length() == 0) {
nonCompliantBlocksSB.append("\nAll blocks satisfy specified storage policy.\n");
}
compliantFormatter.close();
nonCompliantFormatter.close();
return compliantBlocksSB.toString() + nonCompliantBlocksSB;
}
/**
*
* @param storageTypes - sorted array of storageTypes
* @return Storage Policy which matches the specific storage Combination
*/
private BlockStoragePolicy getStoragePolicy(StorageType[] storageTypes) {
for (BlockStoragePolicy storagePolicy:storagePolicies) {
StorageType[] policyStorageTypes = storagePolicy.getStorageTypes();
policyStorageTypes = Arrays.copyOf(policyStorageTypes, policyStorageTypes.length);
Arrays.sort(policyStorageTypes);
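// Matching rule: the policy's sorted storage types must be a prefix of the
// block's sorted storage types, and any additional replicas beyond the
// policy's list must use the same type as the policy's last sorted entry.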
if (policyStorageTypes.length <= storageTypes.length) {
int i = 0;
for (; i < policyStorageTypes.length; i++) {
if (policyStorageTypes[i] != storageTypes[i]) {
break;
}
}
if (i < policyStorageTypes.length) {
continue;
}
int j=policyStorageTypes.length;
for (; j < storageTypes.length; j++) {
if (policyStorageTypes[i-1] != storageTypes[j]) {
break;
}
}
if (j==storageTypes.length) {
return storagePolicy;
}
}
}
return null;
}
/**
* Internal class which represents a unique Storage type combination
*
*/
static class StorageTypeAllocation {
private final BlockStoragePolicy specifiedStoragePolicy;
private final StorageType[] storageTypes;
private BlockStoragePolicy actualStoragePolicy;
StorageTypeAllocation(StorageType[] storageTypes,
BlockStoragePolicy specifiedStoragePolicy) {
Arrays.sort(storageTypes);
this.storageTypes = storageTypes;
this.specifiedStoragePolicy = specifiedStoragePolicy;
}
StorageType[] getStorageTypes() {
return storageTypes;
}
BlockStoragePolicy getSpecifiedStoragePolicy() {
return specifiedStoragePolicy;
}
void setActualStoragePolicy(BlockStoragePolicy actualStoragePolicy) {
this.actualStoragePolicy = actualStoragePolicy;
}
BlockStoragePolicy getActualStoragePolicy() {
return actualStoragePolicy;
}
private static String getStorageAllocationAsString(
    Map<StorageType, Integer> storageType_countmap) {
StringBuilder sb = new StringBuilder();
for (Map.Entry<StorageType, Integer>
storageTypeCountEntry:storageType_countmap.entrySet()) {
sb.append(storageTypeCountEntry.getKey().name()+ ":"
+ storageTypeCountEntry.getValue() + ",");
}
if (sb.length() > 1) {
sb.deleteCharAt(sb.length()-1);
}
return sb.toString();
}
private String getStorageAllocationAsString() {
Map<StorageType, Integer> storageType_countmap =
new EnumMap<>(StorageType.class);
for (StorageType storageType: storageTypes) {
Integer count = storageType_countmap.get(storageType);
if (count == null) {
storageType_countmap.put(storageType, 1);
} else {
storageType_countmap.put(storageType, count.intValue()+1);
}
}
return (getStorageAllocationAsString(storageType_countmap));
}
String getStoragePolicyDescriptor() {
StringBuilder storagePolicyDescriptorSB = new StringBuilder();
if (actualStoragePolicy!=null) {
storagePolicyDescriptorSB.append(getStorageAllocationAsString())
.append("(")
.append(actualStoragePolicy.getName())
.append(")");
} else {
storagePolicyDescriptorSB.append(getStorageAllocationAsString());
}
return storagePolicyDescriptorSB.toString();
}
boolean policyMatches() {
return specifiedStoragePolicy.equals(actualStoragePolicy);
}
@Override
public String toString() {
return specifiedStoragePolicy.getName() + "|" + getStoragePolicyDescriptor();
}
@Override
public int hashCode() {
return Objects.hash(specifiedStoragePolicy,Arrays.hashCode(storageTypes));
}
@Override
public boolean equals(Object another) {
return (another instanceof StorageTypeAllocation &&
Objects.equals(specifiedStoragePolicy,
((StorageTypeAllocation)another).specifiedStoragePolicy) &&
Arrays.equals(storageTypes,
((StorageTypeAllocation)another).storageTypes));
}
}
}
| 9,228 | 34.771318 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.util.ExitUtil.terminate;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import com.google.common.collect.Lists;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.util.VersionInfo;
import javax.management.ObjectName;
/**********************************************************
* The Secondary NameNode is a helper to the primary NameNode.
* The Secondary is responsible for supporting periodic checkpoints
* of the HDFS metadata. The current design allows only one Secondary
* NameNode per HDFS cluster.
*
* The Secondary NameNode is a daemon that periodically wakes
* up (determined by the schedule specified in the configuration),
* triggers a periodic checkpoint and then goes back to sleep.
* The Secondary NameNode uses the NamenodeProtocol to talk to the
* primary NameNode.
*
**********************************************************/
@InterfaceAudience.Private
public class SecondaryNameNode implements Runnable,
SecondaryNameNodeInfoMXBean {
static{
HdfsConfiguration.init();
}
public static final Log LOG =
LogFactory.getLog(SecondaryNameNode.class.getName());
private final long starttime = Time.now();
private volatile long lastCheckpointTime = 0;
private volatile long lastCheckpointWallclockTime = 0;
private URL fsName;
private CheckpointStorage checkpointImage;
private NamenodeProtocol namenode;
private Configuration conf;
private InetSocketAddress nameNodeAddr;
private volatile boolean shouldRun;
private HttpServer2 infoServer;
private Collection<URI> checkpointDirs;
private List<URI> checkpointEditsDirs;
private CheckpointConf checkpointConf;
private FSNamesystem namesystem;
private Thread checkpointThread;
private ObjectName nameNodeStatusBeanName;
private String legacyOivImageDir;
@Override
public String toString() {
return getClass().getSimpleName() + " Status"
+ "\nName Node Address : " + nameNodeAddr
+ "\nStart Time : " + new Date(starttime)
+ "\nLast Checkpoint : " + (lastCheckpointTime == 0? "--":
new Date(lastCheckpointWallclockTime))
+ " (" + ((Time.monotonicNow() - lastCheckpointTime) / 1000)
+ " seconds ago)"
+ "\nCheckpoint Period : " + checkpointConf.getPeriod() + " seconds"
+ "\nCheckpoint Transactions: " + checkpointConf.getTxnCount()
+ "\nCheckpoint Dirs : " + checkpointDirs
+ "\nCheckpoint Edits Dirs : " + checkpointEditsDirs;
}
@VisibleForTesting
FSImage getFSImage() {
return checkpointImage;
}
@VisibleForTesting
int getMergeErrorCount() {
return checkpointImage.getMergeErrorCount();
}
@VisibleForTesting
public FSNamesystem getFSNamesystem() {
return namesystem;
}
@VisibleForTesting
void setFSImage(CheckpointStorage image) {
this.checkpointImage = image;
}
@VisibleForTesting
NamenodeProtocol getNameNode() {
return namenode;
}
@VisibleForTesting
void setNameNode(NamenodeProtocol namenode) {
this.namenode = namenode;
}
/**
* Create a connection to the primary namenode.
*/
public SecondaryNameNode(Configuration conf) throws IOException {
this(conf, new CommandLineOpts());
}
public SecondaryNameNode(Configuration conf,
CommandLineOpts commandLineOpts) throws IOException {
try {
String nsId = DFSUtil.getSecondaryNameServiceId(conf);
if (HAUtil.isHAEnabled(conf, nsId)) {
throw new IOException(
"Cannot use SecondaryNameNode in an HA cluster." +
" The Standby Namenode will perform checkpointing.");
}
NameNode.initializeGenericKeys(conf, nsId, null);
initialize(conf, commandLineOpts);
} catch (IOException e) {
shutdown();
throw e;
} catch (HadoopIllegalArgumentException e) {
shutdown();
throw e;
}
}
public static InetSocketAddress getHttpAddress(Configuration conf) {
return NetUtils.createSocketAddr(conf.getTrimmed(
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
}
/**
* Initialize SecondaryNameNode.
*/
private void initialize(final Configuration conf,
CommandLineOpts commandLineOpts) throws IOException {
final InetSocketAddress infoSocAddr = getHttpAddress(conf);
final String infoBindAddress = infoSocAddr.getHostName();
UserGroupInformation.setConfiguration(conf);
if (UserGroupInformation.isSecurityEnabled()) {
SecurityUtil.login(conf,
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY, infoBindAddress);
}
// initiate Java VM metrics
DefaultMetricsSystem.initialize("SecondaryNameNode");
JvmMetrics.create("SecondaryNameNode",
conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
DefaultMetricsSystem.instance());
// Create connection to the namenode.
shouldRun = true;
nameNodeAddr = NameNode.getServiceAddress(conf, true);
this.conf = conf;
this.namenode = NameNodeProxies.createNonHAProxy(conf, nameNodeAddr,
NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
// initialize checkpoint directories
fsName = getInfoServer();
checkpointDirs = FSImage.getCheckpointDirs(conf,
"/tmp/hadoop/dfs/namesecondary");
checkpointEditsDirs = FSImage.getCheckpointEditsDirs(conf,
"/tmp/hadoop/dfs/namesecondary");
checkpointImage = new CheckpointStorage(conf, checkpointDirs, checkpointEditsDirs);
checkpointImage.recoverCreate(commandLineOpts.shouldFormat());
checkpointImage.deleteTempEdits();
namesystem = new FSNamesystem(conf, checkpointImage, true);
// Disable quota checks
namesystem.dir.disableQuotaChecks();
// Initialize other scheduling parameters from the configuration
checkpointConf = new CheckpointConf(conf);
final InetSocketAddress httpAddr = infoSocAddr;
final String httpsAddrString = conf.getTrimmed(
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
httpAddr, httpsAddr, "secondary",
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
nameNodeStatusBeanName = MBeans.register("SecondaryNameNode",
"SecondaryNameNodeInfo", this);
infoServer = builder.build();
infoServer.setAttribute("secondary.name.node", this);
infoServer.setAttribute("name.system.image", checkpointImage);
infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
infoServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
ImageServlet.class, true);
infoServer.start();
LOG.info("Web server init done");
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
int connIdx = 0;
if (policy.isHttpEnabled()) {
InetSocketAddress httpAddress = infoServer.getConnectorAddress(connIdx++);
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
NetUtils.getHostPortString(httpAddress));
}
if (policy.isHttpsEnabled()) {
InetSocketAddress httpsAddress = infoServer.getConnectorAddress(connIdx);
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
NetUtils.getHostPortString(httpsAddress));
}
legacyOivImageDir = conf.get(
DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY);
LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs "
+ "(" + checkpointConf.getPeriod() / 60 + " min)");
LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
}
/**
* Wait for the service to finish.
* (Normally, it runs forever.)
*/
private void join() {
try {
infoServer.join();
} catch (InterruptedException ie) {
LOG.debug("Exception ", ie);
}
}
/**
* Shut down this instance of the SecondaryNameNode.
* Returns only after shutdown is complete.
*/
public void shutdown() {
shouldRun = false;
if (checkpointThread != null) {
checkpointThread.interrupt();
try {
checkpointThread.join(10000);
} catch (InterruptedException e) {
LOG.info("Interrupted waiting to join on checkpointer thread");
Thread.currentThread().interrupt(); // maintain status
}
}
try {
if (infoServer != null) {
infoServer.stop();
infoServer = null;
}
} catch (Exception e) {
LOG.warn("Exception shutting down SecondaryNameNode", e);
}
if (nameNodeStatusBeanName != null) {
MBeans.unregister(nameNodeStatusBeanName);
nameNodeStatusBeanName = null;
}
try {
if (checkpointImage != null) {
checkpointImage.close();
checkpointImage = null;
}
} catch(IOException e) {
LOG.warn("Exception while closing CheckpointStorage", e);
}
if (namesystem != null) {
namesystem.shutdown();
namesystem = null;
}
}
@Override
public void run() {
SecurityUtil.doAsLoginUserOrFatal(
new PrivilegedAction<Object>() {
@Override
public Object run() {
doWork();
return null;
}
});
}
//
// The main work loop
//
public void doWork() {
//
// Poll the Namenode (once every checkpointCheckPeriod seconds) to find the
// number of transactions in the edit log that haven't yet been checkpointed.
//
long period = checkpointConf.getCheckPeriod();
int maxRetries = checkpointConf.getMaxRetriesOnMergeError();
while (shouldRun) {
try {
Thread.sleep(1000 * period);
} catch (InterruptedException ie) {
// do nothing
}
if (!shouldRun) {
break;
}
try {
// We may have lost our ticket since last checkpoint, log in again, just in case
if(UserGroupInformation.isSecurityEnabled())
UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab();
final long monotonicNow = Time.monotonicNow();
final long now = Time.now();
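// Checkpoint when enough uncheckpointed transactions have accumulated,
// or when the configured checkpoint period has elapsed (monotonic clock).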
if (shouldCheckpointBasedOnCount() ||
monotonicNow >= lastCheckpointTime + 1000 * checkpointConf.getPeriod()) {
doCheckpoint();
lastCheckpointTime = monotonicNow;
lastCheckpointWallclockTime = now;
}
} catch (IOException e) {
LOG.error("Exception in doCheckpoint", e);
e.printStackTrace();
// Prevent a huge number of edits from being created due to
// unrecoverable conditions and endless retries.
if (checkpointImage.getMergeErrorCount() > maxRetries) {
LOG.fatal("Merging failed " +
checkpointImage.getMergeErrorCount() + " times.");
terminate(1);
}
} catch (Throwable e) {
LOG.fatal("Throwable Exception in doCheckpoint", e);
e.printStackTrace();
terminate(1, e);
}
}
}
/**
* Download <code>fsimage</code> and <code>edits</code>
* files from the name-node.
* @return true if a new image has been downloaded and needs to be loaded
* @throws IOException
*/
static boolean downloadCheckpointFiles(
final URL nnHostPort,
final FSImage dstImage,
final CheckpointSignature sig,
final RemoteEditLogManifest manifest
) throws IOException {
// Sanity check manifest - these could happen if, eg, someone on the
// NN side accidentally rmed the storage directories
if (manifest.getLogs().isEmpty()) {
throw new IOException("Found no edit logs to download on NN since txid "
+ sig.mostRecentCheckpointTxId);
}
long expectedTxId = sig.mostRecentCheckpointTxId + 1;
if (manifest.getLogs().get(0).getStartTxId() != expectedTxId) {
throw new IOException("Bad edit log manifest (expected txid = " +
expectedTxId + ": " + manifest);
}
try {
Boolean b = UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<Boolean>() {
@Override
public Boolean run() throws Exception {
dstImage.getStorage().cTime = sig.cTime;
// get fsimage
if (sig.mostRecentCheckpointTxId ==
dstImage.getStorage().getMostRecentCheckpointTxId()) {
LOG.info("Image has not changed. Will not download image.");
} else {
LOG.info("Image has changed. Downloading updated image from NN.");
MD5Hash downloadedHash = TransferFsImage.downloadImageToStorage(
nnHostPort, sig.mostRecentCheckpointTxId,
dstImage.getStorage(), true);
dstImage.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE,
sig.mostRecentCheckpointTxId, downloadedHash);
}
// get edits file
for (RemoteEditLog log : manifest.getLogs()) {
TransferFsImage.downloadEditsToStorage(
nnHostPort, log, dstImage.getStorage());
}
// true if we haven't loaded all the transactions represented by the
// downloaded fsimage.
return dstImage.getLastAppliedTxId() < sig.mostRecentCheckpointTxId;
}
});
return b.booleanValue();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
InetSocketAddress getNameNodeAddress() {
return nameNodeAddr;
}
/**
* Returns the URL of the HTTP (Jetty) info server that the NameNode is listening on.
*/
private URL getInfoServer() throws IOException {
URI fsName = FileSystem.getDefaultUri(conf);
if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
throw new IOException("This is not a DFS");
}
final String scheme = DFSUtil.getHttpClientScheme(conf);
URI address = DFSUtil.getInfoServerWithDefaultHost(fsName.getHost(), conf,
scheme);
LOG.debug("Will connect to NameNode at " + address);
return address.toURL();
}
/**
* Create a new checkpoint
* @return true if a new image was fetched from the primary NameNode
*/
@VisibleForTesting
@SuppressWarnings("deprecation")
public boolean doCheckpoint() throws IOException {
checkpointImage.ensureCurrentDirExists();
NNStorage dstStorage = checkpointImage.getStorage();
// Tell the namenode to start logging transactions in a new edit file
// Returns a token that would be used to upload the merged image.
CheckpointSignature sig = namenode.rollEditLog();
boolean loadImage = false;
boolean isFreshCheckpointer = (checkpointImage.getNamespaceID() == 0);
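// Determine whether this 2NN already belongs to the NN's cluster: compare
// cluster IDs when the storage layout supports federation, otherwise fall
// back to comparing namespace IDs.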
boolean isSameCluster =
(dstStorage.versionSupportsFederation(NameNodeLayoutVersion.FEATURES)
&& sig.isSameCluster(checkpointImage)) ||
(!dstStorage.versionSupportsFederation(NameNodeLayoutVersion.FEATURES)
&& sig.namespaceIdMatches(checkpointImage));
if (isFreshCheckpointer ||
(isSameCluster &&
!sig.storageVersionMatches(checkpointImage.getStorage()))) {
// if we're a fresh 2NN, or if we're on the same cluster and our storage
// needs an upgrade, just take the storage info from the server.
dstStorage.setStorageInfo(sig);
dstStorage.setClusterID(sig.getClusterID());
dstStorage.setBlockPoolID(sig.getBlockpoolID());
loadImage = true;
}
sig.validateStorageInfo(checkpointImage);
// error simulation code for junit test
CheckpointFaultInjector.getInstance().afterSecondaryCallsRollEditLog();
RemoteEditLogManifest manifest =
namenode.getEditLogManifest(sig.mostRecentCheckpointTxId + 1);
// Fetch fsimage and edits. Reload the image if previous merge failed.
loadImage |= downloadCheckpointFiles(
fsName, checkpointImage, sig, manifest) |
checkpointImage.hasMergeError();
try {
doMerge(sig, manifest, loadImage, checkpointImage, namesystem);
} catch (IOException ioe) {
// A merge error occurred. The in-memory file system state may be
// inconsistent, so the image and edits need to be reloaded.
checkpointImage.setMergeError();
throw ioe;
}
// Clear any error since merge was successful.
checkpointImage.clearMergeError();
//
// Upload the new image into the NameNode. Then tell the Namenode
// to make this new uploaded image as the most current image.
//
long txid = checkpointImage.getLastAppliedTxId();
TransferFsImage.uploadImageFromStorage(fsName, conf, dstStorage,
NameNodeFile.IMAGE, txid);
// error simulation code for junit test
CheckpointFaultInjector.getInstance().afterSecondaryUploadsNewImage();
LOG.warn("Checkpoint done. New Image Size: "
+ dstStorage.getFsImageName(txid).length());
if (legacyOivImageDir != null && !legacyOivImageDir.isEmpty()) {
try {
checkpointImage.saveLegacyOIVImage(namesystem, legacyOivImageDir,
new Canceler());
} catch (IOException e) {
LOG.warn("Failed to write legacy OIV image: ", e);
}
}
return loadImage;
}
/**
* @param opts The parameters passed to this program.
* @exception Exception if the filesystem does not exist.
* @return 0 on success, non-zero on error.
*/
private int processStartupCommand(CommandLineOpts opts) throws Exception {
if (opts.getCommand() == null) {
return 0;
}
String cmd = StringUtils.toLowerCase(opts.getCommand().toString());
int exitCode = 0;
try {
switch (opts.getCommand()) {
case CHECKPOINT:
long count = countUncheckpointedTxns();
if (count > checkpointConf.getTxnCount() ||
opts.shouldForceCheckpoint()) {
doCheckpoint();
} else {
System.err.println("EditLog size " + count + " transactions is " +
"smaller than configured checkpoint " +
"interval " + checkpointConf.getTxnCount() + " transactions.");
System.err.println("Skipping checkpoint.");
}
break;
case GETEDITSIZE:
long uncheckpointed = countUncheckpointedTxns();
System.out.println("NameNode has " + uncheckpointed +
" uncheckpointed transactions");
break;
default:
throw new AssertionError("bad command enum: " + opts.getCommand());
}
} catch (RemoteException e) {
//
// This is an error returned by the Hadoop server. Print
// out the first line of the error message and ignore the stack trace.
exitCode = 1;
try {
String[] content;
content = e.getLocalizedMessage().split("\n");
LOG.error(cmd + ": " + content[0]);
} catch (Exception ex) {
LOG.error(cmd + ": " + ex.getLocalizedMessage());
}
} catch (IOException e) {
//
// IO exception encountered locally.
//
exitCode = 1;
LOG.error(cmd + ": " + e.getLocalizedMessage());
} finally {
// Does the RPC connection need to be closed?
}
return exitCode;
}
private long countUncheckpointedTxns() throws IOException {
long curTxId = namenode.getTransactionID();
long uncheckpointedTxns = curTxId -
checkpointImage.getStorage().getMostRecentCheckpointTxId();
assert uncheckpointedTxns >= 0;
return uncheckpointedTxns;
}
boolean shouldCheckpointBasedOnCount() throws IOException {
return countUncheckpointedTxns() >= checkpointConf.getTxnCount();
}
/**
* Command-line entry point for the SecondaryNameNode.
* @param argv Command line parameters.
* @exception Exception if the filesystem does not exist.
*/
public static void main(String[] argv) throws Exception {
CommandLineOpts opts = SecondaryNameNode.parseArgs(argv);
if (opts == null) {
LOG.fatal("Failed to parse options");
terminate(1);
} else if (opts.shouldPrintHelp()) {
opts.usage();
System.exit(0);
}
try {
StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
Configuration tconf = new HdfsConfiguration();
SecondaryNameNode secondary = null;
secondary = new SecondaryNameNode(tconf, opts);
if (opts != null && opts.getCommand() != null) {
int ret = secondary.processStartupCommand(opts);
terminate(ret);
}
if (secondary != null) {
secondary.startCheckpointThread();
secondary.join();
}
} catch (Throwable e) {
LOG.fatal("Failed to start secondary namenode", e);
terminate(1);
}
}
public void startCheckpointThread() {
Preconditions.checkState(checkpointThread == null,
"Should not already have a thread");
Preconditions.checkState(shouldRun, "shouldRun should be true");
checkpointThread = new Daemon(this);
checkpointThread.start();
}
@Override // SecondaryNameNodeInfoMXBean
public String getHostAndPort() {
return NetUtils.getHostPortString(nameNodeAddr);
}
@Override // SecondaryNameNodeInfoMXBean
public long getStartTime() {
return starttime;
}
@Override // SecondaryNameNodeInfoMXBean
public long getLastCheckpointTime() {
return lastCheckpointWallclockTime;
}
@Override // SecondaryNameNodeInfoMXBean
public long getLastCheckpointDeltaMs() {
if (lastCheckpointTime == 0) {
return -1;
} else {
return (Time.monotonicNow() - lastCheckpointTime);
}
}
@Override // SecondaryNameNodeInfoMXBean
public String[] getCheckpointDirectories() {
ArrayList<String> r = Lists.newArrayListWithCapacity(checkpointDirs.size());
for (URI d : checkpointDirs) {
r.add(d.toString());
}
return r.toArray(new String[r.size()]);
}
@Override // SecondaryNameNodeInfoMXBean
public String[] getCheckpointEditlogDirectories() {
ArrayList<String> r = Lists.newArrayListWithCapacity(checkpointEditsDirs.size());
for (URI d : checkpointEditsDirs) {
r.add(d.toString());
}
return r.toArray(new String[r.size()]);
}
@Override // VersionInfoMXBean
public String getCompileInfo() {
return VersionInfo.getDate() + " by " + VersionInfo.getUser() +
" from " + VersionInfo.getBranch();
}
@Override // VersionInfoMXBean
public String getSoftwareVersion() {
return VersionInfo.getVersion();
}
/**
* Container for parsed command-line options.
*/
@SuppressWarnings("static-access")
static class CommandLineOpts {
private final Options options = new Options();
private final Option geteditsizeOpt;
private final Option checkpointOpt;
private final Option formatOpt;
private final Option helpOpt;
Command cmd;
enum Command {
GETEDITSIZE,
CHECKPOINT;
}
private boolean shouldForce;
private boolean shouldFormat;
private boolean shouldPrintHelp;
CommandLineOpts() {
geteditsizeOpt = new Option("geteditsize",
"return the number of uncheckpointed transactions on the NameNode");
checkpointOpt = OptionBuilder.withArgName("force")
.hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");
formatOpt = new Option("format", "format the local storage during startup");
helpOpt = new Option("h", "help", false, "get help information");
options.addOption(geteditsizeOpt);
options.addOption(checkpointOpt);
options.addOption(formatOpt);
options.addOption(helpOpt);
}
public boolean shouldFormat() {
return shouldFormat;
}
public boolean shouldPrintHelp() {
return shouldPrintHelp;
}
public void parse(String ... argv) throws ParseException {
CommandLineParser parser = new PosixParser();
CommandLine cmdLine = parser.parse(options, argv);
if (cmdLine.hasOption(helpOpt.getOpt())
|| cmdLine.hasOption(helpOpt.getLongOpt())) {
shouldPrintHelp = true;
return;
}
boolean hasGetEdit = cmdLine.hasOption(geteditsizeOpt.getOpt());
boolean hasCheckpoint = cmdLine.hasOption(checkpointOpt.getOpt());
if (hasGetEdit && hasCheckpoint) {
throw new ParseException("May not pass both "
+ geteditsizeOpt.getOpt() + " and "
+ checkpointOpt.getOpt());
}
if (hasGetEdit) {
cmd = Command.GETEDITSIZE;
} else if (hasCheckpoint) {
cmd = Command.CHECKPOINT;
String arg = cmdLine.getOptionValue(checkpointOpt.getOpt());
if ("force".equals(arg)) {
shouldForce = true;
} else if (arg != null) {
throw new ParseException("-checkpoint may only take 'force' as an "
+ "argument");
}
}
if (cmdLine.hasOption(formatOpt.getOpt())) {
shouldFormat = true;
}
}
public Command getCommand() {
return cmd;
}
public boolean shouldForceCheckpoint() {
return shouldForce;
}
void usage() {
String header = "The Secondary NameNode is a helper "
+ "to the primary NameNode. The Secondary is responsible "
+ "for supporting periodic checkpoints of the HDFS metadata. "
+ "The current design allows only one Secondary NameNode "
+ "per HDFS cluster.";
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("secondarynamenode", header, options, "", false);
}
}
private static CommandLineOpts parseArgs(String[] argv) {
CommandLineOpts opts = new CommandLineOpts();
try {
opts.parse(argv);
} catch (ParseException pe) {
LOG.error(pe.getMessage());
opts.usage();
return null;
}
return opts;
}
static class CheckpointStorage extends FSImage {
private int mergeErrorCount;
private static class CheckpointLogPurger implements LogsPurgeable {
private final NNStorage storage;
private final StoragePurger purger
= new NNStorageRetentionManager.DeletionStoragePurger();
public CheckpointLogPurger(NNStorage storage) {
this.storage = storage;
}
@Override
public void purgeLogsOlderThan(long minTxIdToKeep) throws IOException {
Iterator<StorageDirectory> iter = storage.dirIterator();
while (iter.hasNext()) {
StorageDirectory dir = iter.next();
List<EditLogFile> editFiles = FileJournalManager.matchEditLogs(
dir.getCurrentDir());
for (EditLogFile f : editFiles) {
if (f.getLastTxId() < minTxIdToKeep) {
purger.purgeLog(f);
}
}
}
}
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
long fromTxId, boolean inProgressOk) {
Iterator<StorageDirectory> iter = storage.dirIterator();
while (iter.hasNext()) {
StorageDirectory dir = iter.next();
List<EditLogFile> editFiles;
try {
editFiles = FileJournalManager.matchEditLogs(
dir.getCurrentDir());
} catch (IOException ioe) {
throw new RuntimeException(ioe);
}
FileJournalManager.addStreamsToCollectionFromFiles(editFiles, streams,
fromTxId, inProgressOk);
}
}
}
/**
* Construct a checkpoint image.
* @param conf Node configuration.
* @param imageDirs URIs of storage for image.
* @param editsDirs URIs of storage for edit logs.
* @throws IOException If storage cannot be accessed.
*/
CheckpointStorage(Configuration conf,
Collection<URI> imageDirs,
List<URI> editsDirs) throws IOException {
super(conf, imageDirs, editsDirs);
// the 2NN never writes edits -- it only downloads them. So
// we shouldn't have any editLog instance. Setting to null
// makes sure we don't accidentally depend on it.
editLog = null;
mergeErrorCount = 0;
// Replace the archival manager with one that can actually work on the
// 2NN's edits storage.
this.archivalManager = new NNStorageRetentionManager(conf, storage,
new CheckpointLogPurger(storage));
}
/**
* Analyze checkpoint directories.
* Create directories if they do not exist.
* Recover from an unsuccessful checkpoint if necessary.
*
* @throws IOException
*/
void recoverCreate(boolean format) throws IOException {
storage.attemptRestoreRemovedStorage();
storage.unlockAll();
for (Iterator<StorageDirectory> it =
storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
boolean isAccessible = true;
try { // create directories if they don't exist yet
if(!sd.getRoot().mkdirs()) {
// do nothing, directory is already created
}
} catch(SecurityException se) {
isAccessible = false;
}
if(!isAccessible)
throw new InconsistentFSStateException(sd.getRoot(),
"cannot access checkpoint directory.");
if (format) {
// Don't confirm, since this is just the secondary namenode.
LOG.info("Formatting storage directory " + sd);
sd.clearDirectory();
}
StorageState curState;
try {
curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
// fail if any of the configured checkpoint dirs are inaccessible
throw new InconsistentFSStateException(sd.getRoot(),
"checkpoint directory does not exist or is not accessible.");
case NOT_FORMATTED:
break; // it's ok since initially there is no current and VERSION
case NORMAL:
// Read the VERSION file. This verifies that:
// (a) the VERSION file for each of the directories is the same,
// and (b) when we connect to a NN, we can verify that the remote
// node matches the same namespace that we ran on previously.
storage.readProperties(sd);
break;
default: // recovery is possible
sd.doRecover(curState);
}
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
}
}
boolean hasMergeError() {
return (mergeErrorCount > 0);
}
int getMergeErrorCount() {
return mergeErrorCount;
}
void setMergeError() {
mergeErrorCount++;
}
void clearMergeError() {
mergeErrorCount = 0;
}
/**
* Ensure that the current/ directory exists in all storage
* directories
*/
void ensureCurrentDirExists() throws IOException {
for (Iterator<StorageDirectory> it
= storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
File curDir = sd.getCurrentDir();
if (!curDir.exists() && !curDir.mkdirs()) {
throw new IOException("Could not create directory " + curDir);
}
}
}
void deleteTempEdits() throws IOException {
FilenameFilter filter = new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
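// Temporary edits files are named <EDITS_TMP name>_<startTxId>-<endTxId>_<timestamp>.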
return name.matches(NameNodeFile.EDITS_TMP.getName()
+ "_(\\d+)-(\\d+)_(\\d+)");
}
};
Iterator<StorageDirectory> it = storage.dirIterator(NameNodeDirType.EDITS);
while (it.hasNext()) {
StorageDirectory dir = it.next();
File[] tempEdits = dir.getCurrentDir().listFiles(filter);
if (tempEdits != null) {
for (File t : tempEdits) {
boolean success = t.delete();
if (!success) {
LOG.warn("Failed to delete temporary edits file: "
+ t.getAbsolutePath());
}
}
}
}
}
}
static void doMerge(
CheckpointSignature sig, RemoteEditLogManifest manifest,
boolean loadImage, FSImage dstImage, FSNamesystem dstNamesystem)
throws IOException {
NNStorage dstStorage = dstImage.getStorage();
dstStorage.setStorageInfo(sig);
if (loadImage) {
File file = dstStorage.findImageFile(NameNodeFile.IMAGE,
sig.mostRecentCheckpointTxId);
if (file == null) {
throw new IOException("Couldn't find image file at txid " +
sig.mostRecentCheckpointTxId + " even though it should have " +
"just been downloaded");
}
dstNamesystem.writeLock();
try {
dstImage.reloadFromImageFile(file, dstNamesystem);
} finally {
dstNamesystem.writeUnlock();
}
dstNamesystem.imageLoadComplete();
}
// error simulation code for junit test
CheckpointFaultInjector.getInstance().duringMerge();
Checkpointer.rollForwardByApplyingLogs(manifest, dstImage, dstNamesystem);
// The following has the side effect of purging old fsimages/edit logs.
dstImage.saveFSImageInAllDirs(dstNamesystem, dstImage.getLastAppliedTxId());
if (!dstNamesystem.isRollingUpgrade()) {
dstStorage.writeAll();
}
}
}
| 37,409 | 33.258242 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.net.NodeBase;
class NamenodeJspHelper {
/** @return a randomly chosen datanode. */
static DatanodeDescriptor getRandomDatanode(final NameNode namenode) {
return (DatanodeDescriptor)namenode.getNamesystem().getBlockManager(
).getDatanodeManager().getNetworkTopology().chooseRandom(
NodeBase.ROOT);
}
}
| 1,284 | 37.939394 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import com.google.common.collect.ImmutableList;
@InterfaceAudience.Private
public class CheckpointConf {
private static final Log LOG = LogFactory.getLog(CheckpointConf.class);
/** How often to checkpoint regardless of number of txns */
private final long checkpointPeriod; // in seconds
/** How often to poll the NN to check checkpointTxnCount */
private final long checkpointCheckPeriod; // in seconds
/** checkpoint once every this many transactions, regardless of time */
private final long checkpointTxnCount;
/** maximum number of retries when merge errors occur */
private final int maxRetriesOnMergeError;
/** The output dir for legacy OIV image */
private final String legacyOivImageDir;
public CheckpointConf(Configuration conf) {
checkpointCheckPeriod = conf.getLong(
DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,
DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT);
checkpointPeriod = conf.getLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,
DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
checkpointTxnCount = conf.getLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
maxRetriesOnMergeError = conf.getInt(DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY,
DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_DEFAULT);
legacyOivImageDir = conf.get(DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY);
warnForDeprecatedConfigs(conf);
}
private static void warnForDeprecatedConfigs(Configuration conf) {
for (String key : ImmutableList.of(
"fs.checkpoint.size",
"dfs.namenode.checkpoint.size")) {
if (conf.get(key) != null) {
LOG.warn("Configuration key " + key + " is deprecated! Ignoring..." +
" Instead please specify a value for " +
DFS_NAMENODE_CHECKPOINT_TXNS_KEY);
}
}
}
public long getPeriod() {
return checkpointPeriod;
}
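/**
 * @return the polling interval in seconds, capped at the checkpoint period
 * so that time-based checkpoints are not delayed by a longer poll interval.
 */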
public long getCheckPeriod() {
return Math.min(checkpointCheckPeriod, checkpointPeriod);
}
public long getTxnCount() {
return checkpointTxnCount;
}
public int getMaxRetriesOnMergeError() {
return maxRetriesOnMergeError;
}
public String getLegacyOivImageDir() {
return legacyOivImageDir;
}
}
| 3,419 | 35 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceCancelledException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public class SaveNamespaceCancelledException extends IOException {
private static final long serialVersionUID = 1L;
SaveNamespaceCancelledException(String cancelReason) {
super(cancelReason);
}
}
| 1,178 | 37.032258 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.util.Time.monotonicNow;
import java.net.HttpURLConnection;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.io.*;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* This servlet is used by the NameNode's HTTP server (Jetty) to retrieve and
* upload image and edits files. It is typically used by the SecondaryNameNode
* to retrieve the image and edits files for periodic checkpointing in non-HA
* deployments, and by the Standby NameNode to upload checkpoints in HA
* deployments.
*/
@InterfaceAudience.Private
public class ImageServlet extends HttpServlet {
public static final String PATH_SPEC = "/imagetransfer";
private static final long serialVersionUID = -7669068179452648952L;
private static final Log LOG = LogFactory.getLog(ImageServlet.class);
public final static String CONTENT_DISPOSITION = "Content-Disposition";
public final static String HADOOP_IMAGE_EDITS_HEADER = "X-Image-Edits-Name";
private static final String TXID_PARAM = "txid";
private static final String START_TXID_PARAM = "startTxId";
private static final String END_TXID_PARAM = "endTxId";
private static final String STORAGEINFO_PARAM = "storageInfo";
private static final String LATEST_FSIMAGE_VALUE = "latest";
private static final String IMAGE_FILE_TYPE = "imageFile";
@Override
public void doGet(final HttpServletRequest request,
final HttpServletResponse response) throws ServletException, IOException {
try {
final ServletContext context = getServletContext();
final FSImage nnImage = NameNodeHttpServer.getFsImageFromContext(context);
final GetImageParams parsedParams = new GetImageParams(request, response);
final Configuration conf = (Configuration) context
.getAttribute(JspHelper.CURRENT_CONF);
final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
validateRequest(context, conf, request, response, nnImage,
parsedParams.getStorageInfoString());
UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
if (parsedParams.isGetImage()) {
long txid = parsedParams.getTxId();
File imageFile = null;
String errorMessage = "Could not find image";
if (parsedParams.shouldFetchLatest()) {
imageFile = nnImage.getStorage().getHighestFsImageName();
} else {
errorMessage += " with txid " + txid;
imageFile = nnImage.getStorage().getFsImage(txid,
EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK));
}
if (imageFile == null) {
throw new IOException(errorMessage);
}
CheckpointFaultInjector.getInstance().beforeGetImageSetsHeaders();
long start = monotonicNow();
serveFile(imageFile);
if (metrics != null) { // Metrics non-null only when used inside name node
long elapsed = monotonicNow() - start;
metrics.addGetImage(elapsed);
}
} else if (parsedParams.isGetEdit()) {
long startTxId = parsedParams.getStartTxId();
long endTxId = parsedParams.getEndTxId();
File editFile = nnImage.getStorage()
.findFinalizedEditsFile(startTxId, endTxId);
long start = monotonicNow();
serveFile(editFile);
if (metrics != null) { // Metrics non-null only when used inside name node
long elapsed = monotonicNow() - start;
metrics.addGetEdit(elapsed);
}
}
return null;
}
private void serveFile(File file) throws IOException {
FileInputStream fis = new FileInputStream(file);
try {
setVerificationHeadersForGet(response, file);
setFileNameHeaders(response, file);
if (!file.exists()) {
// Potential race where the file was deleted while we were in the
// process of setting headers!
throw new FileNotFoundException(file.toString());
// It's possible the file could be deleted after this point, but
// we've already opened the 'fis' stream.
// It's also possible length could change, but this would be
// detected by the client side as an inaccurate length header.
}
// send file
TransferFsImage.copyFileToStream(response.getOutputStream(),
file, fis, getThrottler(conf));
} finally {
IOUtils.closeStream(fis);
}
}
});
} catch (Throwable t) {
String errMsg = "GetImage failed. " + StringUtils.stringifyException(t);
response.sendError(HttpServletResponse.SC_GONE, errMsg);
throw new IOException(errMsg);
} finally {
response.getOutputStream().close();
}
}
private void validateRequest(ServletContext context, Configuration conf,
HttpServletRequest request, HttpServletResponse response,
FSImage nnImage, String theirStorageInfoString) throws IOException {
if (UserGroupInformation.isSecurityEnabled()
&& !isValidRequestor(context, request.getUserPrincipal().getName(),
conf)) {
String errorMsg = "Only Namenode, Secondary Namenode, and administrators may access "
+ "this servlet";
response.sendError(HttpServletResponse.SC_FORBIDDEN, errorMsg);
LOG.warn("Received non-NN/SNN/administrator request for image or edits from "
+ request.getUserPrincipal().getName()
+ " at "
+ request.getRemoteHost());
throw new IOException(errorMsg);
}
String myStorageInfoString = nnImage.getStorage().toColonSeparatedString();
if (theirStorageInfoString != null
&& !myStorageInfoString.equals(theirStorageInfoString)) {
String errorMsg = "This namenode has storage info " + myStorageInfoString
+ " but the secondary expected " + theirStorageInfoString;
response.sendError(HttpServletResponse.SC_FORBIDDEN, errorMsg);
LOG.warn("Received an invalid request file transfer request "
+ "from a secondary with storage info " + theirStorageInfoString);
throw new IOException(errorMsg);
}
}
public static void setFileNameHeaders(HttpServletResponse response,
File file) {
response.setHeader(CONTENT_DISPOSITION, "attachment; filename=" +
file.getName());
response.setHeader(HADOOP_IMAGE_EDITS_HEADER, file.getName());
}
/**
* Construct a throttler from conf
* @param conf configuration
* @return a data transfer throttler
*/
public final static DataTransferThrottler getThrottler(Configuration conf) {
long transferBandwidth =
conf.getLong(DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY,
DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_DEFAULT);
DataTransferThrottler throttler = null;
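// A non-positive bandwidth setting disables throttling; returning null means
// the image/edits transfer is not rate limited.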
if (transferBandwidth > 0) {
throttler = new DataTransferThrottler(transferBandwidth);
}
return throttler;
}
@VisibleForTesting
static boolean isValidRequestor(ServletContext context, String remoteUser,
Configuration conf) throws IOException {
if (remoteUser == null) { // This really shouldn't happen...
LOG.warn("Received null remoteUser while authorizing access to getImage servlet");
return false;
}
Set<String> validRequestors = new HashSet<String>();
validRequestors.add(SecurityUtil.getServerPrincipal(conf
.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
NameNode.getAddress(conf).getHostName()));
try {
validRequestors.add(
SecurityUtil.getServerPrincipal(conf
.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
SecondaryNameNode.getHttpAddress(conf).getHostName()));
} catch (Exception e) {
// Don't halt if SecondaryNameNode principal could not be added.
LOG.debug("SecondaryNameNode principal could not be added", e);
String msg = String.format(
"SecondaryNameNode principal not considered, %s = %s, %s = %s",
DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY,
conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KERBEROS_PRINCIPAL_KEY),
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
conf.getTrimmed(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
LOG.warn(msg);
}
if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
validRequestors.add(SecurityUtil.getServerPrincipal(otherNnConf
.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
NameNode.getAddress(otherNnConf).getHostName()));
}
for (String v : validRequestors) {
if (v != null && v.equals(remoteUser)) {
LOG.info("ImageServlet allowing checkpointer: " + remoteUser);
return true;
}
}
if (HttpServer2.userHasAdministratorAccess(context, remoteUser)) {
LOG.info("ImageServlet allowing administrator: " + remoteUser);
return true;
}
LOG.info("ImageServlet rejecting: " + remoteUser);
return false;
}
/**
* Set headers for content length, and, if available, md5.
* @throws IOException
*/
public static void setVerificationHeadersForGet(HttpServletResponse response,
File file) throws IOException {
response.setHeader(TransferFsImage.CONTENT_LENGTH,
String.valueOf(file.length()));
MD5Hash hash = MD5FileUtils.readStoredMd5ForFile(file);
if (hash != null) {
response.setHeader(TransferFsImage.MD5_HEADER, hash.toString());
}
}
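  /**
   * @return the query string used to request the most recent fsimage,
   * regardless of its transaction ID.
   */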
static String getParamStringForMostRecentImage() {
return "getimage=1&" + TXID_PARAM + "=" + LATEST_FSIMAGE_VALUE;
}
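  /**
   * @return the query string used to request an image of the given type and
   * transaction ID from a node with the given storage info.
   */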
static String getParamStringForImage(NameNodeFile nnf, long txid,
StorageInfo remoteStorageInfo) {
final String imageType = nnf == null ? "" : "&" + IMAGE_FILE_TYPE + "="
+ nnf.name();
return "getimage=1&" + TXID_PARAM + "=" + txid
+ imageType
+ "&" + STORAGEINFO_PARAM + "=" +
remoteStorageInfo.toColonSeparatedString();
}
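  /**
   * @return the query string used to request the given edit log segment
   * from a node with the given storage info.
   */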
static String getParamStringForLog(RemoteEditLog log,
StorageInfo remoteStorageInfo) {
return "getedit=1&" + START_TXID_PARAM + "=" + log.getStartTxId()
+ "&" + END_TXID_PARAM + "=" + log.getEndTxId()
+ "&" + STORAGEINFO_PARAM + "=" +
remoteStorageInfo.toColonSeparatedString();
}
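  /**
   * Parsed and validated parameters of a GET request for an image or an
   * edit log segment.
   */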
static class GetImageParams {
private boolean isGetImage;
private boolean isGetEdit;
private NameNodeFile nnf;
private long startTxId, endTxId, txId;
private String storageInfoString;
private boolean fetchLatest;
/**
* @param request the object from which this servlet reads the url contents
* @param response the object into which this servlet writes the url contents
* @throws IOException if the request is bad
*/
public GetImageParams(HttpServletRequest request,
HttpServletResponse response
) throws IOException {
@SuppressWarnings("unchecked")
Map<String, String[]> pmap = request.getParameterMap();
isGetImage = isGetEdit = fetchLatest = false;
for (Map.Entry<String, String[]> entry : pmap.entrySet()) {
String key = entry.getKey();
String[] val = entry.getValue();
if (key.equals("getimage")) {
isGetImage = true;
try {
txId = ServletUtil.parseLongParam(request, TXID_PARAM);
String imageType = ServletUtil.getParameter(request, IMAGE_FILE_TYPE);
nnf = imageType == null ? NameNodeFile.IMAGE : NameNodeFile
.valueOf(imageType);
} catch (NumberFormatException nfe) {
if (request.getParameter(TXID_PARAM).equals(LATEST_FSIMAGE_VALUE)) {
fetchLatest = true;
} else {
throw nfe;
}
}
} else if (key.equals("getedit")) {
isGetEdit = true;
startTxId = ServletUtil.parseLongParam(request, START_TXID_PARAM);
endTxId = ServletUtil.parseLongParam(request, END_TXID_PARAM);
} else if (key.equals(STORAGEINFO_PARAM)) {
storageInfoString = val[0];
}
}
int numGets = (isGetImage?1:0) + (isGetEdit?1:0);
if ((numGets > 1) || (numGets == 0)) {
throw new IOException("Illegal parameters to TransferFsImage");
}
}
public String getStorageInfoString() {
return storageInfoString;
}
public long getTxId() {
Preconditions.checkState(isGetImage);
return txId;
}
public NameNodeFile getNameNodeFile() {
Preconditions.checkState(isGetImage);
return nnf;
}
public long getStartTxId() {
Preconditions.checkState(isGetEdit);
return startTxId;
}
public long getEndTxId() {
Preconditions.checkState(isGetEdit);
return endTxId;
}
boolean isGetEdit() {
return isGetEdit;
}
boolean isGetImage() {
return isGetImage;
}
boolean shouldFetchLatest() {
return fetchLatest;
}
}
/**
   * Set headers for image length and, if available, md5.
*
* @throws IOException
*/
static void setVerificationHeadersForPut(HttpURLConnection connection,
File file) throws IOException {
connection.setRequestProperty(TransferFsImage.CONTENT_LENGTH,
String.valueOf(file.length()));
MD5Hash hash = MD5FileUtils.readStoredMd5ForFile(file);
if (hash != null) {
connection
.setRequestProperty(TransferFsImage.MD5_HEADER, hash.toString());
}
}
  /**
   * Build the required parameters for uploading an image.
   *
   * @param storage the storage whose colon-separated info string is sent
   *                with the request
   * @param txid txid of the image
   * @param imageFileSize size of the image file to be uploaded
   * @param nnf NameNodeFile type
   * @return map of parameters to be used with the PUT request.
   */
static Map<String, String> getParamsForPutImage(Storage storage, long txid,
long imageFileSize, NameNodeFile nnf) {
Map<String, String> params = new HashMap<String, String>();
params.put(TXID_PARAM, Long.toString(txid));
params.put(STORAGEINFO_PARAM, storage.toColonSeparatedString());
    // Set the length of the file to be uploaded in a separate property, since
    // Content-Length only supports up to 2GB
params.put(TransferFsImage.FILE_LENGTH, Long.toString(imageFileSize));
params.put(IMAGE_FILE_TYPE, nnf.name());
return params;
}
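  /**
   * Handle a PUT request that uploads a new checkpoint image from a
   * checkpointing node and save it into this namenode's storage.
   */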
@Override
protected void doPut(final HttpServletRequest request,
final HttpServletResponse response) throws ServletException, IOException {
try {
ServletContext context = getServletContext();
final FSImage nnImage = NameNodeHttpServer.getFsImageFromContext(context);
final Configuration conf = (Configuration) getServletContext()
.getAttribute(JspHelper.CURRENT_CONF);
final PutImageParams parsedParams = new PutImageParams(request, response,
conf);
final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
validateRequest(context, conf, request, response, nnImage,
parsedParams.getStorageInfoString());
UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
final long txid = parsedParams.getTxId();
final NameNodeFile nnf = parsedParams.getNameNodeFile();
if (!nnImage.addToCheckpointing(txid)) {
response.sendError(HttpServletResponse.SC_CONFLICT,
"Either current namenode is checkpointing or another"
+ " checkpointer is already in the process of "
+ "uploading a checkpoint made at transaction ID "
+ txid);
return null;
}
try {
if (nnImage.getStorage().findImageFile(nnf, txid) != null) {
response.sendError(HttpServletResponse.SC_CONFLICT,
"Either current namenode has checkpointed or "
+ "another checkpointer already uploaded an "
+ "checkpoint for txid " + txid);
return null;
}
InputStream stream = request.getInputStream();
try {
long start = monotonicNow();
MD5Hash downloadImageDigest = TransferFsImage
.handleUploadImageRequest(request, txid,
nnImage.getStorage(), stream,
parsedParams.getFileSize(), getThrottler(conf));
nnImage.saveDigestAndRenameCheckpointImage(nnf, txid,
downloadImageDigest);
// Metrics non-null only when used inside name node
if (metrics != null) {
long elapsed = monotonicNow() - start;
metrics.addPutImage(elapsed);
}
// Now that we have a new checkpoint, we might be able to
// remove some old ones.
nnImage.purgeOldStorage(nnf);
} finally {
stream.close();
}
} finally {
nnImage.removeFromCheckpointing(txid);
}
return null;
}
});
} catch (Throwable t) {
String errMsg = "PutImage failed. " + StringUtils.stringifyException(t);
response.sendError(HttpServletResponse.SC_GONE, errMsg);
throw new IOException(errMsg);
}
}
/*
* Params required to handle put image request
*/
static class PutImageParams {
private long txId = -1;
private String storageInfoString = null;
private long fileSize = 0L;
private NameNodeFile nnf;
public PutImageParams(HttpServletRequest request,
HttpServletResponse response, Configuration conf) throws IOException {
txId = ServletUtil.parseLongParam(request, TXID_PARAM);
storageInfoString = ServletUtil.getParameter(request, STORAGEINFO_PARAM);
fileSize = ServletUtil.parseLongParam(request,
TransferFsImage.FILE_LENGTH);
String imageType = ServletUtil.getParameter(request, IMAGE_FILE_TYPE);
nnf = imageType == null ? NameNodeFile.IMAGE : NameNodeFile
.valueOf(imageType);
if (fileSize == 0 || txId == -1 || storageInfoString == null
|| storageInfoString.isEmpty()) {
throw new IOException("Illegal parameters to TransferFsImage");
}
}
public long getTxId() {
return txId;
}
public String getStorageInfoString() {
return storageInfoString;
}
public long getFileSize() {
return fileSize;
}
public NameNodeFile getNameNodeFile() {
return nnf;
}
}
}
| 21,561 | 37.572451 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.hdfs.util.EnumCounters;
/** Quota types. */
public enum Quota {
/** The namespace usage, i.e. the number of name objects. */
NAMESPACE,
/** The storage space usage in bytes including replication. */
STORAGESPACE;
/** Counters for quota counts. */
public static class Counts extends EnumCounters<Quota> {
/** @return a new counter with the given namespace and storagespace usages. */
public static Counts newInstance(long namespace, long storagespace) {
final Counts c = new Counts();
c.set(NAMESPACE, namespace);
c.set(STORAGESPACE, storagespace);
return c;
}
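    /** @return a new counter with both usages initialized to zero. */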
public static Counts newInstance() {
return newInstance(0, 0);
}
Counts() {
super(Quota.class);
}
}
/**
* Is quota violated?
* The quota is violated if quota is set and usage > quota.
*/
static boolean isViolated(final long quota, final long usage) {
return quota >= 0 && usage > quota;
}
/**
* Is quota violated?
* The quota is violated if quota is set, delta > 0 and usage + delta > quota.
*/
static boolean isViolated(final long quota, final long usage,
final long delta) {
return quota >= 0 && delta > 0 && usage > quota - delta;
}
}
| 2,107 | 31.9375 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.io.nativeio.NativeIO;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.Lists;
/**
* Journal manager for the common case of edits files being written
* to a storage directory.
*
* Note: this class is not thread-safe and should be externally
* synchronized.
*/
@InterfaceAudience.Private
public class FileJournalManager implements JournalManager {
private static final Log LOG = LogFactory.getLog(FileJournalManager.class);
private final Configuration conf;
private final StorageDirectory sd;
private final StorageErrorReporter errorReporter;
private int outputBufferCapacity = 512*1024;
private static final Pattern EDITS_REGEX = Pattern.compile(
NameNodeFile.EDITS.getName() + "_(\\d+)-(\\d+)");
private static final Pattern EDITS_INPROGRESS_REGEX = Pattern.compile(
NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+)");
private static final Pattern EDITS_INPROGRESS_STALE_REGEX = Pattern.compile(
NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+).*(\\S+)");
private File currentInProgress = null;
@VisibleForTesting
StoragePurger purger
= new NNStorageRetentionManager.DeletionStoragePurger();
public FileJournalManager(Configuration conf, StorageDirectory sd,
StorageErrorReporter errorReporter) {
this.conf = conf;
this.sd = sd;
this.errorReporter = errorReporter;
}
@Override
public void close() throws IOException {}
@Override
public void format(NamespaceInfo ns) throws IOException {
// Formatting file journals is done by the StorageDirectory
// format code, since they may share their directory with
// checkpoints, etc.
throw new UnsupportedOperationException();
}
@Override
public boolean hasSomeData() {
// Formatting file journals is done by the StorageDirectory
// format code, since they may share their directory with
// checkpoints, etc.
throw new UnsupportedOperationException();
}
@Override
synchronized public EditLogOutputStream startLogSegment(long txid,
int layoutVersion) throws IOException {
try {
currentInProgress = NNStorage.getInProgressEditsFile(sd, txid);
EditLogOutputStream stm = new EditLogFileOutputStream(conf,
currentInProgress, outputBufferCapacity);
stm.create(layoutVersion);
return stm;
} catch (IOException e) {
LOG.warn("Unable to start log segment " + txid +
" at " + currentInProgress + ": " +
e.getLocalizedMessage());
errorReporter.reportErrorOnFile(currentInProgress);
throw e;
}
}
@Override
synchronized public void finalizeLogSegment(long firstTxId, long lastTxId)
throws IOException {
File inprogressFile = NNStorage.getInProgressEditsFile(sd, firstTxId);
File dstFile = NNStorage.getFinalizedEditsFile(
sd, firstTxId, lastTxId);
LOG.info("Finalizing edits file " + inprogressFile + " -> " + dstFile);
Preconditions.checkState(!dstFile.exists(),
"Can't finalize edits file " + inprogressFile + " since finalized file " +
"already exists");
try {
NativeIO.renameTo(inprogressFile, dstFile);
} catch (IOException e) {
errorReporter.reportErrorOnFile(dstFile);
throw new IllegalStateException("Unable to finalize edits file " + inprogressFile, e);
}
if (inprogressFile.equals(currentInProgress)) {
currentInProgress = null;
}
}
@VisibleForTesting
public StorageDirectory getStorageDirectory() {
return sd;
}
@Override
synchronized public void setOutputBufferCapacity(int size) {
this.outputBufferCapacity = size;
}
@Override
public void purgeLogsOlderThan(long minTxIdToKeep)
throws IOException {
LOG.info("Purging logs older than " + minTxIdToKeep);
File[] files = FileUtil.listFiles(sd.getCurrentDir());
List<EditLogFile> editLogs = matchEditLogs(files, true);
for (EditLogFile log : editLogs) {
if (log.getFirstTxId() < minTxIdToKeep &&
log.getLastTxId() < minTxIdToKeep) {
purger.purgeLog(log);
}
}
}
/**
* Find all editlog segments starting at or above the given txid.
* @param firstTxId the txnid which to start looking
* @param inProgressOk whether or not to include the in-progress edit log
* segment
* @return a list of remote edit logs
* @throws IOException if edit logs cannot be listed.
*/
public List<RemoteEditLog> getRemoteEditLogs(long firstTxId,
boolean inProgressOk) throws IOException {
File currentDir = sd.getCurrentDir();
List<EditLogFile> allLogFiles = matchEditLogs(currentDir);
List<RemoteEditLog> ret = Lists.newArrayListWithCapacity(
allLogFiles.size());
for (EditLogFile elf : allLogFiles) {
if (elf.hasCorruptHeader() || (!inProgressOk && elf.isInProgress())) {
continue;
}
if (elf.isInProgress()) {
try {
elf.validateLog();
} catch (IOException e) {
LOG.error("got IOException while trying to validate header of " +
elf + ". Skipping.", e);
continue;
}
}
if (elf.getFirstTxId() >= firstTxId) {
ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId,
elf.isInProgress()));
} else if (elf.getFirstTxId() < firstTxId && firstTxId <= elf.getLastTxId()) {
        // The firstTxId is in the middle of an edit log segment. Return this
        // anyway and let the caller figure out whether it wants to use it.
ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId,
elf.isInProgress()));
}
}
Collections.sort(ret);
return ret;
}
/**
* Discard all editlog segments whose first txid is greater than or equal to
* the given txid, by renaming them with suffix ".trash".
*/
private void discardEditLogSegments(long startTxId) throws IOException {
File currentDir = sd.getCurrentDir();
List<EditLogFile> allLogFiles = matchEditLogs(currentDir);
List<EditLogFile> toTrash = Lists.newArrayList();
LOG.info("Discard the EditLog files, the given start txid is " + startTxId);
// go through the editlog files to make sure the startTxId is right at the
// segment boundary
for (EditLogFile elf : allLogFiles) {
if (elf.getFirstTxId() >= startTxId) {
toTrash.add(elf);
} else {
Preconditions.checkState(elf.getLastTxId() < startTxId);
}
}
for (EditLogFile elf : toTrash) {
// rename these editlog file as .trash
elf.moveAsideTrashFile(startTxId);
LOG.info("Trash the EditLog file " + elf);
}
}
/**
   * Returns matching edit logs in the given log directory. Simple helper
   * function that lists the files in logDir and calls matchEditLogs(File[]).
*
* @param logDir
* directory to match edit logs in
* @return matched edit logs
* @throws IOException
* IOException thrown for invalid logDir
*/
public static List<EditLogFile> matchEditLogs(File logDir) throws IOException {
return matchEditLogs(FileUtil.listFiles(logDir));
}
static List<EditLogFile> matchEditLogs(File[] filesInStorage) {
return matchEditLogs(filesInStorage, false);
}
private static List<EditLogFile> matchEditLogs(File[] filesInStorage,
boolean forPurging) {
List<EditLogFile> ret = Lists.newArrayList();
for (File f : filesInStorage) {
String name = f.getName();
// Check for edits
Matcher editsMatch = EDITS_REGEX.matcher(name);
if (editsMatch.matches()) {
try {
long startTxId = Long.parseLong(editsMatch.group(1));
long endTxId = Long.parseLong(editsMatch.group(2));
ret.add(new EditLogFile(f, startTxId, endTxId));
continue;
} catch (NumberFormatException nfe) {
LOG.error("Edits file " + f + " has improperly formatted " +
"transaction ID");
// skip
}
}
// Check for in-progress edits
Matcher inProgressEditsMatch = EDITS_INPROGRESS_REGEX.matcher(name);
if (inProgressEditsMatch.matches()) {
try {
long startTxId = Long.parseLong(inProgressEditsMatch.group(1));
ret.add(
new EditLogFile(f, startTxId, HdfsServerConstants.INVALID_TXID, true));
continue;
} catch (NumberFormatException nfe) {
LOG.error("In-progress edits file " + f + " has improperly " +
"formatted transaction ID");
// skip
}
}
if (forPurging) {
// Check for in-progress stale edits
Matcher staleInprogressEditsMatch = EDITS_INPROGRESS_STALE_REGEX
.matcher(name);
if (staleInprogressEditsMatch.matches()) {
try {
long startTxId = Long.parseLong(staleInprogressEditsMatch.group(1));
ret.add(new EditLogFile(f, startTxId, HdfsServerConstants.INVALID_TXID,
true));
continue;
} catch (NumberFormatException nfe) {
LOG.error("In-progress stale edits file " + f + " has improperly "
+ "formatted transaction ID");
// skip
}
}
}
}
return ret;
}
@Override
synchronized public void selectInputStreams(
Collection<EditLogInputStream> streams, long fromTxId,
boolean inProgressOk) throws IOException {
List<EditLogFile> elfs = matchEditLogs(sd.getCurrentDir());
if (LOG.isDebugEnabled()) {
LOG.debug(this + ": selecting input streams starting at " + fromTxId +
(inProgressOk ? " (inProgress ok) " : " (excluding inProgress) ") +
"from among " + elfs.size() + " candidate file(s)");
}
addStreamsToCollectionFromFiles(elfs, streams, fromTxId, inProgressOk);
}
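  /**
   * Add an input stream to the given collection for each edit log file that
   * may contain transactions at or after fromTxId. In-progress segments are
   * validated first; segments ending before fromTxId are skipped.
   */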
static void addStreamsToCollectionFromFiles(Collection<EditLogFile> elfs,
Collection<EditLogInputStream> streams, long fromTxId, boolean inProgressOk) {
for (EditLogFile elf : elfs) {
if (elf.isInProgress()) {
if (!inProgressOk) {
if (LOG.isDebugEnabled()) {
LOG.debug("passing over " + elf + " because it is in progress " +
"and we are ignoring in-progress logs.");
}
continue;
}
try {
elf.validateLog();
} catch (IOException e) {
LOG.error("got IOException while trying to validate header of " +
elf + ". Skipping.", e);
continue;
}
}
if (elf.lastTxId < fromTxId) {
assert elf.lastTxId != HdfsServerConstants.INVALID_TXID;
if (LOG.isDebugEnabled()) {
LOG.debug("passing over " + elf + " because it ends at " +
elf.lastTxId + ", but we only care about transactions " +
"as new as " + fromTxId);
}
continue;
}
EditLogFileInputStream elfis = new EditLogFileInputStream(elf.getFile(),
elf.getFirstTxId(), elf.getLastTxId(), elf.isInProgress());
LOG.debug("selecting edit log stream " + elf);
streams.add(elfis);
}
}
@Override
synchronized public void recoverUnfinalizedSegments() throws IOException {
File currentDir = sd.getCurrentDir();
LOG.info("Recovering unfinalized segments in " + currentDir);
List<EditLogFile> allLogFiles = matchEditLogs(currentDir);
for (EditLogFile elf : allLogFiles) {
if (elf.getFile().equals(currentInProgress)) {
continue;
}
if (elf.isInProgress()) {
// If the file is zero-length, we likely just crashed after opening the
// file, but before writing anything to it. Safe to delete it.
if (elf.getFile().length() == 0) {
LOG.info("Deleting zero-length edit log file " + elf);
if (!elf.getFile().delete()) {
throw new IOException("Unable to delete file " + elf.getFile());
}
continue;
}
elf.validateLog();
if (elf.hasCorruptHeader()) {
elf.moveAsideCorruptFile();
throw new CorruptionException("In-progress edit log file is corrupt: "
+ elf);
}
if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
// If the file has a valid header (isn't corrupt) but contains no
// transactions, we likely just crashed after opening the file and
// writing the header, but before syncing any transactions. Safe to
// delete the file.
LOG.info("Moving aside edit log file that seems to have zero " +
"transactions " + elf);
elf.moveAsideEmptyFile();
continue;
}
finalizeLogSegment(elf.getFirstTxId(), elf.getLastTxId());
}
}
}
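  /**
   * @return all edit log files that contain fromTxId or start at a later
   * transaction ID, sorted by their first transaction ID.
   */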
public List<EditLogFile> getLogFiles(long fromTxId) throws IOException {
File currentDir = sd.getCurrentDir();
List<EditLogFile> allLogFiles = matchEditLogs(currentDir);
List<EditLogFile> logFiles = Lists.newArrayList();
for (EditLogFile elf : allLogFiles) {
if (fromTxId <= elf.getFirstTxId() ||
elf.containsTxId(fromTxId)) {
logFiles.add(elf);
}
}
Collections.sort(logFiles, EditLogFile.COMPARE_BY_START_TXID);
return logFiles;
}
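  /**
   * @return the edit log file in this storage directory whose segment starts
   * exactly at the given transaction ID, or null if there is none.
   */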
public EditLogFile getLogFile(long startTxId) throws IOException {
return getLogFile(sd.getCurrentDir(), startTxId);
}
public static EditLogFile getLogFile(File dir, long startTxId)
throws IOException {
List<EditLogFile> files = matchEditLogs(dir);
List<EditLogFile> ret = Lists.newLinkedList();
for (EditLogFile elf : files) {
if (elf.getFirstTxId() == startTxId) {
ret.add(elf);
}
}
if (ret.isEmpty()) {
// no matches
return null;
} else if (ret.size() == 1) {
return ret.get(0);
} else {
throw new IllegalStateException("More than one log segment in " +
dir + " starting at txid " + startTxId + ": " +
Joiner.on(", ").join(ret));
}
}
@Override
public String toString() {
return String.format("FileJournalManager(root=%s)", sd.getRoot());
}
/**
* Record of an edit log that has been located and had its filename parsed.
*/
@InterfaceAudience.Private
public static class EditLogFile {
private File file;
private final long firstTxId;
private long lastTxId;
private boolean hasCorruptHeader = false;
private final boolean isInProgress;
final static Comparator<EditLogFile> COMPARE_BY_START_TXID
= new Comparator<EditLogFile>() {
@Override
public int compare(EditLogFile a, EditLogFile b) {
return ComparisonChain.start()
.compare(a.getFirstTxId(), b.getFirstTxId())
.compare(a.getLastTxId(), b.getLastTxId())
.result();
}
};
EditLogFile(File file,
long firstTxId, long lastTxId) {
this(file, firstTxId, lastTxId, false);
assert (lastTxId != HdfsServerConstants.INVALID_TXID)
&& (lastTxId >= firstTxId);
}
EditLogFile(File file, long firstTxId,
long lastTxId, boolean isInProgress) {
assert (lastTxId == HdfsServerConstants.INVALID_TXID && isInProgress)
|| (lastTxId != HdfsServerConstants.INVALID_TXID && lastTxId >= firstTxId);
assert (firstTxId > 0) || (firstTxId == HdfsServerConstants.INVALID_TXID);
assert file != null;
Preconditions.checkArgument(!isInProgress ||
lastTxId == HdfsServerConstants.INVALID_TXID);
this.firstTxId = firstTxId;
this.lastTxId = lastTxId;
this.file = file;
this.isInProgress = isInProgress;
}
public long getFirstTxId() {
return firstTxId;
}
public long getLastTxId() {
return lastTxId;
}
boolean containsTxId(long txId) {
return firstTxId <= txId && txId <= lastTxId;
}
/**
* Find out where the edit log ends.
     * This will update the lastTxId of the EditLogFile and
     * record whether its header is corrupt.
*/
public void validateLog() throws IOException {
EditLogValidation val = EditLogFileInputStream.validateEditLog(file);
this.lastTxId = val.getEndTxId();
this.hasCorruptHeader = val.hasCorruptHeader();
}
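    /**
     * Scan the edit log to find its last transaction ID and to detect a
     * corrupt header, updating this EditLogFile accordingly.
     */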
public void scanLog() throws IOException {
EditLogValidation val = EditLogFileInputStream.scanEditLog(file);
this.lastTxId = val.getEndTxId();
this.hasCorruptHeader = val.hasCorruptHeader();
}
public boolean isInProgress() {
return isInProgress;
}
public File getFile() {
return file;
}
boolean hasCorruptHeader() {
return hasCorruptHeader;
}
void moveAsideCorruptFile() throws IOException {
assert hasCorruptHeader;
renameSelf(".corrupt");
}
void moveAsideTrashFile(long markerTxid) throws IOException {
assert this.getFirstTxId() >= markerTxid;
renameSelf(".trash");
}
public void moveAsideEmptyFile() throws IOException {
assert lastTxId == HdfsServerConstants.INVALID_TXID;
renameSelf(".empty");
}
private void renameSelf(String newSuffix) throws IOException {
File src = file;
File dst = new File(src.getParent(), src.getName() + newSuffix);
// renameTo fails on Windows if the destination file already exists.
try {
if (dst.exists()) {
if (!dst.delete()) {
throw new IOException("Couldn't delete " + dst);
}
}
NativeIO.renameTo(src, dst);
} catch (IOException e) {
throw new IOException(
"Couldn't rename log " + src + " to " + dst, e);
}
file = dst;
}
@Override
public String toString() {
return String.format("EditLogFile(file=%s,first=%019d,last=%019d,"
+"inProgress=%b,hasCorruptHeader=%b)",
file.toString(), firstTxId, lastTxId,
isInProgress(), hasCorruptHeader);
}
}
@Override
public void discardSegments(long startTxid) throws IOException {
discardEditLogSegments(startTxid);
}
@Override
public void doPreUpgrade() throws IOException {
LOG.info("Starting upgrade of edits directory " + sd.getRoot());
try {
NNUpgradeUtil.doPreUpgrade(conf, sd);
} catch (IOException ioe) {
LOG.error("Failed to move aside pre-upgrade storage " +
"in image directory " + sd.getRoot(), ioe);
throw ioe;
}
}
/**
* This method assumes that the fields of the {@link Storage} object have
* already been updated to the appropriate new values for the upgrade.
*/
@Override
public void doUpgrade(Storage storage) throws IOException {
NNUpgradeUtil.doUpgrade(sd, storage);
}
@Override
public void doFinalize() throws IOException {
NNUpgradeUtil.doFinalize(sd);
}
@Override
public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage,
int targetLayoutVersion) throws IOException {
return NNUpgradeUtil.canRollBack(sd, storage,
prevStorage, targetLayoutVersion);
}
@Override
public void doRollback() throws IOException {
NNUpgradeUtil.doRollBack(sd);
}
@Override
public long getJournalCTime() throws IOException {
StorageInfo sInfo = new StorageInfo((NodeType)null);
sInfo.readProperties(sd);
return sInfo.getCTime();
}
}
| 21,812 | 33.243328 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.NoSuchElementException;
import com.google.common.collect.ImmutableList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import com.google.common.base.Preconditions;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.ID_INTEGER_COMPARATOR;
/**
* Contains INodes information resolved from a given path.
*/
public class INodesInPath {
public static final Log LOG = LogFactory.getLog(INodesInPath.class);
/**
* @return true if path component is {@link HdfsConstants#DOT_SNAPSHOT_DIR}
*/
private static boolean isDotSnapshotDir(byte[] pathComponent) {
return pathComponent != null &&
Arrays.equals(HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES, pathComponent);
}
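  /**
   * Build an INodesInPath for the given inode by following its parent
   * links up to the root.
   */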
static INodesInPath fromINode(INode inode) {
int depth = 0, index;
INode tmp = inode;
while (tmp != null) {
depth++;
tmp = tmp.getParent();
}
final byte[][] path = new byte[depth][];
final INode[] inodes = new INode[depth];
tmp = inode;
index = depth;
while (tmp != null) {
index--;
path[index] = tmp.getKey();
inodes[index] = tmp;
tmp = tmp.getParent();
}
return new INodesInPath(inodes, path);
}
/**
* Given some components, create a path name.
* @param components The path components
   * @param start start index (inclusive)
   * @param end end index (exclusive)
* @return concatenated path
*/
private static String constructPath(byte[][] components, int start, int end) {
StringBuilder buf = new StringBuilder();
for (int i = start; i < end; i++) {
buf.append(DFSUtil.bytes2String(components[i]));
if (i < end - 1) {
buf.append(Path.SEPARATOR);
}
}
return buf.toString();
}
/**
* Retrieve existing INodes from a path. For non-snapshot path,
* the number of INodes is equal to the number of path components. For
* snapshot path (e.g., /foo/.snapshot/s1/bar), the number of INodes is
* (number_of_path_components - 1).
*
* An UnresolvedPathException is always thrown when an intermediate path
* component refers to a symbolic link. If the final path component refers
* to a symbolic link then an UnresolvedPathException is only thrown if
* resolveLink is true.
*
* <p>
* Example: <br>
* Given the path /c1/c2/c3 where only /c1/c2 exists, resulting in the
* following path components: ["","c1","c2","c3"]
*
* <p>
* <code>getExistingPathINodes(["","c1","c2"])</code> should fill
* the array with [rootINode,c1,c2], <br>
* <code>getExistingPathINodes(["","c1","c2","c3"])</code> should
* fill the array with [rootINode,c1,c2,null]
*
* @param startingDir the starting directory
* @param components array of path component name
* @param resolveLink indicates whether UnresolvedLinkException should
* be thrown when the path refers to a symbolic link.
* @return the specified number of existing INodes in the path
*/
static INodesInPath resolve(final INodeDirectory startingDir,
final byte[][] components, final boolean resolveLink)
throws UnresolvedLinkException {
Preconditions.checkArgument(startingDir.compareTo(components[0]) == 0);
INode curNode = startingDir;
int count = 0;
int inodeNum = 0;
INode[] inodes = new INode[components.length];
boolean isSnapshot = false;
int snapshotId = CURRENT_STATE_ID;
while (count < components.length && curNode != null) {
final boolean lastComp = (count == components.length - 1);
inodes[inodeNum++] = curNode;
final boolean isRef = curNode.isReference();
final boolean isDir = curNode.isDirectory();
final INodeDirectory dir = isDir? curNode.asDirectory(): null;
if (!isRef && isDir && dir.isWithSnapshot()) {
//if the path is a non-snapshot path, update the latest snapshot.
if (!isSnapshot && shouldUpdateLatestId(
dir.getDirectoryWithSnapshotFeature().getLastSnapshotId(),
snapshotId)) {
snapshotId = dir.getDirectoryWithSnapshotFeature().getLastSnapshotId();
}
} else if (isRef && isDir && !lastComp) {
// If the curNode is a reference node, need to check its dstSnapshot:
// 1. if the existing snapshot is no later than the dstSnapshot (which
// is the latest snapshot in dst before the rename), the changes
// should be recorded in previous snapshots (belonging to src).
// 2. however, if the ref node is already the last component, we still
// need to know the latest snapshot among the ref node's ancestors,
// in case of processing a deletion operation. Thus we do not overwrite
// the latest snapshot if lastComp is true. In case of the operation is
// a modification operation, we do a similar check in corresponding
// recordModification method.
if (!isSnapshot) {
int dstSnapshotId = curNode.asReference().getDstSnapshotId();
if (snapshotId == CURRENT_STATE_ID || // no snapshot in dst tree of rename
(dstSnapshotId != CURRENT_STATE_ID &&
dstSnapshotId >= snapshotId)) { // the above scenario
int lastSnapshot = CURRENT_STATE_ID;
DirectoryWithSnapshotFeature sf;
if (curNode.isDirectory() &&
(sf = curNode.asDirectory().getDirectoryWithSnapshotFeature()) != null) {
lastSnapshot = sf.getLastSnapshotId();
}
snapshotId = lastSnapshot;
}
}
}
if (curNode.isSymlink() && (!lastComp || resolveLink)) {
final String path = constructPath(components, 0, components.length);
final String preceding = constructPath(components, 0, count);
final String remainder =
constructPath(components, count + 1, components.length);
final String link = DFSUtil.bytes2String(components[count]);
final String target = curNode.asSymlink().getSymlinkString();
if (LOG.isDebugEnabled()) {
LOG.debug("UnresolvedPathException " +
" path: " + path + " preceding: " + preceding +
" count: " + count + " link: " + link + " target: " + target +
" remainder: " + remainder);
}
throw new UnresolvedPathException(path, preceding, remainder, target);
}
if (lastComp || !isDir) {
break;
}
final byte[] childName = components[count + 1];
// check if the next byte[] in components is for ".snapshot"
if (isDotSnapshotDir(childName) && dir.isSnapshottable()) {
// skip the ".snapshot" in components
count++;
isSnapshot = true;
// check if ".snapshot" is the last element of components
if (count == components.length - 1) {
break;
}
// Resolve snapshot root
final Snapshot s = dir.getSnapshot(components[count + 1]);
if (s == null) {
curNode = null; // snapshot not found
} else {
curNode = s.getRoot();
snapshotId = s.getId();
}
} else {
// normal case, and also for resolving file/dir under snapshot root
curNode = dir.getChild(childName,
isSnapshot ? snapshotId : CURRENT_STATE_ID);
}
count++;
}
if (isSnapshot && !isDotSnapshotDir(components[components.length - 1])) {
      // For a snapshot path, shrink the inode array. However, for a path
      // ending with .snapshot, still keep the trailing null inode in the array.
INode[] newNodes = new INode[components.length - 1];
System.arraycopy(inodes, 0, newNodes, 0, newNodes.length);
inodes = newNodes;
}
return new INodesInPath(inodes, components, isSnapshot, snapshotId);
}
private static boolean shouldUpdateLatestId(int sid, int snapshotId) {
return snapshotId == CURRENT_STATE_ID || (sid != CURRENT_STATE_ID &&
ID_INTEGER_COMPARATOR.compare(snapshotId, sid) < 0);
}
/**
* Replace an inode of the given INodesInPath in the given position. We do a
* deep copy of the INode array.
* @param pos the position of the replacement
* @param inode the new inode
* @return a new INodesInPath instance
*/
public static INodesInPath replace(INodesInPath iip, int pos, INode inode) {
    Preconditions.checkArgument(iip.length() > 0 && pos > 0 // not for root
&& pos < iip.length());
if (iip.getINode(pos) == null) {
Preconditions.checkState(iip.getINode(pos - 1) != null);
}
INode[] inodes = new INode[iip.inodes.length];
System.arraycopy(iip.inodes, 0, inodes, 0, inodes.length);
inodes[pos] = inode;
return new INodesInPath(inodes, iip.path, iip.isSnapshot, iip.snapshotId);
}
/**
* Extend a given INodesInPath with a child INode. The child INode will be
* appended to the end of the new INodesInPath.
*/
public static INodesInPath append(INodesInPath iip, INode child,
byte[] childName) {
Preconditions.checkArgument(!iip.isSnapshot && iip.length() > 0);
Preconditions.checkArgument(iip.getLastINode() != null && iip
.getLastINode().isDirectory());
INode[] inodes = new INode[iip.length() + 1];
System.arraycopy(iip.inodes, 0, inodes, 0, inodes.length - 1);
inodes[inodes.length - 1] = child;
byte[][] path = new byte[iip.path.length + 1][];
System.arraycopy(iip.path, 0, path, 0, path.length - 1);
path[path.length - 1] = childName;
return new INodesInPath(inodes, path, false, iip.snapshotId);
}
private final byte[][] path;
/**
* Array with the specified number of INodes resolved for a given path.
*/
private final INode[] inodes;
/**
* true if this path corresponds to a snapshot
*/
private final boolean isSnapshot;
/**
* For snapshot paths, it is the id of the snapshot; or
* {@link Snapshot#CURRENT_STATE_ID} if the snapshot does not exist. For
* non-snapshot paths, it is the id of the latest snapshot found in the path;
* or {@link Snapshot#CURRENT_STATE_ID} if no snapshot is found.
*/
private final int snapshotId;
private INodesInPath(INode[] inodes, byte[][] path, boolean isSnapshot,
int snapshotId) {
Preconditions.checkArgument(inodes != null && path != null);
this.inodes = inodes;
this.path = path;
this.isSnapshot = isSnapshot;
this.snapshotId = snapshotId;
}
private INodesInPath(INode[] inodes, byte[][] path) {
this(inodes, path, false, CURRENT_STATE_ID);
}
/**
* For non-snapshot paths, return the latest snapshot id found in the path.
*/
public int getLatestSnapshotId() {
Preconditions.checkState(!isSnapshot);
return snapshotId;
}
/**
* For snapshot paths, return the id of the snapshot specified in the path.
* For non-snapshot paths, return {@link Snapshot#CURRENT_STATE_ID}.
*/
public int getPathSnapshotId() {
return isSnapshot ? snapshotId : CURRENT_STATE_ID;
}
/**
* @return the i-th inode if i >= 0;
   *         otherwise (i < 0), return the (length + i)-th inode.
*/
public INode getINode(int i) {
if (inodes == null || inodes.length == 0) {
throw new NoSuchElementException("inodes is null or empty");
}
int index = i >= 0 ? i : inodes.length + i;
if (index < inodes.length && index >= 0) {
return inodes[index];
} else {
throw new NoSuchElementException("inodes.length == " + inodes.length);
}
}
/** @return the last inode. */
public INode getLastINode() {
return getINode(-1);
}
byte[] getLastLocalName() {
return path[path.length - 1];
}
public byte[][] getPathComponents() {
return path;
}
/** @return the full path in string form */
public String getPath() {
return DFSUtil.byteArray2PathString(path);
}
public String getParentPath() {
return getPath(path.length - 1);
}
public String getPath(int pos) {
return DFSUtil.byteArray2PathString(path, 0, pos);
}
/**
   * @param offset starting index (inclusive)
* @param length number of path components
* @return sub-list of the path
*/
public List<String> getPath(int offset, int length) {
Preconditions.checkArgument(offset >= 0 && length >= 0 && offset + length
<= path.length);
ImmutableList.Builder<String> components = ImmutableList.builder();
for (int i = offset; i < offset + length; i++) {
components.add(DFSUtil.bytes2String(path[i]));
}
return components.build();
}
public int length() {
return inodes.length;
}
public List<INode> getReadOnlyINodes() {
return Collections.unmodifiableList(Arrays.asList(inodes));
}
public INode[] getINodesArray() {
INode[] retArr = new INode[inodes.length];
System.arraycopy(inodes, 0, retArr, 0, inodes.length);
return retArr;
}
/**
* @param length number of ancestral INodes in the returned INodesInPath
* instance
* @return the INodesInPath instance containing ancestral INodes. Note that
* this method only handles non-snapshot paths.
*/
private INodesInPath getAncestorINodesInPath(int length) {
Preconditions.checkArgument(length >= 0 && length < inodes.length);
Preconditions.checkState(!isSnapshot());
final INode[] anodes = new INode[length];
final byte[][] apath = new byte[length][];
System.arraycopy(this.inodes, 0, anodes, 0, length);
System.arraycopy(this.path, 0, apath, 0, length);
return new INodesInPath(anodes, apath, false, snapshotId);
}
/**
* @return an INodesInPath instance containing all the INodes in the parent
* path. We do a deep copy here.
*/
public INodesInPath getParentINodesInPath() {
return inodes.length > 1 ? getAncestorINodesInPath(inodes.length - 1) :
null;
}
/**
   * @return a new INodesInPath instance that only contains existing INodes.
* Note that this method only handles non-snapshot paths.
*/
public INodesInPath getExistingINodes() {
Preconditions.checkState(!isSnapshot());
int i = 0;
for (; i < inodes.length; i++) {
if (inodes[i] == null) {
break;
}
}
INode[] existing = new INode[i];
byte[][] existingPath = new byte[i][];
System.arraycopy(inodes, 0, existing, 0, i);
System.arraycopy(path, 0, existingPath, 0, i);
return new INodesInPath(existing, existingPath, false, snapshotId);
}
/**
* @return isSnapshot true for a snapshot path
*/
boolean isSnapshot() {
return this.isSnapshot;
}
private static String toString(INode inode) {
return inode == null? null: inode.getLocalName();
}
@Override
public String toString() {
return toString(true);
}
  private String toString(boolean validateObject) {
    if (validateObject) {
validate();
}
final StringBuilder b = new StringBuilder(getClass().getSimpleName())
.append(": path = ").append(DFSUtil.byteArray2PathString(path))
.append("\n inodes = ");
if (inodes == null) {
b.append("null");
} else if (inodes.length == 0) {
b.append("[]");
} else {
b.append("[").append(toString(inodes[0]));
for(int i = 1; i < inodes.length; i++) {
b.append(", ").append(toString(inodes[i]));
}
b.append("], length=").append(inodes.length);
}
b.append("\n isSnapshot = ").append(isSnapshot)
.append("\n snapshotId = ").append(snapshotId);
return b.toString();
}
void validate() {
// check parent up to snapshotRootIndex if this is a snapshot path
int i = 0;
if (inodes[i] != null) {
for(i++; i < inodes.length && inodes[i] != null; i++) {
final INodeDirectory parent_i = inodes[i].getParent();
final INodeDirectory parent_i_1 = inodes[i-1].getParent();
if (parent_i != inodes[i-1] &&
(parent_i_1 == null || !parent_i_1.isSnapshottable()
|| parent_i != parent_i_1)) {
throw new AssertionError(
"inodes[" + i + "].getParent() != inodes[" + (i-1)
+ "]\n inodes[" + i + "]=" + inodes[i].toDetailString()
+ "\n inodes[" + (i-1) + "]=" + inodes[i-1].toDetailString()
+ "\n this=" + toString(false));
}
}
}
if (i != inodes.length) {
throw new AssertionError("i = " + i + " != " + inodes.length
+ ", this=" + toString(false));
}
}
}
| 17,925 | 35.287449 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import javax.annotation.Nonnull;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CacheDirective;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolStats;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.IntrusiveCollection;
import com.google.common.base.Preconditions;
/**
* A CachePool describes a set of cache resources being managed by the NameNode.
* User caching requests are billed to the cache pool specified in the request.
*
* This is an internal class, only used on the NameNode. For identifying or
* describing a cache pool to clients, please use CachePoolInfo.
*
* CachePools must be accessed under the FSNamesystem lock.
*/
@InterfaceAudience.Private
public final class CachePool {
@Nonnull
private final String poolName;
@Nonnull
private String ownerName;
@Nonnull
private String groupName;
/**
* Cache pool permissions.
*
* READ permission means that you can list the cache directives in this pool.
* WRITE permission means that you can add, remove, or modify cache directives
* in this pool.
* EXECUTE permission is unused.
*/
@Nonnull
private FsPermission mode;
/**
* Maximum number of bytes that can be cached in this pool.
*/
private long limit;
/**
* Maximum duration that a CacheDirective in this pool remains valid,
* in milliseconds.
*/
private long maxRelativeExpiryMs;
private long bytesNeeded;
private long bytesCached;
private long filesNeeded;
private long filesCached;
public final static class DirectiveList
extends IntrusiveCollection<CacheDirective> {
private final CachePool cachePool;
private DirectiveList(CachePool cachePool) {
this.cachePool = cachePool;
}
public CachePool getCachePool() {
return cachePool;
}
}
@Nonnull
private final DirectiveList directiveList = new DirectiveList(this);
/**
* Create a new cache pool based on a CachePoolInfo object and the defaults.
* We will fill in information that was not supplied according to the
* defaults.
*/
static CachePool createFromInfoAndDefaults(CachePoolInfo info)
throws IOException {
UserGroupInformation ugi = null;
String ownerName = info.getOwnerName();
if (ownerName == null) {
ugi = NameNode.getRemoteUser();
ownerName = ugi.getShortUserName();
}
String groupName = info.getGroupName();
if (groupName == null) {
if (ugi == null) {
ugi = NameNode.getRemoteUser();
}
groupName = ugi.getPrimaryGroupName();
}
FsPermission mode = (info.getMode() == null) ?
FsPermission.getCachePoolDefault() : info.getMode();
long limit = info.getLimit() == null ?
CachePoolInfo.DEFAULT_LIMIT : info.getLimit();
long maxRelativeExpiry = info.getMaxRelativeExpiryMs() == null ?
CachePoolInfo.DEFAULT_MAX_RELATIVE_EXPIRY :
info.getMaxRelativeExpiryMs();
return new CachePool(info.getPoolName(),
ownerName, groupName, mode, limit, maxRelativeExpiry);
}
/**
* Create a new cache pool based on a CachePoolInfo object.
* No fields in the CachePoolInfo can be blank.
*/
static CachePool createFromInfo(CachePoolInfo info) {
return new CachePool(info.getPoolName(),
info.getOwnerName(), info.getGroupName(),
info.getMode(), info.getLimit(), info.getMaxRelativeExpiryMs());
}
CachePool(String poolName, String ownerName, String groupName,
FsPermission mode, long limit, long maxRelativeExpiry) {
Preconditions.checkNotNull(poolName);
Preconditions.checkNotNull(ownerName);
Preconditions.checkNotNull(groupName);
Preconditions.checkNotNull(mode);
this.poolName = poolName;
this.ownerName = ownerName;
this.groupName = groupName;
this.mode = new FsPermission(mode);
this.limit = limit;
this.maxRelativeExpiryMs = maxRelativeExpiry;
}
public String getPoolName() {
return poolName;
}
public String getOwnerName() {
return ownerName;
}
public CachePool setOwnerName(String ownerName) {
this.ownerName = ownerName;
return this;
}
public String getGroupName() {
return groupName;
}
public CachePool setGroupName(String groupName) {
this.groupName = groupName;
return this;
}
public FsPermission getMode() {
return mode;
}
public CachePool setMode(FsPermission mode) {
this.mode = new FsPermission(mode);
return this;
}
public long getLimit() {
return limit;
}
public CachePool setLimit(long bytes) {
this.limit = bytes;
return this;
}
public long getMaxRelativeExpiryMs() {
return maxRelativeExpiryMs;
}
public CachePool setMaxRelativeExpiryMs(long expiry) {
this.maxRelativeExpiryMs = expiry;
return this;
}
/**
* Get either full or partial information about this CachePool.
*
* @param fullInfo
   *          If false, only the pool name is returned (i.e., what you
   *          would get if you didn't have read permission for this pool);
   *          if true, all fields are included.
* @return
* Cache pool information.
*/
CachePoolInfo getInfo(boolean fullInfo) {
CachePoolInfo info = new CachePoolInfo(poolName);
if (!fullInfo) {
return info;
}
return info.setOwnerName(ownerName).
setGroupName(groupName).
setMode(new FsPermission(mode)).
setLimit(limit).
setMaxRelativeExpiryMs(maxRelativeExpiryMs);
}
/**
* Resets statistics related to this CachePool
*/
public void resetStatistics() {
bytesNeeded = 0;
bytesCached = 0;
filesNeeded = 0;
filesCached = 0;
}
public void addBytesNeeded(long bytes) {
bytesNeeded += bytes;
}
public void addBytesCached(long bytes) {
bytesCached += bytes;
}
public void addFilesNeeded(long files) {
filesNeeded += files;
}
public void addFilesCached(long files) {
filesCached += files;
}
public long getBytesNeeded() {
return bytesNeeded;
}
public long getBytesCached() {
return bytesCached;
}
public long getBytesOverlimit() {
return Math.max(bytesNeeded-limit, 0);
}
public long getFilesNeeded() {
return filesNeeded;
}
public long getFilesCached() {
return filesCached;
}
/**
* Get statistics about this CachePool.
*
* @return Cache pool statistics.
*/
private CachePoolStats getStats() {
return new CachePoolStats.Builder().
setBytesNeeded(bytesNeeded).
setBytesCached(bytesCached).
setBytesOverlimit(getBytesOverlimit()).
setFilesNeeded(filesNeeded).
setFilesCached(filesCached).
build();
}
/**
* Returns a CachePoolInfo describing this CachePool based on the permissions
* of the calling user. Unprivileged users will see only minimal descriptive
* information about the pool.
*
* @param pc Permission checker to be used to validate the user's permissions,
* or null
* @return CachePoolEntry describing this CachePool
*/
public CachePoolEntry getEntry(FSPermissionChecker pc) {
boolean hasPermission = true;
if (pc != null) {
try {
pc.checkPermission(this, FsAction.READ);
} catch (AccessControlException e) {
hasPermission = false;
}
}
return new CachePoolEntry(getInfo(hasPermission),
hasPermission ? getStats() : new CachePoolStats.Builder().build());
}
public String toString() {
return new StringBuilder().
append("{ ").append("poolName:").append(poolName).
append(", ownerName:").append(ownerName).
append(", groupName:").append(groupName).
append(", mode:").append(mode).
append(", limit:").append(limit).
append(", maxRelativeExpiryMs:").append(maxRelativeExpiryMs).
append(" }").toString();
}
public DirectiveList getDirectiveList() {
return directiveList;
}
}
| 9,200 | 27.310769 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Iterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet;
import com.google.common.base.Preconditions;
/**
* Storing all the {@link INode}s and maintaining the mapping between INode ID
* and INode.
*/
public class INodeMap {
static INodeMap newInstance(INodeDirectory rootDir) {
// Compute the map capacity by allocating 1% of total memory
int capacity = LightWeightGSet.computeCapacity(1, "INodeMap");
GSet<INode, INodeWithAdditionalFields> map =
new LightWeightGSet<>(capacity);
map.put(rootDir);
return new INodeMap(map);
}
/** Synchronized by external lock. */
private final GSet<INode, INodeWithAdditionalFields> map;
public Iterator<INodeWithAdditionalFields> getMapIterator() {
return map.iterator();
}
private INodeMap(GSet<INode, INodeWithAdditionalFields> map) {
Preconditions.checkArgument(map != null);
this.map = map;
}
/**
* Add an {@link INode} into the {@link INode} map. Replace the old value if
* necessary.
* @param inode The {@link INode} to be added to the map.
*/
public final void put(INode inode) {
if (inode instanceof INodeWithAdditionalFields) {
map.put((INodeWithAdditionalFields)inode);
}
}
/**
   * Remove an {@link INode} from the map.
* @param inode The {@link INode} to be removed.
*/
public final void remove(INode inode) {
map.remove(inode);
}
/**
* @return The size of the map.
*/
public int size() {
return map.size();
}
/**
* Get the {@link INode} with the given id from the map.
* @param id ID of the {@link INode}.
* @return The {@link INode} in the map with the given id. Return null if no
* such {@link INode} in the map.
*/
public INode get(long id) {
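    // Build a throw-away INode stub that carries only the requested id.
    // INode equality and hashing are based solely on the inode id, so the
    // stub serves as a lookup key for the GSet and is never added to the map.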
INode inode = new INodeWithAdditionalFields(id, null, new PermissionStatus(
"", "", new FsPermission((short) 0)), 0, 0) {
@Override
void recordModification(int latestSnapshotId) {
}
@Override
public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
// Nothing to do
}
@Override
public QuotaCounts computeQuotaUsage(
BlockStoragePolicySuite bsps, byte blockStoragePolicyId,
boolean useCache, int lastSnapshotId) {
return null;
}
@Override
public ContentSummaryComputationContext computeContentSummary(
ContentSummaryComputationContext summary) {
return null;
}
@Override
public void cleanSubtree(
ReclaimContext reclaimContext, int snapshotId, int priorSnapshotId) {
}
@Override
public byte getStoragePolicyID(){
return HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}
@Override
public byte getLocalStoragePolicyID() {
return HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}
};
return map.get(inode);
}
/**
* Clear the {@link #map}
*/
public void clear() {
map.clear();
}
}
| 4,194 | 28.542254 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.server.namenode.INode;
import com.google.common.collect.ImmutableList;
/**
* Feature for extended attributes.
*/
@InterfaceAudience.Private
public class XAttrFeature implements INode.Feature {
public static final ImmutableList<XAttr> EMPTY_ENTRY_LIST =
ImmutableList.of();
private final ImmutableList<XAttr> xAttrs;
public XAttrFeature(ImmutableList<XAttr> xAttrs) {
this.xAttrs = xAttrs;
}
public ImmutableList<XAttr> getXAttrs() {
return xAttrs;
}
}
| 1,465 | 32.318182 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT;
import java.io.DataInput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.CacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
/**
* The Cache Manager handles caching on DataNodes.
*
* This class is instantiated by the FSNamesystem.
* It maintains the mapping of cached blocks to datanodes via processing
* datanode cache reports. Based on these reports and addition and removal of
* caching directives, we will schedule caching and uncaching work.
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
public final class CacheManager {
public static final Logger LOG = LoggerFactory.getLogger(CacheManager.class);
private static final float MIN_CACHED_BLOCKS_PERCENT = 0.001f;
// TODO: add pending / underCached / schedule cached blocks stats.
/**
* The FSNamesystem that contains this CacheManager.
*/
private final FSNamesystem namesystem;
/**
* The BlockManager associated with the FSN that owns this CacheManager.
*/
private final BlockManager blockManager;
/**
* Cache directives, sorted by ID.
*
* listCacheDirectives relies on the ordering of elements in this map
* to track what has already been listed by the client.
*/
private final TreeMap<Long, CacheDirective> directivesById =
new TreeMap<Long, CacheDirective>();
/**
* The directive ID to use for a new directive. IDs always increase, and are
* never reused.
*/
private long nextDirectiveId;
/**
* Cache directives, sorted by path
*/
private final TreeMap<String, List<CacheDirective>> directivesByPath =
new TreeMap<String, List<CacheDirective>>();
/**
* Cache pools, sorted by name.
*/
private final TreeMap<String, CachePool> cachePools =
new TreeMap<String, CachePool>();
/**
* Maximum number of cache pools to list in one operation.
*/
private final int maxListCachePoolsResponses;
/**
* Maximum number of cache pool directives to list in one operation.
*/
private final int maxListCacheDirectivesNumResponses;
/**
* Interval between scans in milliseconds.
*/
private final long scanIntervalMs;
/**
* All cached blocks.
*/
private final GSet<CachedBlock, CachedBlock> cachedBlocks;
/**
* Lock which protects the CacheReplicationMonitor.
*/
private final ReentrantLock crmLock = new ReentrantLock();
private final SerializerCompat serializerCompat = new SerializerCompat();
/**
* The CacheReplicationMonitor.
*/
private CacheReplicationMonitor monitor;
public static final class PersistState {
public final CacheManagerSection section;
public final List<CachePoolInfoProto> pools;
public final List<CacheDirectiveInfoProto> directives;
public PersistState(CacheManagerSection section,
List<CachePoolInfoProto> pools, List<CacheDirectiveInfoProto> directives) {
this.section = section;
this.pools = pools;
this.directives = directives;
}
}
CacheManager(FSNamesystem namesystem, Configuration conf,
BlockManager blockManager) {
this.namesystem = namesystem;
this.blockManager = blockManager;
this.nextDirectiveId = 1;
this.maxListCachePoolsResponses = conf.getInt(
DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
this.maxListCacheDirectivesNumResponses = conf.getInt(
DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
scanIntervalMs = conf.getLong(
DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
float cachedBlocksPercent = conf.getFloat(
DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
if (cachedBlocksPercent < MIN_CACHED_BLOCKS_PERCENT) {
LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
cachedBlocksPercent = MIN_CACHED_BLOCKS_PERCENT;
}
this.cachedBlocks = new LightWeightGSet<CachedBlock, CachedBlock>(
LightWeightGSet.computeCapacity(cachedBlocksPercent,
"cachedBlocks"));
}
/**
* Resets all tracked directives and pools. Called during 2NN checkpointing to
* reset FSNamesystem state. See {@link FSNamesystem#clear()}.
*/
void clear() {
directivesById.clear();
directivesByPath.clear();
cachePools.clear();
nextDirectiveId = 1;
}
public void startMonitorThread() {
crmLock.lock();
try {
if (this.monitor == null) {
this.monitor = new CacheReplicationMonitor(namesystem, this,
scanIntervalMs, crmLock);
this.monitor.start();
}
} finally {
crmLock.unlock();
}
}
public void stopMonitorThread() {
crmLock.lock();
try {
if (this.monitor != null) {
CacheReplicationMonitor prevMonitor = this.monitor;
this.monitor = null;
IOUtils.closeQuietly(prevMonitor);
}
} finally {
crmLock.unlock();
}
}
public void clearDirectiveStats() {
assert namesystem.hasWriteLock();
for (CacheDirective directive : directivesById.values()) {
directive.resetStatistics();
}
}
/**
* @return Unmodifiable view of the collection of CachePools.
*/
public Collection<CachePool> getCachePools() {
assert namesystem.hasReadLock();
return Collections.unmodifiableCollection(cachePools.values());
}
/**
* @return Unmodifiable view of the collection of CacheDirectives.
*/
public Collection<CacheDirective> getCacheDirectives() {
assert namesystem.hasReadLock();
return Collections.unmodifiableCollection(directivesById.values());
}
@VisibleForTesting
public GSet<CachedBlock, CachedBlock> getCachedBlocks() {
assert namesystem.hasReadLock();
return cachedBlocks;
}
private long getNextDirectiveId() throws IOException {
assert namesystem.hasWriteLock();
if (nextDirectiveId >= Long.MAX_VALUE - 1) {
throw new IOException("No more available IDs.");
}
return nextDirectiveId++;
}
// Helper getter / validation methods
private static void checkWritePermission(FSPermissionChecker pc,
CachePool pool) throws AccessControlException {
    if (pc != null) {
pc.checkPermission(pool, FsAction.WRITE);
}
}
private static String validatePoolName(CacheDirectiveInfo directive)
throws InvalidRequestException {
String pool = directive.getPool();
if (pool == null) {
throw new InvalidRequestException("No pool specified.");
}
if (pool.isEmpty()) {
throw new InvalidRequestException("Invalid empty pool name.");
}
return pool;
}
private static String validatePath(CacheDirectiveInfo directive)
throws InvalidRequestException {
if (directive.getPath() == null) {
throw new InvalidRequestException("No path specified.");
}
String path = directive.getPath().toUri().getPath();
if (!DFSUtil.isValidName(path)) {
throw new InvalidRequestException("Invalid path '" + path + "'.");
}
return path;
}
private static short validateReplication(CacheDirectiveInfo directive,
short defaultValue) throws InvalidRequestException {
short repl = (directive.getReplication() != null)
? directive.getReplication() : defaultValue;
if (repl <= 0) {
throw new InvalidRequestException("Invalid replication factor " + repl
+ " <= 0");
}
return repl;
}
/**
* Calculates the absolute expiry time of the directive from the
* {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
* into an absolute time based on the local clock.
*
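   * <p>
   * A worked example with illustrative numbers: if the local clock reads
   * 1,000,000 ms and the caller requests a relative expiry of 60,000 ms, the
   * computed absolute expiry is 1,060,000 ms, provided 60,000 ms does not
   * exceed the pool's maxRelativeExpiryTime.
   *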
* @param info to validate.
* @param maxRelativeExpiryTime of the info's pool.
* @return the expiration time, or the pool's max absolute expiration if the
* info's expiration was not set.
* @throws InvalidRequestException if the info's Expiration is invalid.
*/
private static long validateExpiryTime(CacheDirectiveInfo info,
long maxRelativeExpiryTime) throws InvalidRequestException {
LOG.trace("Validating directive {} pool maxRelativeExpiryTime {}", info,
maxRelativeExpiryTime);
final long now = new Date().getTime();
final long maxAbsoluteExpiryTime = now + maxRelativeExpiryTime;
if (info == null || info.getExpiration() == null) {
return maxAbsoluteExpiryTime;
}
Expiration expiry = info.getExpiration();
    if (expiry.getMillis() < 0L) {
throw new InvalidRequestException("Cannot set a negative expiration: "
+ expiry.getMillis());
}
long relExpiryTime, absExpiryTime;
if (expiry.isRelative()) {
relExpiryTime = expiry.getMillis();
absExpiryTime = now + relExpiryTime;
} else {
absExpiryTime = expiry.getMillis();
relExpiryTime = absExpiryTime - now;
}
// Need to cap the expiry so we don't overflow a long when doing math
if (relExpiryTime > Expiration.MAX_RELATIVE_EXPIRY_MS) {
throw new InvalidRequestException("Expiration "
+ expiry.toString() + " is too far in the future!");
}
// Fail if the requested expiry is greater than the max
if (relExpiryTime > maxRelativeExpiryTime) {
throw new InvalidRequestException("Expiration " + expiry.toString()
+ " exceeds the max relative expiration time of "
+ maxRelativeExpiryTime + " ms.");
}
return absExpiryTime;
}
/**
* Throws an exception if the CachePool does not have enough capacity to
* cache the given path at the replication factor.
*
* @param pool CachePool where the path is being cached
* @param path Path that is being cached
* @param replication Replication factor of the path
* @throws InvalidRequestException if the pool does not have enough capacity
*/
private void checkLimit(CachePool pool, String path,
short replication) throws InvalidRequestException {
CacheDirectiveStats stats = computeNeeded(path, replication);
if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
return;
}
if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool
.getLimit()) {
throw new InvalidRequestException("Caching path " + path + " of size "
+ stats.getBytesNeeded() / replication + " bytes at replication "
+ replication + " would exceed pool " + pool.getPoolName()
+ "'s remaining capacity of "
+ (pool.getLimit() - pool.getBytesNeeded()) + " bytes.");
}
}
/**
* Computes the needed number of bytes and files for a path.
* @return CacheDirectiveStats describing the needed stats for this path
*/
private CacheDirectiveStats computeNeeded(String path, short replication) {
FSDirectory fsDir = namesystem.getFSDirectory();
INode node;
long requestedBytes = 0;
long requestedFiles = 0;
CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();
try {
node = fsDir.getINode(path);
} catch (UnresolvedLinkException e) {
// We don't cache through symlinks
return builder.build();
}
if (node == null) {
return builder.build();
}
if (node.isFile()) {
requestedFiles = 1;
INodeFile file = node.asFile();
requestedBytes = file.computeFileSize();
} else if (node.isDirectory()) {
INodeDirectory dir = node.asDirectory();
ReadOnlyList<INode> children = dir
.getChildrenList(Snapshot.CURRENT_STATE_ID);
requestedFiles = children.size();
for (INode child : children) {
if (child.isFile()) {
requestedBytes += child.asFile().computeFileSize();
}
}
}
return new CacheDirectiveStats.Builder()
.setBytesNeeded(requestedBytes)
.setFilesCached(requestedFiles)
.build();
}
/**
* Get a CacheDirective by ID, validating the ID and that the directive
* exists.
*/
private CacheDirective getById(long id) throws InvalidRequestException {
// Check for invalid IDs.
if (id <= 0) {
throw new InvalidRequestException("Invalid negative ID.");
}
// Find the directive.
CacheDirective directive = directivesById.get(id);
if (directive == null) {
throw new InvalidRequestException("No directive with ID " + id
+ " found.");
}
return directive;
}
/**
* Get a CachePool by name, validating that it exists.
*/
private CachePool getCachePool(String poolName)
throws InvalidRequestException {
CachePool pool = cachePools.get(poolName);
if (pool == null) {
throw new InvalidRequestException("Unknown pool " + poolName);
}
return pool;
}
// RPC handlers
private void addInternal(CacheDirective directive, CachePool pool) {
boolean addedDirective = pool.getDirectiveList().add(directive);
assert addedDirective;
directivesById.put(directive.getId(), directive);
String path = directive.getPath();
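    // Multiple directives may target the same path (for example, from
    // different pools), so directivesByPath maps each path to a list.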
List<CacheDirective> directives = directivesByPath.get(path);
if (directives == null) {
directives = new ArrayList<CacheDirective>(1);
directivesByPath.put(path, directives);
}
directives.add(directive);
// Fix up pool stats
CacheDirectiveStats stats =
computeNeeded(directive.getPath(), directive.getReplication());
directive.addBytesNeeded(stats.getBytesNeeded());
directive.addFilesNeeded(directive.getFilesNeeded());
setNeedsRescan();
}
/**
* Adds a directive, skipping most error checking. This should only be called
* internally in special scenarios like edit log replay.
*/
CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive)
throws InvalidRequestException {
long id = directive.getId();
CacheDirective entry = new CacheDirective(directive);
CachePool pool = cachePools.get(directive.getPool());
addInternal(entry, pool);
if (nextDirectiveId <= id) {
nextDirectiveId = id + 1;
}
return entry.toInfo();
}
public CacheDirectiveInfo addDirective(
CacheDirectiveInfo info, FSPermissionChecker pc, EnumSet<CacheFlag> flags)
throws IOException {
assert namesystem.hasWriteLock();
CacheDirective directive;
try {
CachePool pool = getCachePool(validatePoolName(info));
checkWritePermission(pc, pool);
String path = validatePath(info);
short replication = validateReplication(info, (short)1);
long expiryTime = validateExpiryTime(info, pool.getMaxRelativeExpiryMs());
// Do quota validation if required
if (!flags.contains(CacheFlag.FORCE)) {
checkLimit(pool, path, replication);
}
// All validation passed
// Add a new entry with the next available ID.
long id = getNextDirectiveId();
directive = new CacheDirective(id, path, replication, expiryTime);
addInternal(directive, pool);
} catch (IOException e) {
LOG.warn("addDirective of " + info + " failed: ", e);
throw e;
}
LOG.info("addDirective of {} successful.", info);
return directive.toInfo();
}
/**
* Factory method that makes a new CacheDirectiveInfo by applying fields in a
* CacheDirectiveInfo to an existing CacheDirective.
*
* @param info with some or all fields set.
* @param defaults directive providing default values for unset fields in
* info.
*
* @return new CacheDirectiveInfo of the info applied to the defaults.
*/
private static CacheDirectiveInfo createFromInfoAndDefaults(
CacheDirectiveInfo info, CacheDirective defaults) {
// Initialize the builder with the default values
CacheDirectiveInfo.Builder builder =
new CacheDirectiveInfo.Builder(defaults.toInfo());
// Replace default with new value if present
if (info.getPath() != null) {
builder.setPath(info.getPath());
}
if (info.getReplication() != null) {
builder.setReplication(info.getReplication());
}
if (info.getPool() != null) {
builder.setPool(info.getPool());
}
if (info.getExpiration() != null) {
builder.setExpiration(info.getExpiration());
}
return builder.build();
}
/**
* Modifies a directive, skipping most error checking. This is for careful
* internal use only. modifyDirective can be non-deterministic since its error
* checking depends on current system time, which poses a problem for edit log
* replay.
*/
void modifyDirectiveFromEditLog(CacheDirectiveInfo info)
throws InvalidRequestException {
// Check for invalid IDs.
Long id = info.getId();
if (id == null) {
throw new InvalidRequestException("Must supply an ID.");
}
CacheDirective prevEntry = getById(id);
CacheDirectiveInfo newInfo = createFromInfoAndDefaults(info, prevEntry);
removeInternal(prevEntry);
addInternal(new CacheDirective(newInfo), getCachePool(newInfo.getPool()));
}
public void modifyDirective(CacheDirectiveInfo info,
FSPermissionChecker pc, EnumSet<CacheFlag> flags) throws IOException {
assert namesystem.hasWriteLock();
String idString =
(info.getId() == null) ?
"(null)" : info.getId().toString();
try {
// Check for invalid IDs.
Long id = info.getId();
if (id == null) {
throw new InvalidRequestException("Must supply an ID.");
}
CacheDirective prevEntry = getById(id);
checkWritePermission(pc, prevEntry.getPool());
// Fill in defaults
CacheDirectiveInfo infoWithDefaults =
createFromInfoAndDefaults(info, prevEntry);
CacheDirectiveInfo.Builder builder =
new CacheDirectiveInfo.Builder(infoWithDefaults);
// Do validation
validatePath(infoWithDefaults);
validateReplication(infoWithDefaults, (short)-1);
// Need to test the pool being set here to avoid rejecting a modify for a
// directive that's already been forced into a pool
CachePool srcPool = prevEntry.getPool();
CachePool destPool = getCachePool(validatePoolName(infoWithDefaults));
if (!srcPool.getPoolName().equals(destPool.getPoolName())) {
checkWritePermission(pc, destPool);
if (!flags.contains(CacheFlag.FORCE)) {
checkLimit(destPool, infoWithDefaults.getPath().toUri().getPath(),
infoWithDefaults.getReplication());
}
}
// Verify the expiration against the destination pool
validateExpiryTime(infoWithDefaults, destPool.getMaxRelativeExpiryMs());
// Indicate changes to the CRM
setNeedsRescan();
// Validation passed
removeInternal(prevEntry);
addInternal(new CacheDirective(builder.build()), destPool);
} catch (IOException e) {
LOG.warn("modifyDirective of " + idString + " failed: ", e);
throw e;
}
LOG.info("modifyDirective of {} successfully applied {}.", idString, info);
}
private void removeInternal(CacheDirective directive)
throws InvalidRequestException {
assert namesystem.hasWriteLock();
// Remove the corresponding entry in directivesByPath.
String path = directive.getPath();
List<CacheDirective> directives = directivesByPath.get(path);
if (directives == null || !directives.remove(directive)) {
throw new InvalidRequestException("Failed to locate entry " +
directive.getId() + " by path " + directive.getPath());
}
if (directives.size() == 0) {
directivesByPath.remove(path);
}
    // Fix up the pool stats before removing the directive from its pool
final CachePool pool = directive.getPool();
directive.addBytesNeeded(-directive.getBytesNeeded());
directive.addFilesNeeded(-directive.getFilesNeeded());
directivesById.remove(directive.getId());
pool.getDirectiveList().remove(directive);
assert directive.getPool() == null;
setNeedsRescan();
}
public void removeDirective(long id, FSPermissionChecker pc)
throws IOException {
assert namesystem.hasWriteLock();
try {
CacheDirective directive = getById(id);
checkWritePermission(pc, directive.getPool());
removeInternal(directive);
} catch (IOException e) {
LOG.warn("removeDirective of " + id + " failed: ", e);
throw e;
}
LOG.info("removeDirective of " + id + " successful.");
}
public BatchedListEntries<CacheDirectiveEntry>
listCacheDirectives(long prevId,
CacheDirectiveInfo filter,
FSPermissionChecker pc) throws IOException {
assert namesystem.hasReadLock();
final int NUM_PRE_ALLOCATED_ENTRIES = 16;
String filterPath = null;
if (filter.getPath() != null) {
filterPath = validatePath(filter);
}
if (filter.getReplication() != null) {
throw new InvalidRequestException(
"Filtering by replication is unsupported.");
}
// Querying for a single ID
final Long id = filter.getId();
if (id != null) {
if (!directivesById.containsKey(id)) {
throw new InvalidRequestException("Did not find requested id " + id);
}
// Since we use a tailMap on directivesById, setting prev to id-1 gets
// us the directive with the id (if present)
prevId = id - 1;
}
ArrayList<CacheDirectiveEntry> replies =
new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
int numReplies = 0;
SortedMap<Long, CacheDirective> tailMap =
directivesById.tailMap(prevId + 1);
for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
if (numReplies >= maxListCacheDirectivesNumResponses) {
return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
}
CacheDirective curDirective = cur.getValue();
CacheDirectiveInfo info = cur.getValue().toInfo();
// If the requested ID is present, it should be the first item.
// Hitting this case means the ID is not present, or we're on the second
// item and should break out.
if (id != null &&
!(info.getId().equals(id))) {
break;
}
if (filter.getPool() != null &&
!info.getPool().equals(filter.getPool())) {
continue;
}
if (filterPath != null &&
!info.getPath().toUri().getPath().equals(filterPath)) {
continue;
}
boolean hasPermission = true;
if (pc != null) {
try {
pc.checkPermission(curDirective.getPool(), FsAction.READ);
} catch (AccessControlException e) {
hasPermission = false;
}
}
if (hasPermission) {
replies.add(new CacheDirectiveEntry(info, cur.getValue().toStats()));
numReplies++;
}
}
return new BatchedListEntries<CacheDirectiveEntry>(replies, false);
}
/**
* Create a cache pool.
*
* Only the superuser should be able to call this function.
*
* @param info The info for the cache pool to create.
* @return Information about the cache pool we created.
*/
public CachePoolInfo addCachePool(CachePoolInfo info)
throws IOException {
assert namesystem.hasWriteLock();
CachePool pool;
try {
CachePoolInfo.validate(info);
String poolName = info.getPoolName();
pool = cachePools.get(poolName);
if (pool != null) {
throw new InvalidRequestException("Cache pool " + poolName
+ " already exists.");
}
pool = CachePool.createFromInfoAndDefaults(info);
cachePools.put(pool.getPoolName(), pool);
} catch (IOException e) {
LOG.info("addCachePool of " + info + " failed: ", e);
throw e;
}
LOG.info("addCachePool of {} successful.", info);
return pool.getInfo(true);
}
/**
* Modify a cache pool.
*
* Only the superuser should be able to call this function.
*
* @param info
* The info for the cache pool to modify.
*/
public void modifyCachePool(CachePoolInfo info)
throws IOException {
assert namesystem.hasWriteLock();
StringBuilder bld = new StringBuilder();
try {
CachePoolInfo.validate(info);
String poolName = info.getPoolName();
CachePool pool = cachePools.get(poolName);
if (pool == null) {
throw new InvalidRequestException("Cache pool " + poolName
+ " does not exist.");
}
String prefix = "";
if (info.getOwnerName() != null) {
pool.setOwnerName(info.getOwnerName());
bld.append(prefix).
append("set owner to ").append(info.getOwnerName());
prefix = "; ";
}
if (info.getGroupName() != null) {
pool.setGroupName(info.getGroupName());
bld.append(prefix).
append("set group to ").append(info.getGroupName());
prefix = "; ";
}
if (info.getMode() != null) {
pool.setMode(info.getMode());
bld.append(prefix).append("set mode to " + info.getMode());
prefix = "; ";
}
if (info.getLimit() != null) {
pool.setLimit(info.getLimit());
bld.append(prefix).append("set limit to " + info.getLimit());
prefix = "; ";
        // A new limit changes the stats, so a rescan needs to be scheduled
setNeedsRescan();
}
if (info.getMaxRelativeExpiryMs() != null) {
final Long maxRelativeExpiry = info.getMaxRelativeExpiryMs();
pool.setMaxRelativeExpiryMs(maxRelativeExpiry);
bld.append(prefix).append("set maxRelativeExpiry to "
+ maxRelativeExpiry);
prefix = "; ";
}
if (prefix.isEmpty()) {
bld.append("no changes.");
}
} catch (IOException e) {
LOG.info("modifyCachePool of " + info + " failed: ", e);
throw e;
}
LOG.info("modifyCachePool of {} successful; {}", info.getPoolName(),
bld.toString());
}
/**
* Remove a cache pool.
*
* Only the superuser should be able to call this function.
*
* @param poolName
* The name for the cache pool to remove.
*/
public void removeCachePool(String poolName)
throws IOException {
assert namesystem.hasWriteLock();
try {
CachePoolInfo.validateName(poolName);
CachePool pool = cachePools.remove(poolName);
if (pool == null) {
throw new InvalidRequestException(
"Cannot remove non-existent cache pool " + poolName);
}
// Remove all directives in this pool.
Iterator<CacheDirective> iter = pool.getDirectiveList().iterator();
while (iter.hasNext()) {
CacheDirective directive = iter.next();
directivesByPath.remove(directive.getPath());
directivesById.remove(directive.getId());
iter.remove();
}
setNeedsRescan();
} catch (IOException e) {
LOG.info("removeCachePool of " + poolName + " failed: ", e);
throw e;
}
LOG.info("removeCachePool of " + poolName + " successful.");
}
public BatchedListEntries<CachePoolEntry>
listCachePools(FSPermissionChecker pc, String prevKey) {
assert namesystem.hasReadLock();
final int NUM_PRE_ALLOCATED_ENTRIES = 16;
ArrayList<CachePoolEntry> results =
new ArrayList<CachePoolEntry>(NUM_PRE_ALLOCATED_ENTRIES);
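    // Resume strictly after the last key returned to the client; the
    // exclusive tailMap guarantees a pool is never listed twice across pages.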
SortedMap<String, CachePool> tailMap = cachePools.tailMap(prevKey, false);
int numListed = 0;
for (Entry<String, CachePool> cur : tailMap.entrySet()) {
if (numListed++ >= maxListCachePoolsResponses) {
return new BatchedListEntries<CachePoolEntry>(results, true);
}
results.add(cur.getValue().getEntry(pc));
}
return new BatchedListEntries<CachePoolEntry>(results, false);
}
public void setCachedLocations(LocatedBlock block) {
CachedBlock cachedBlock =
new CachedBlock(block.getBlock().getBlockId(),
(short)0, false);
cachedBlock = cachedBlocks.get(cachedBlock);
if (cachedBlock == null) {
return;
}
List<DatanodeDescriptor> cachedDNs = cachedBlock.getDatanodes(Type.CACHED);
for (DatanodeDescriptor datanode : cachedDNs) {
// Filter out cached blocks that do not have a backing replica.
//
// This should not happen since it means the CacheManager thinks
// something is cached that does not exist, but it's a safety
// measure.
boolean found = false;
for (DatanodeInfo loc : block.getLocations()) {
if (loc.equals(datanode)) {
block.addCachedLoc(loc);
found = true;
break;
}
}
if (!found) {
LOG.warn("Datanode {} is not a valid cache location for block {} "
+ "because that node does not have a backing replica!",
datanode, block.getBlock().getBlockName());
}
}
}
public final void processCacheReport(final DatanodeID datanodeID,
final List<Long> blockIds) throws IOException {
namesystem.writeLock();
final long startTime = Time.monotonicNow();
final long endTime;
try {
final DatanodeDescriptor datanode =
blockManager.getDatanodeManager().getDatanode(datanodeID);
if (datanode == null || !datanode.isAlive) {
throw new IOException(
"processCacheReport from dead or unregistered datanode: " +
datanode);
}
processCacheReportImpl(datanode, blockIds);
} finally {
endTime = Time.monotonicNow();
namesystem.writeUnlock();
}
// Log the block report processing stats from Namenode perspective
final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
if (metrics != null) {
metrics.addCacheBlockReport((int) (endTime - startTime));
}
LOG.debug("Processed cache report from {}, blocks: {}, " +
"processing time: {} msecs", datanodeID, blockIds.size(),
(endTime - startTime));
}
private void processCacheReportImpl(final DatanodeDescriptor datanode,
final List<Long> blockIds) {
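    // A cache report is a full snapshot of the blocks cached on the datanode,
    // so rebuild its CACHED list from scratch instead of merging into it.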
CachedBlocksList cached = datanode.getCached();
cached.clear();
CachedBlocksList cachedList = datanode.getCached();
CachedBlocksList pendingCachedList = datanode.getPendingCached();
for (Iterator<Long> iter = blockIds.iterator(); iter.hasNext(); ) {
long blockId = iter.next();
LOG.trace("Cache report from datanode {} has block {}", datanode,
blockId);
CachedBlock cachedBlock =
new CachedBlock(blockId, (short)0, false);
CachedBlock prevCachedBlock = cachedBlocks.get(cachedBlock);
// Add the block ID from the cache report to the cachedBlocks map
// if it's not already there.
if (prevCachedBlock != null) {
cachedBlock = prevCachedBlock;
} else {
cachedBlocks.put(cachedBlock);
LOG.trace("Added block {} to cachedBlocks", cachedBlock);
}
// Add the block to the datanode's implicit cached block list
// if it's not already there. Similarly, remove it from the pending
// cached block list if it exists there.
if (!cachedBlock.isPresent(cachedList)) {
cachedList.add(cachedBlock);
LOG.trace("Added block {} to CACHED list.", cachedBlock);
}
if (cachedBlock.isPresent(pendingCachedList)) {
pendingCachedList.remove(cachedBlock);
LOG.trace("Removed block {} from PENDING_CACHED list.", cachedBlock);
}
}
}
/**
* Saves the current state of the CacheManager to the DataOutput. Used
* to persist CacheManager state in the FSImage.
* @param out DataOutput to persist state
* @param sdPath path of the storage directory
* @throws IOException
*/
public void saveStateCompat(DataOutputStream out, String sdPath)
throws IOException {
serializerCompat.save(out, sdPath);
}
public PersistState saveState() throws IOException {
ArrayList<CachePoolInfoProto> pools = Lists
.newArrayListWithCapacity(cachePools.size());
ArrayList<CacheDirectiveInfoProto> directives = Lists
.newArrayListWithCapacity(directivesById.size());
for (CachePool pool : cachePools.values()) {
CachePoolInfo p = pool.getInfo(true);
CachePoolInfoProto.Builder b = CachePoolInfoProto.newBuilder()
.setPoolName(p.getPoolName());
if (p.getOwnerName() != null)
b.setOwnerName(p.getOwnerName());
if (p.getGroupName() != null)
b.setGroupName(p.getGroupName());
if (p.getMode() != null)
b.setMode(p.getMode().toShort());
if (p.getLimit() != null)
b.setLimit(p.getLimit());
pools.add(b.build());
}
for (CacheDirective directive : directivesById.values()) {
CacheDirectiveInfo info = directive.toInfo();
CacheDirectiveInfoProto.Builder b = CacheDirectiveInfoProto.newBuilder()
.setId(info.getId());
if (info.getPath() != null) {
b.setPath(info.getPath().toUri().getPath());
}
if (info.getReplication() != null) {
b.setReplication(info.getReplication());
}
if (info.getPool() != null) {
b.setPool(info.getPool());
}
Expiration expiry = info.getExpiration();
if (expiry != null) {
assert (!expiry.isRelative());
b.setExpiration(PBHelper.convert(expiry));
}
directives.add(b.build());
}
CacheManagerSection s = CacheManagerSection.newBuilder()
.setNextDirectiveId(nextDirectiveId).setNumPools(pools.size())
.setNumDirectives(directives.size()).build();
return new PersistState(s, pools, directives);
}
/**
* Reloads CacheManager state from the passed DataInput. Used during namenode
* startup to restore CacheManager state from an FSImage.
* @param in DataInput from which to restore state
* @throws IOException
*/
public void loadStateCompat(DataInput in) throws IOException {
serializerCompat.load(in);
}
public void loadState(PersistState s) throws IOException {
nextDirectiveId = s.section.getNextDirectiveId();
for (CachePoolInfoProto p : s.pools) {
CachePoolInfo info = new CachePoolInfo(p.getPoolName());
if (p.hasOwnerName())
info.setOwnerName(p.getOwnerName());
if (p.hasGroupName())
info.setGroupName(p.getGroupName());
if (p.hasMode())
info.setMode(new FsPermission((short) p.getMode()));
if (p.hasLimit())
info.setLimit(p.getLimit());
addCachePool(info);
}
for (CacheDirectiveInfoProto p : s.directives) {
// Get pool reference by looking it up in the map
final String poolName = p.getPool();
CacheDirective directive = new CacheDirective(p.getId(), new Path(
p.getPath()).toUri().getPath(), (short) p.getReplication(), p
.getExpiration().getMillis());
addCacheDirective(poolName, directive);
}
}
private void addCacheDirective(final String poolName,
final CacheDirective directive) throws IOException {
CachePool pool = cachePools.get(poolName);
if (pool == null) {
throw new IOException("Directive refers to pool " + poolName
+ ", which does not exist.");
}
boolean addedDirective = pool.getDirectiveList().add(directive);
assert addedDirective;
if (directivesById.put(directive.getId(), directive) != null) {
throw new IOException("A directive with ID " + directive.getId()
+ " already exists");
}
List<CacheDirective> directives = directivesByPath.get(directive.getPath());
if (directives == null) {
directives = new LinkedList<CacheDirective>();
directivesByPath.put(directive.getPath(), directives);
}
directives.add(directive);
}
private final class SerializerCompat {
private void save(DataOutputStream out, String sdPath) throws IOException {
out.writeLong(nextDirectiveId);
savePools(out, sdPath);
saveDirectives(out, sdPath);
}
private void load(DataInput in) throws IOException {
nextDirectiveId = in.readLong();
// pools need to be loaded first since directives point to their parent pool
loadPools(in);
loadDirectives(in);
}
/**
* Save cache pools to fsimage
*/
private void savePools(DataOutputStream out,
String sdPath) throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.CACHE_POOLS, sdPath);
prog.beginStep(Phase.SAVING_CHECKPOINT, step);
prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size());
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
out.writeInt(cachePools.size());
for (CachePool pool: cachePools.values()) {
FSImageSerialization.writeCachePoolInfo(out, pool.getInfo(true));
counter.increment();
}
prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
/*
* Save cache entries to fsimage
*/
private void saveDirectives(DataOutputStream out, String sdPath)
throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.CACHE_ENTRIES, sdPath);
prog.beginStep(Phase.SAVING_CHECKPOINT, step);
prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size());
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
out.writeInt(directivesById.size());
for (CacheDirective directive : directivesById.values()) {
FSImageSerialization.writeCacheDirectiveInfo(out, directive.toInfo());
counter.increment();
}
prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
/**
* Load cache pools from fsimage
*/
private void loadPools(DataInput in)
throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.CACHE_POOLS);
prog.beginStep(Phase.LOADING_FSIMAGE, step);
int numberOfPools = in.readInt();
prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfPools);
Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
for (int i = 0; i < numberOfPools; i++) {
addCachePool(FSImageSerialization.readCachePoolInfo(in));
counter.increment();
}
prog.endStep(Phase.LOADING_FSIMAGE, step);
}
/**
* Load cache directives from the fsimage
*/
private void loadDirectives(DataInput in) throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.CACHE_ENTRIES);
prog.beginStep(Phase.LOADING_FSIMAGE, step);
int numDirectives = in.readInt();
prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
for (int i = 0; i < numDirectives; i++) {
CacheDirectiveInfo info = FSImageSerialization.readCacheDirectiveInfo(in);
// Get pool reference by looking it up in the map
final String poolName = info.getPool();
CacheDirective directive =
new CacheDirective(info.getId(), info.getPath().toUri().getPath(),
info.getReplication(), info.getExpiration().getAbsoluteMillis());
addCacheDirective(poolName, directive);
counter.increment();
}
prog.endStep(Phase.LOADING_FSIMAGE, step);
}
}
public void waitForRescanIfNeeded() {
crmLock.lock();
try {
if (monitor != null) {
monitor.waitForRescanIfNeeded();
}
} finally {
crmLock.unlock();
}
}
private void setNeedsRescan() {
crmLock.lock();
try {
if (monitor != null) {
monitor.setNeedsRescan();
}
} finally {
crmLock.unlock();
}
}
@VisibleForTesting
public Thread getCacheReplicationMonitor() {
crmLock.lock();
try {
return monitor;
} finally {
crmLock.unlock();
}
}
}
| 44,667 | 34.820369 | 118 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckableNameNodeResource.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * Implementers of this interface represent a NN resource whose availability can be
* checked. A resource can be either "required" or "redundant". All required
* resources must be available for the NN to continue operating. The NN will
* continue to operate as long as *any* redundant resource is available.
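 * <p>
 * A hypothetical implementation (illustrative only, not part of HDFS) for a
 * local storage directory might look like:
 * <pre>{@code
 *   class LocalDirResource implements CheckableNameNodeResource {
 *     private final java.io.File dir;
 *     private final boolean required;
 *     LocalDirResource(java.io.File dir, boolean required) {
 *       this.dir = dir;
 *       this.required = required;
 *     }
 *     public boolean isResourceAvailable() {
 *       return dir.getUsableSpace() > 0; // assumed free-space criterion
 *     }
 *     public boolean isRequired() {
 *       return required;
 *     }
 *   }
 * }</pre>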
*/
@InterfaceAudience.Private
interface CheckableNameNodeResource {
/**
* Is this resource currently available.
*
* @return true if and only if the resource in question is available.
*/
public boolean isResourceAvailable();
/**
* Is this resource required.
*
* @return true if and only if the resource in question is required for NN operation.
*/
public boolean isRequired();
}
| 1,650 | 34.891304 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.util.Enumeration;
import java.util.List;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ServletUtil;
import org.mortbay.jetty.InclusiveByteRange;
@InterfaceAudience.Private
public class StreamFile extends DfsServlet {
/** for java.io.Serializable */
private static final long serialVersionUID = 1L;
public static final String CONTENT_LENGTH = "Content-Length";
/* Return a DFS client to use to make the given HTTP request */
protected DFSClient getDFSClient(HttpServletRequest request)
throws IOException, InterruptedException {
final Configuration conf =
(Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
UserGroupInformation ugi = getUGI(request, conf);
final ServletContext context = getServletContext();
final DataNode datanode = (DataNode) context.getAttribute("datanode");
return DatanodeJspHelper.getDFSClient(request, datanode, conf, ugi);
}
@Override
@SuppressWarnings("unchecked")
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
final String path = ServletUtil.getDecodedPath(request, "/streamFile");
final String rawPath = ServletUtil.getRawPath(request, "/streamFile");
final String filename = JspHelper.validatePath(path);
final String rawFilename = JspHelper.validatePath(rawPath);
if (filename == null) {
response.setContentType("text/plain");
PrintWriter out = response.getWriter();
out.print("Invalid input");
return;
}
Enumeration<String> reqRanges = request.getHeaders("Range");
if (reqRanges != null && !reqRanges.hasMoreElements()) {
reqRanges = null;
}
DFSClient dfs;
try {
dfs = getDFSClient(request);
} catch (InterruptedException e) {
response.sendError(400, e.getMessage());
return;
}
HdfsDataInputStream in = null;
OutputStream out = null;
try {
in = dfs.createWrappedInputStream(dfs.open(filename));
out = response.getOutputStream();
final long fileLen = in.getVisibleLength();
if (reqRanges != null) {
List<InclusiveByteRange> ranges =
InclusiveByteRange.satisfiableRanges(reqRanges, fileLen);
StreamFile.sendPartialData(in, out, response, fileLen, ranges);
} else {
// No ranges, so send entire file
response.setHeader("Content-Disposition", "attachment; filename=\"" +
rawFilename + "\"");
response.setContentType("application/octet-stream");
response.setHeader(CONTENT_LENGTH, "" + fileLen);
StreamFile.copyFromOffset(in, out, 0L, fileLen);
}
in.close();
in = null;
out.close();
out = null;
dfs.close();
dfs = null;
} catch (IOException ioe) {
if (LOG.isDebugEnabled()) {
LOG.debug("response.isCommitted()=" + response.isCommitted(), ioe);
}
throw ioe;
} finally {
IOUtils.cleanup(LOG, in);
IOUtils.cleanup(LOG, out);
IOUtils.cleanup(LOG, dfs);
}
}
/**
   * Send a partial content response with the given range. If there are no
   * satisfiable ranges, or if multiple ranges are requested (which is
   * unsupported), respond with 416 Requested Range Not Satisfiable.
*
* @param in stream to read from
* @param out stream to write to
* @param response http response to use
* @param contentLength for the response header
   * @param ranges the requested byte ranges to respond with
* @throws IOException on error sending the response
*/
static void sendPartialData(FSDataInputStream in,
OutputStream out,
HttpServletResponse response,
long contentLength,
List<InclusiveByteRange> ranges)
throws IOException {
if (ranges == null || ranges.size() != 1) {
response.setContentLength(0);
response.setStatus(HttpServletResponse.SC_REQUESTED_RANGE_NOT_SATISFIABLE);
response.setHeader("Content-Range",
InclusiveByteRange.to416HeaderRangeString(contentLength));
} else {
InclusiveByteRange singleSatisfiableRange = ranges.get(0);
long singleLength = singleSatisfiableRange.getSize(contentLength);
response.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT);
response.setHeader("Content-Range",
singleSatisfiableRange.toHeaderRangeString(contentLength));
copyFromOffset(in, out,
singleSatisfiableRange.getFirst(contentLength),
singleLength);
}
}
/* Copy count bytes at the given offset from one stream to another */
static void copyFromOffset(FSDataInputStream in, OutputStream out,
long offset, long count) throws IOException {
in.seek(offset);
IOUtils.copyBytes(in, out, count, false);
}
}
| 6,509 | 37.52071 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
/**
* A JournalManager implementation that uses RPCs to log transactions
* to a BackupNode.
*/
class BackupJournalManager implements JournalManager {
private final NamenodeRegistration bnReg;
private final JournalInfo journalInfo;
BackupJournalManager(NamenodeRegistration bnReg,
NamenodeRegistration nnReg) {
journalInfo = new JournalInfo(nnReg.getLayoutVersion(),
nnReg.getClusterID(), nnReg.getNamespaceID());
this.bnReg = bnReg;
}
@Override
public void format(NamespaceInfo nsInfo) {
// format() should only get called at startup, before any BNs
// can register with the NN.
throw new UnsupportedOperationException(
"BackupNode journal should never get formatted");
}
@Override
public boolean hasSomeData() {
throw new UnsupportedOperationException();
}
@Override
public EditLogOutputStream startLogSegment(long txId, int layoutVersion)
throws IOException {
EditLogBackupOutputStream stm = new EditLogBackupOutputStream(bnReg,
journalInfo);
stm.startLogSegment(txId);
return stm;
}
@Override
public void finalizeLogSegment(long firstTxId, long lastTxId)
throws IOException {
}
@Override
public void setOutputBufferCapacity(int size) {
}
@Override
public void purgeLogsOlderThan(long minTxIdToKeep)
throws IOException {
}
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
long fromTxnId, boolean inProgressOk) {
// This JournalManager is never used for input. Therefore it cannot
// return any transactions
}
@Override
public void recoverUnfinalizedSegments() throws IOException {
}
@Override
public void close() throws IOException {}
public boolean matchesRegistration(NamenodeRegistration bnReg) {
return bnReg.getAddress().equals(this.bnReg.getAddress());
}
@Override
public String toString() {
return "BackupJournalManager";
}
@Override
public void discardSegments(long startTxId) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void doPreUpgrade() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void doUpgrade(Storage storage) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void doFinalize() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage,
int targetLayoutVersion) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public void doRollback() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long getJournalCTime() throws IOException {
throw new UnsupportedOperationException();
}
}
| 4,086 | 28.192857 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Caches frequently used names to facilitate reuse.
* (example: byte[] representation of the file name in {@link INode}).
*
 * This class is used by initially adding all the file names. The cache
 * tracks the number of times a name is used in a transient map, and promotes
 * a name used more than {@code useThreshold} times to the cache.
 *
 * Once all the names are added, {@link #initialized()} should be called to
 * finish initialization. The transient map where the use count is tracked is
 * discarded and the cache is ready for use.
*
* <p>
* This class must be synchronized externally.
*
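 * <p>
 * A minimal usage sketch (illustrative only; the element type and threshold
 * shown here are arbitrary):
 * <pre>{@code
 *   NameCache<String> nc = new NameCache<String>(4);
 *   for (String name : allNames) {
 *     nc.put(name);       // track use counts; frequent names get promoted
 *   }
 *   nc.initialized();     // discard the transient map; the cache is ready
 *   String cached = nc.put("someName"); // cached instance, or null
 * }</pre>
 *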
* @param <K> name to be added to the cache
*/
class NameCache<K> {
/**
* Class for tracking use count of a name
*/
private class UseCount {
int count;
final K value; // Internal value for the name
UseCount(final K value) {
count = 1;
this.value = value;
}
void increment() {
count++;
}
int get() {
return count;
}
}
static final Log LOG = LogFactory.getLog(NameCache.class.getName());
  /** true once initialization has finished */
private boolean initialized = false;
  /** names used more than {@code useThreshold} times are added to the cache */
private final int useThreshold;
  /** Number of times a cache lookup was successful */
private int lookups = 0;
/** Cached names */
final HashMap<K, K> cache = new HashMap<K, K>();
  /** Names with the number of occurrences tracked during initialization */
Map<K, UseCount> transientMap = new HashMap<K, UseCount>();
/**
* Constructor
   * @param useThreshold names occurring more than this many times are
   *                      promoted to the cache
*/
NameCache(int useThreshold) {
this.useThreshold = useThreshold;
}
/**
   * Add a given name to the cache, or track its use count if it does not yet
   * exist. If the name already exists, then the internal value is returned.
*
* @param name name to be looked up
* @return internal value for the name if found; otherwise null
*/
K put(final K name) {
K internal = cache.get(name);
if (internal != null) {
lookups++;
return internal;
}
// Track the usage count only during initialization
if (!initialized) {
UseCount useCount = transientMap.get(name);
if (useCount != null) {
useCount.increment();
if (useCount.get() >= useThreshold) {
promote(name);
}
return useCount.value;
}
useCount = new UseCount(name);
transientMap.put(name, useCount);
}
return null;
}
/**
* Lookup count when a lookup for a name returned cached object
* @return number of successful lookups
*/
int getLookupCount() {
return lookups;
}
/**
* Size of the cache
* @return Number of names stored in the cache
*/
int size() {
return cache.size();
}
/**
* Mark the name cache as initialized. The use count is no longer tracked
* and the transient map used for initializing the cache is discarded to
* save heap space.
*/
void initialized() {
LOG.info("initialized with " + size() + " entries " + lookups + " lookups");
this.initialized = true;
transientMap.clear();
transientMap = null;
}
/** Promote a frequently used name to the cache */
private void promote(final K name) {
transientMap.remove(name);
cache.put(name, name);
lookups += useThreshold;
}
public void reset() {
initialized = false;
cache.clear();
if (transientMap == null) {
transientMap = new HashMap<K, UseCount>();
} else {
transientMap.clear();
}
}
}
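
/**
 * Illustrative usage sketch (for exposition only, not used by the NameNode):
 * exercises the promote-on-threshold behavior described in the class javadoc.
 * The threshold and file names below are arbitrary.
 */
class NameCacheUsageExample {
  public static void main(String[] args) {
    // Promote a name once it has been seen useThreshold (here: 2) times.
    NameCache<String> cache = new NameCache<String>(2);
    cache.put("part-00000"); // first use: tracked in the transient map only
    cache.put("part-00001"); // tracked
    cache.put("part-00000"); // second use: promoted into the cache
    cache.initialized(); // discard the transient map; cache is now fixed
    System.out.println("cached names: " + cache.size()); // 1
    System.out.println("hit: " + (cache.put("part-00000") != null)); // true
    System.out.println("miss: " + (cache.put("other-name") == null)); // true
  }
}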
| 4,609 | 26.771084 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.web.HftpFileSystem;
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.VersionInfo;
import org.znerd.xmlenc.*;
import java.io.IOException;
import java.io.PrintWriter;
import java.security.PrivilegedExceptionAction;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.Stack;
import java.util.regex.Pattern;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
* Obtain meta-information about a filesystem.
* @see org.apache.hadoop.hdfs.web.HftpFileSystem
*/
@InterfaceAudience.Private
public class ListPathsServlet extends DfsServlet {
/** For java.io.Serializable */
private static final long serialVersionUID = 1L;
public static final ThreadLocal<SimpleDateFormat> df =
new ThreadLocal<SimpleDateFormat>() {
@Override
protected SimpleDateFormat initialValue() {
return HftpFileSystem.getDateFormat();
}
};
/**
* Write a node to output.
* Node information includes path, modification, permission, owner and group.
* For files, it also includes size, replication and block-size.
*/
static void writeInfo(final Path fullpath, final HdfsFileStatus i,
final XMLOutputter doc) throws IOException {
final SimpleDateFormat ldf = df.get();
doc.startTag(i.isDir() ? "directory" : "file");
doc.attribute("path", fullpath.toUri().getPath());
doc.attribute("modified", ldf.format(new Date(i.getModificationTime())));
doc.attribute("accesstime", ldf.format(new Date(i.getAccessTime())));
if (!i.isDir()) {
doc.attribute("size", String.valueOf(i.getLen()));
doc.attribute("replication", String.valueOf(i.getReplication()));
doc.attribute("blocksize", String.valueOf(i.getBlockSize()));
}
doc.attribute("permission", (i.isDir()? "d": "-") + i.getPermission());
doc.attribute("owner", i.getOwner());
doc.attribute("group", i.getGroup());
doc.endTag();
}
/**
* Build a map from the query string, setting values and defaults.
*/
protected Map<String,String> buildRoot(HttpServletRequest request,
XMLOutputter doc) {
final String path = ServletUtil.getDecodedPath(request, "/listPaths");
final String exclude = request.getParameter("exclude") != null
? request.getParameter("exclude") : "";
final String filter = request.getParameter("filter") != null
? request.getParameter("filter") : ".*";
final boolean recur = request.getParameter("recursive") != null
&& "yes".equals(request.getParameter("recursive"));
Map<String, String> root = new HashMap<String, String>();
root.put("path", path);
root.put("recursive", recur ? "yes" : "no");
root.put("filter", filter);
root.put("exclude", exclude);
root.put("time", df.get().format(new Date()));
root.put("version", VersionInfo.getVersion());
return root;
}
/**
* Service a GET request as described below.
* Request:
* {@code
* GET http://<nn>:<port>/listPaths[/<path>][<?option>[&option]*] HTTP/1.1
* }
*
   * Where <i>option</i> (default) is one of:
* recursive ("no")
* filter (".*")
* exclude ("\..*\.crc")
*
* Response: A flat list of files/directories in the following format:
* {@code
* <listing path="..." recursive="(yes|no)" filter="..."
* time="yyyy-MM-dd hh:mm:ss UTC" version="...">
* <directory path="..." modified="yyyy-MM-dd hh:mm:ss"/>
* <file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ" accesstime="yyyy-MM-dd'T'hh:mm:ssZ"
* blocksize="..."
* replication="..." size="..."/>
* </listing>
* }
*/
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
final PrintWriter out = response.getWriter();
final XMLOutputter doc = new XMLOutputter(out, "UTF-8");
final Map<String, String> root = buildRoot(request, doc);
final String path = root.get("path");
final String filePath = ServletUtil.getDecodedPath(request, "/listPaths");
try {
final boolean recur = "yes".equals(root.get("recursive"));
final Pattern filter = Pattern.compile(root.get("filter"));
final Pattern exclude = Pattern.compile(root.get("exclude"));
final Configuration conf =
(Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
getUGI(request, conf).doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException {
ClientProtocol nn = createNameNodeProxy();
doc.declaration();
doc.startTag("listing");
for (Map.Entry<String, String> m : root.entrySet()) {
doc.attribute(m.getKey(), m.getValue());
}
HdfsFileStatus base = nn.getFileInfo(filePath);
if ((base != null) && base.isDir()) {
writeInfo(base.getFullPath(new Path(path)), base, doc);
}
Stack<String> pathstack = new Stack<String>();
pathstack.push(path);
while (!pathstack.empty()) {
String p = pathstack.pop();
try {
byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
DirectoryListing thisListing;
do {
assert lastReturnedName != null;
thisListing = nn.getListing(p, lastReturnedName, false);
if (thisListing == null) {
if (lastReturnedName.length == 0) {
                      LOG.warn("ListPathsServlet - Path " + p
                          + " does not exist");
}
break;
}
HdfsFileStatus[] listing = thisListing.getPartialListing();
for (HdfsFileStatus i : listing) {
final Path fullpath = i.getFullPath(new Path(p));
final String localName = fullpath.getName();
if (exclude.matcher(localName).matches()
|| !filter.matcher(localName).matches()) {
continue;
}
if (recur && i.isDir()) {
pathstack.push(new Path(p, localName).toUri().getPath());
}
writeInfo(fullpath, i, doc);
}
lastReturnedName = thisListing.getLastName();
} while (thisListing.hasMore());
} catch (IOException re) {
writeXml(re, p, doc);
}
}
return null;
}
});
} catch(IOException ioe) {
writeXml(ioe, path, doc);
} catch (InterruptedException e) {
      LOG.warn("ListPathsServlet encountered InterruptedException", e);
response.sendError(400, e.getMessage());
} finally {
if (doc != null) {
doc.endDocument();
}
if (out != null) {
out.close();
}
}
}
}
| 8,335 | 37.063927 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.hdfs.util.EnumCounters;
/**
* The content types such as file, directory and symlink to be computed.
*/
public enum Content {
/** The number of files. */
FILE,
/** The number of directories. */
DIRECTORY,
/** The number of symlinks. */
SYMLINK,
  /** The total file length in bytes. */
  LENGTH,
  /** The total disk space usage in bytes, including replication. */
DISKSPACE,
/** The number of snapshots. */
SNAPSHOT,
/** The number of snapshottable directories. */
SNAPSHOTTABLE_DIRECTORY;
/** Content counts. */
public static class Counts extends EnumCounters<Content> {
public static Counts newInstance() {
return new Counts();
}
private Counts() {
super(Content.class);
}
}
private static final EnumCounters.Factory<Content, Counts> FACTORY
= new EnumCounters.Factory<Content, Counts>() {
@Override
public Counts newInstance() {
return Counts.newInstance();
}
};
/** A map of counters for the current state and the snapshots. */
public static class CountsMap
extends EnumCounters.Map<CountsMap.Key, Content, Counts> {
/** The key type of the map. */
public static enum Key { CURRENT, SNAPSHOT }
CountsMap() {
super(FACTORY);
}
}
}
| 2,148 | 28.438356 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/VersionInfoMXBean.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface VersionInfoMXBean {
/**
* @return the compilation information which contains date, user and branch
*/
public String getCompileInfo();
/**
* @return the software version
*/
public String getSoftwareVersion();
}
| 1,280 | 34.583333 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.util.EnumCounters;
import com.google.common.base.Preconditions;
/**
 * The attributes of an inode directory.
*/
@InterfaceAudience.Private
public interface INodeDirectoryAttributes extends INodeAttributes {
public QuotaCounts getQuotaCounts();
public boolean metadataEquals(INodeDirectoryAttributes other);
/** A copy of the inode directory attributes */
public static class SnapshotCopy extends INodeAttributes.SnapshotCopy
implements INodeDirectoryAttributes {
public SnapshotCopy(byte[] name, PermissionStatus permissions,
AclFeature aclFeature, long modificationTime,
XAttrFeature xAttrsFeature) {
super(name, permissions, aclFeature, modificationTime, 0L, xAttrsFeature);
}
public SnapshotCopy(INodeDirectory dir) {
super(dir);
}
@Override
public QuotaCounts getQuotaCounts() {
return new QuotaCounts.Builder().nameSpace(-1).
storageSpace(-1).typeSpaces(-1).build();
}
public boolean isDirectory() {
return true;
}
@Override
public boolean metadataEquals(INodeDirectoryAttributes other) {
return other != null
&& getQuotaCounts().equals(other.getQuotaCounts())
&& getPermissionLong() == other.getPermissionLong()
&& getAclFeature() == other.getAclFeature()
&& getXAttrFeature() == other.getXAttrFeature();
}
}
public static class CopyWithQuota extends INodeDirectoryAttributes.SnapshotCopy {
private QuotaCounts quota;
public CopyWithQuota(byte[] name, PermissionStatus permissions,
AclFeature aclFeature, long modificationTime, long nsQuota,
long dsQuota, EnumCounters<StorageType> typeQuotas, XAttrFeature xAttrsFeature) {
super(name, permissions, aclFeature, modificationTime, xAttrsFeature);
this.quota = new QuotaCounts.Builder().nameSpace(nsQuota).
storageSpace(dsQuota).typeSpaces(typeQuotas).build();
}
public CopyWithQuota(INodeDirectory dir) {
super(dir);
Preconditions.checkArgument(dir.isQuotaSet());
final QuotaCounts q = dir.getQuotaCounts();
this.quota = new QuotaCounts.Builder().quotaCount(q).build();
}
@Override
public QuotaCounts getQuotaCounts() {
return new QuotaCounts.Builder().quotaCount(quota).build();
}
}
}
| 3,353 | 35.064516 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.io.Closeable;
import static org.apache.hadoop.util.Time.monotonicNow;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A generic abstract class to support journaling of edits logs into
* a persistent storage.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class EditLogOutputStream implements Closeable {
// these are statistics counters
private long numSync; // number of sync(s) to disk
private long totalTimeSync; // total time to sync
public EditLogOutputStream() throws IOException {
numSync = totalTimeSync = 0;
}
/**
* Write edits log operation to the stream.
*
* @param op operation
* @throws IOException
*/
abstract public void write(FSEditLogOp op) throws IOException;
/**
* Write raw data to an edit log. This data should already have
* the transaction ID, checksum, etc included. It is for use
* within the BackupNode when replicating edits from the
* NameNode.
*
* @param bytes the bytes to write.
* @param offset offset in the bytes to write from
* @param length number of bytes to write
* @throws IOException
*/
abstract public void writeRaw(byte[] bytes, int offset, int length)
throws IOException;
/**
* Create and initialize underlying persistent edits log storage.
*
* @param layoutVersion The LayoutVersion of the journal
* @throws IOException
*/
abstract public void create(int layoutVersion) throws IOException;
/**
* Close the journal.
* @throws IOException if the journal can't be closed,
* or if there are unflushed edits
*/
@Override
abstract public void close() throws IOException;
/**
* Close the stream without necessarily flushing any pending data.
* This may be called after a previous write or close threw an exception.
*/
abstract public void abort() throws IOException;
/**
* All data that has been written to the stream so far will be flushed.
   * New data can still be written to the stream while flushing is performed.
*/
abstract public void setReadyToFlush() throws IOException;
/**
   * Flush and sync all data that has been made ready to be flushed by
   * {@link #setReadyToFlush()} into the underlying persistent store.
* @param durable if true, the edits should be made truly durable before
* returning
* @throws IOException
*/
abstract protected void flushAndSync(boolean durable) throws IOException;
/**
* Flush data to persistent store.
* Collect sync metrics.
*/
public void flush() throws IOException {
flush(true);
}
public void flush(boolean durable) throws IOException {
numSync++;
long start = monotonicNow();
flushAndSync(durable);
long end = monotonicNow();
totalTimeSync += (end - start);
}
/**
   * Implement the policy for when to automatically sync the buffered edits
   * log. The buffered edits can be flushed when the buffer becomes full or
   * a certain period of time has elapsed.
*
* @return true if the buffered data should be automatically synced to disk
*/
public boolean shouldForceSync() {
return false;
}
/**
* Return total time spent in {@link #flushAndSync(boolean)}
*/
long getTotalSyncTime() {
return totalTimeSync;
}
/**
* Return number of calls to {@link #flushAndSync(boolean)}
*/
protected long getNumSync() {
return numSync;
}
/**
* @return a short text snippet suitable for describing the current
* status of the stream
*/
public String generateReport() {
return toString();
}
}
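
/**
 * Illustrative helper (for exposition only): shows the expected call sequence
 * for any concrete EditLogOutputStream. The stream and op are supplied by the
 * caller; no particular implementation is assumed.
 */
class EditLogOutputStreamUsageExample {
  static void logAndSync(EditLogOutputStream out, FSEditLogOp op)
      throws IOException {
    out.write(op);         // buffer the edit operation
    out.setReadyToFlush(); // freeze the buffered edits for syncing
    out.flush();           // flushAndSync(true) plus sync-time metrics
  }
}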
| 4,545 | 29.10596 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageCompression.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
/**
* Simple container class that handles support for compressed fsimage files.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
class FSImageCompression {
/** Codec to use to save or load image, or null if the image is not compressed */
private CompressionCodec imageCodec;
/**
* Create a "noop" compression - i.e. uncompressed
*/
private FSImageCompression() {
}
/**
* Create compression using a particular codec
*/
private FSImageCompression(CompressionCodec codec) {
imageCodec = codec;
}
public CompressionCodec getImageCodec() {
return imageCodec;
}
/**
* Create a "noop" compression - i.e. uncompressed
*/
static FSImageCompression createNoopCompression() {
return new FSImageCompression();
}
/**
* Create a compression instance based on the user's configuration in the given
* Configuration object.
* @throws IOException if the specified codec is not available.
*/
static FSImageCompression createCompression(Configuration conf)
throws IOException {
boolean compressImage = conf.getBoolean(
DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY,
DFSConfigKeys.DFS_IMAGE_COMPRESS_DEFAULT);
if (!compressImage) {
return createNoopCompression();
}
String codecClassName = conf.get(
DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_DEFAULT);
return createCompression(conf, codecClassName);
}
/**
* Create a compression instance using the codec specified by
* <code>codecClassName</code>
*/
static FSImageCompression createCompression(Configuration conf,
String codecClassName)
throws IOException {
CompressionCodecFactory factory = new CompressionCodecFactory(conf);
CompressionCodec codec = factory.getCodecByClassName(codecClassName);
if (codec == null) {
throw new IOException("Not a supported codec: " + codecClassName);
}
return new FSImageCompression(codec);
}
/**
* Create a compression instance based on a header read from an input stream.
* @throws IOException if the specified codec is not available or the
* underlying IO fails.
*/
static FSImageCompression readCompressionHeader(
Configuration conf, DataInput in) throws IOException
{
boolean isCompressed = in.readBoolean();
if (!isCompressed) {
return createNoopCompression();
} else {
String codecClassName = Text.readString(in);
return createCompression(conf, codecClassName);
}
}
/**
* Unwrap a compressed input stream by wrapping it with a decompressor based
* on this codec. If this instance represents no compression, simply adds
* buffering to the input stream.
* @return a buffered stream that provides uncompressed data
* @throws IOException If the decompressor cannot be instantiated or an IO
* error occurs.
*/
DataInputStream unwrapInputStream(InputStream is) throws IOException {
if (imageCodec != null) {
return new DataInputStream(imageCodec.createInputStream(is));
} else {
return new DataInputStream(new BufferedInputStream(is));
}
}
/**
* Write out a header to the given stream that indicates the chosen
* compression codec, and return the same stream wrapped with that codec.
* If no codec is specified, simply adds buffering to the stream, so that
* the returned stream is always buffered.
*
* @param os The stream to write header to and wrap. This stream should
* be unbuffered.
* @return A stream wrapped with the specified compressor, or buffering
* if compression is not enabled.
* @throws IOException if an IO error occurs or the compressor cannot be
* instantiated
*/
DataOutputStream writeHeaderAndWrapStream(OutputStream os)
throws IOException {
DataOutputStream dos = new DataOutputStream(os);
dos.writeBoolean(imageCodec != null);
if (imageCodec != null) {
String codecClassName = imageCodec.getClass().getCanonicalName();
Text.writeString(dos, codecClassName);
return new DataOutputStream(imageCodec.createOutputStream(os));
} else {
// use a buffered output stream
return new DataOutputStream(new BufferedOutputStream(os));
}
}
@Override
public String toString() {
if (imageCodec != null) {
return "codec " + imageCodec.getClass().getCanonicalName();
} else {
return "no compression";
}
}
}
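
/**
 * Illustrative round-trip sketch (for exposition only): writes a compression
 * header plus a small payload to a byte array and reads it back using
 * whatever codec the configuration selects (no compression by default).
 * The payload string is arbitrary.
 */
class FSImageCompressionExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FSImageCompression compression =
        FSImageCompression.createCompression(conf);

    java.io.ByteArrayOutputStream bos = new java.io.ByteArrayOutputStream();
    DataOutputStream out = compression.writeHeaderAndWrapStream(bos);
    out.writeUTF("image payload");
    out.close();

    DataInputStream in = new DataInputStream(
        new java.io.ByteArrayInputStream(bos.toByteArray()));
    FSImageCompression readBack =
        FSImageCompression.readCompressionHeader(conf, in);
    DataInputStream payload = readBack.unwrapInputStream(in);
    System.out.println(payload.readUTF() + " read with " + readBack);
  }
}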
| 5,974 | 32.01105 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UnsupportedActionException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* This exception is thrown when an operation is not supported.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class UnsupportedActionException extends IOException {
/** for java.io.Serializable */
private static final long serialVersionUID = 1L;
public UnsupportedActionException(String msg) {
super(msg);
}
}
| 1,354 | 33.74359 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentCounts.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.util.EnumCounters;
/**
* The counter to be computed for content types such as file, directory and symlink,
* and the storage type usage such as SSD, DISK, ARCHIVE.
*/
public class ContentCounts {
private EnumCounters<Content> contents;
private EnumCounters<StorageType> types;
public static class Builder {
private EnumCounters<Content> contents;
// storage spaces used by corresponding storage types
private EnumCounters<StorageType> types;
public Builder() {
contents = new EnumCounters<Content>(Content.class);
types = new EnumCounters<StorageType>(StorageType.class);
}
public Builder file(long file) {
contents.set(Content.FILE, file);
return this;
}
public Builder directory(long directory) {
contents.set(Content.DIRECTORY, directory);
return this;
}
public Builder symlink(long symlink) {
contents.set(Content.SYMLINK, symlink);
return this;
}
public Builder length(long length) {
contents.set(Content.LENGTH, length);
return this;
}
public Builder storagespace(long storagespace) {
contents.set(Content.DISKSPACE, storagespace);
return this;
}
public Builder snapshot(long snapshot) {
contents.set(Content.SNAPSHOT, snapshot);
return this;
}
public Builder snapshotable_directory(long snapshotable_directory) {
contents.set(Content.SNAPSHOTTABLE_DIRECTORY, snapshotable_directory);
return this;
}
public ContentCounts build() {
return new ContentCounts(contents, types);
}
}
private ContentCounts(EnumCounters<Content> contents,
EnumCounters<StorageType> types) {
this.contents = contents;
this.types = types;
}
// Get the number of files.
public long getFileCount() {
return contents.get(Content.FILE);
}
// Get the number of directories.
public long getDirectoryCount() {
return contents.get(Content.DIRECTORY);
}
// Get the number of symlinks.
public long getSymlinkCount() {
return contents.get(Content.SYMLINK);
}
// Get the total of file length in bytes.
public long getLength() {
return contents.get(Content.LENGTH);
}
// Get the total of storage space usage in bytes including replication.
public long getStoragespace() {
return contents.get(Content.DISKSPACE);
}
// Get the number of snapshots
public long getSnapshotCount() {
return contents.get(Content.SNAPSHOT);
}
// Get the number of snapshottable directories.
public long getSnapshotableDirectoryCount() {
return contents.get(Content.SNAPSHOTTABLE_DIRECTORY);
}
public long[] getTypeSpaces() {
return types.asArray();
}
public long getTypeSpace(StorageType t) {
return types.get(t);
}
public void addContent(Content c, long val) {
contents.add(c, val);
}
public void addContents(ContentCounts that) {
contents.add(that.contents);
types.add(that.types);
}
public void addTypeSpace(StorageType t, long val) {
types.add(t, val);
}
public void addTypeSpaces(EnumCounters<StorageType> that) {
this.types.add(that);
}
}
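
/**
 * Illustrative builder usage (for exposition only): the counts and storage
 * type figures below are arbitrary.
 */
class ContentCountsExample {
  public static void main(String[] args) {
    ContentCounts counts = new ContentCounts.Builder()
        .file(10)
        .directory(3)
        .length(1L << 20)        // 1 MB of logical file data
        .storagespace(3L << 20)  // 3 MB on disk with replication factor 3
        .build();
    counts.addTypeSpace(StorageType.DISK, 3L << 20);
    System.out.println(counts.getFileCount() + " files, "
        + counts.getStoragespace() + " bytes including replication");
  }
}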
| 4,077 | 26.741497 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.LongBitFormat;
import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
import com.google.common.base.Preconditions;
/**
* {@link INode} with additional fields including id, name, permission,
* access time and modification time.
*/
@InterfaceAudience.Private
public abstract class INodeWithAdditionalFields extends INode
implements LinkedElement {
static enum PermissionStatusFormat {
MODE(null, 16),
GROUP(MODE.BITS, 25),
USER(GROUP.BITS, 23);
final LongBitFormat BITS;
private PermissionStatusFormat(LongBitFormat previous, int length) {
BITS = new LongBitFormat(name(), previous, length, 0);
}
static String getUser(long permission) {
final int n = (int)USER.BITS.retrieve(permission);
return SerialNumberManager.INSTANCE.getUser(n);
}
static String getGroup(long permission) {
final int n = (int)GROUP.BITS.retrieve(permission);
return SerialNumberManager.INSTANCE.getGroup(n);
}
static short getMode(long permission) {
return (short)MODE.BITS.retrieve(permission);
}
/** Encode the {@link PermissionStatus} to a long. */
static long toLong(PermissionStatus ps) {
long permission = 0L;
final int user = SerialNumberManager.INSTANCE.getUserSerialNumber(
ps.getUserName());
permission = USER.BITS.combine(user, permission);
final int group = SerialNumberManager.INSTANCE.getGroupSerialNumber(
ps.getGroupName());
permission = GROUP.BITS.combine(group, permission);
final int mode = ps.getPermission().toShort();
permission = MODE.BITS.combine(mode, permission);
return permission;
}
}
/** The inode id. */
final private long id;
/**
   * The inode name is in java UTF8 encoding;
   * the name in HdfsFileStatus should keep the same encoding as this one.
   * If this encoding is changed, getFileInfo and listStatus in
   * ClientProtocol are implicitly changed, and the decoding at the client
   * side should change accordingly.
*/
private byte[] name = null;
/**
* Permission encoded using {@link PermissionStatusFormat}.
* Codes other than {@link #clonePermissionStatus(INodeWithAdditionalFields)}
* and {@link #updatePermissionStatus(PermissionStatusFormat, long)}
* should not modify it.
*/
private long permission = 0L;
  /** The last modification time. */
  private long modificationTime = 0L;
  /** The last access time. */
  private long accessTime = 0L;
/** For implementing {@link LinkedElement}. */
private LinkedElement next = null;
  /** An array of {@link Feature}s. */
private static final Feature[] EMPTY_FEATURE = new Feature[0];
protected Feature[] features = EMPTY_FEATURE;
private INodeWithAdditionalFields(INode parent, long id, byte[] name,
long permission, long modificationTime, long accessTime) {
super(parent);
this.id = id;
this.name = name;
this.permission = permission;
this.modificationTime = modificationTime;
this.accessTime = accessTime;
}
INodeWithAdditionalFields(long id, byte[] name, PermissionStatus permissions,
long modificationTime, long accessTime) {
this(null, id, name, PermissionStatusFormat.toLong(permissions),
modificationTime, accessTime);
}
/** @param other Other node to be copied */
INodeWithAdditionalFields(INodeWithAdditionalFields other) {
this(other.getParentReference() != null ? other.getParentReference()
: other.getParent(), other.getId(), other.getLocalNameBytes(),
other.permission, other.modificationTime, other.accessTime);
}
@Override
public void setNext(LinkedElement next) {
this.next = next;
}
@Override
public LinkedElement getNext() {
return next;
}
/** Get inode id */
@Override
public final long getId() {
return this.id;
}
@Override
public final byte[] getLocalNameBytes() {
return name;
}
@Override
public final void setLocalName(byte[] name) {
this.name = name;
}
/** Clone the {@link PermissionStatus}. */
final void clonePermissionStatus(INodeWithAdditionalFields that) {
this.permission = that.permission;
}
@Override
final PermissionStatus getPermissionStatus(int snapshotId) {
return new PermissionStatus(getUserName(snapshotId), getGroupName(snapshotId),
getFsPermission(snapshotId));
}
private final void updatePermissionStatus(PermissionStatusFormat f, long n) {
this.permission = f.BITS.combine(n, permission);
}
@Override
final String getUserName(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getUserName();
}
return PermissionStatusFormat.getUser(permission);
}
@Override
final void setUser(String user) {
int n = SerialNumberManager.INSTANCE.getUserSerialNumber(user);
updatePermissionStatus(PermissionStatusFormat.USER, n);
}
@Override
final String getGroupName(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getGroupName();
}
return PermissionStatusFormat.getGroup(permission);
}
@Override
final void setGroup(String group) {
int n = SerialNumberManager.INSTANCE.getGroupSerialNumber(group);
updatePermissionStatus(PermissionStatusFormat.GROUP, n);
}
@Override
final FsPermission getFsPermission(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getFsPermission();
}
return new FsPermission(getFsPermissionShort());
}
@Override
public final short getFsPermissionShort() {
return PermissionStatusFormat.getMode(permission);
}
@Override
void setPermission(FsPermission permission) {
final short mode = permission.toShort();
updatePermissionStatus(PermissionStatusFormat.MODE, mode);
}
@Override
public long getPermissionLong() {
return permission;
}
@Override
public final AclFeature getAclFeature(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getAclFeature();
}
return getFeature(AclFeature.class);
}
@Override
final long getModificationTime(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getModificationTime();
}
return this.modificationTime;
}
/** Update modification time if it is larger than the current value. */
@Override
public final INode updateModificationTime(long mtime, int latestSnapshotId) {
Preconditions.checkState(isDirectory());
if (mtime <= modificationTime) {
return this;
}
return setModificationTime(mtime, latestSnapshotId);
}
final void cloneModificationTime(INodeWithAdditionalFields that) {
this.modificationTime = that.modificationTime;
}
@Override
public final void setModificationTime(long modificationTime) {
this.modificationTime = modificationTime;
}
@Override
final long getAccessTime(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getAccessTime();
}
return accessTime;
}
/**
* Set last access time of inode.
*/
@Override
public final void setAccessTime(long accessTime) {
this.accessTime = accessTime;
}
protected void addFeature(Feature f) {
int size = features.length;
Feature[] arr = new Feature[size + 1];
if (size != 0) {
System.arraycopy(features, 0, arr, 0, size);
}
arr[size] = f;
features = arr;
}
protected void removeFeature(Feature f) {
int size = features.length;
Preconditions.checkState(size > 0, "Feature "
+ f.getClass().getSimpleName() + " not found.");
if (size == 1) {
Preconditions.checkState(features[0] == f, "Feature "
+ f.getClass().getSimpleName() + " not found.");
features = EMPTY_FEATURE;
return;
}
Feature[] arr = new Feature[size - 1];
int j = 0;
boolean overflow = false;
for (Feature f1 : features) {
if (f1 != f) {
if (j == size - 1) {
overflow = true;
break;
} else {
arr[j++] = f1;
}
}
}
Preconditions.checkState(!overflow && j == size - 1, "Feature "
+ f.getClass().getSimpleName() + " not found.");
features = arr;
}
protected <T extends Feature> T getFeature(Class<? extends Feature> clazz) {
Preconditions.checkArgument(clazz != null);
for (Feature f : features) {
if (clazz.isAssignableFrom(f.getClass())) {
@SuppressWarnings("unchecked")
T ret = (T) f;
return ret;
}
}
return null;
}
public void removeAclFeature() {
AclFeature f = getAclFeature();
Preconditions.checkNotNull(f);
removeFeature(f);
AclStorage.removeAclFeature(f);
}
public void addAclFeature(AclFeature f) {
AclFeature f1 = getAclFeature();
if (f1 != null)
throw new IllegalStateException("Duplicated ACLFeature");
addFeature(AclStorage.addAclFeature(f));
}
@Override
XAttrFeature getXAttrFeature(int snapshotId) {
if (snapshotId != Snapshot.CURRENT_STATE_ID) {
return getSnapshotINode(snapshotId).getXAttrFeature();
}
return getFeature(XAttrFeature.class);
}
@Override
public void removeXAttrFeature() {
XAttrFeature f = getXAttrFeature();
Preconditions.checkNotNull(f);
removeFeature(f);
}
@Override
public void addXAttrFeature(XAttrFeature f) {
XAttrFeature f1 = getXAttrFeature();
Preconditions.checkState(f1 == null, "Duplicated XAttrFeature");
addFeature(f);
}
public final Feature[] getFeatures() {
return features;
}
}
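
/**
 * Illustrative sketch (for exposition only): packs a PermissionStatus into
 * the long layout defined by PermissionStatusFormat (mode in the low 16 bits,
 * then the group and user serial numbers) and reads the fields back. The user
 * and group names are arbitrary.
 */
class PermissionStatusFormatExample {
  public static void main(String[] args) {
    PermissionStatus ps = new PermissionStatus(
        "exampleUser", "exampleGroup", new FsPermission((short) 0755));
    long packed = INodeWithAdditionalFields.PermissionStatusFormat.toLong(ps);
    String user = INodeWithAdditionalFields.PermissionStatusFormat
        .getUser(packed);
    String group = INodeWithAdditionalFields.PermissionStatusFormat
        .getGroup(packed);
    short mode = INodeWithAdditionalFields.PermissionStatusFormat
        .getMode(packed);
    System.out.println(user + ":" + group + " "
        + Integer.toOctalString(mode)); // exampleUser:exampleGroup 755
  }
}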
| 10,892 | 28.440541 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import com.google.common.base.Preconditions;
/**
* A double-buffer for edits. New edits are written into the first buffer
* while the second is available to be flushed. Each time the double-buffer
* is flushed, the two internal buffers are swapped. This allows edits
* to progress concurrently to flushes without allocating new buffers each
* time.
*/
@InterfaceAudience.Private
public class EditsDoubleBuffer {
private TxnBuffer bufCurrent; // current buffer for writing
private TxnBuffer bufReady; // buffer ready for flushing
private final int initBufferSize;
public EditsDoubleBuffer(int defaultBufferSize) {
initBufferSize = defaultBufferSize;
bufCurrent = new TxnBuffer(initBufferSize);
bufReady = new TxnBuffer(initBufferSize);
}
public void writeOp(FSEditLogOp op) throws IOException {
bufCurrent.writeOp(op);
}
public void writeRaw(byte[] bytes, int offset, int length) throws IOException {
bufCurrent.write(bytes, offset, length);
}
public void close() throws IOException {
Preconditions.checkNotNull(bufCurrent);
Preconditions.checkNotNull(bufReady);
int bufSize = bufCurrent.size();
if (bufSize != 0) {
throw new IOException("FSEditStream has " + bufSize
+ " bytes still to be flushed and cannot be closed.");
}
IOUtils.cleanup(null, bufCurrent, bufReady);
bufCurrent = bufReady = null;
}
public void setReadyToFlush() {
assert isFlushed() : "previous data not flushed yet";
TxnBuffer tmp = bufReady;
bufReady = bufCurrent;
bufCurrent = tmp;
}
/**
* Writes the content of the "ready" buffer to the given output stream,
* and resets it. Does not swap any buffers.
*/
public void flushTo(OutputStream out) throws IOException {
bufReady.writeTo(out); // write data to file
bufReady.reset(); // erase all data in the buffer
}
public boolean shouldForceSync() {
return bufCurrent.size() >= initBufferSize;
}
DataOutputBuffer getReadyBuf() {
return bufReady;
}
DataOutputBuffer getCurrentBuf() {
return bufCurrent;
}
public boolean isFlushed() {
return bufReady.size() == 0;
}
public int countBufferedBytes() {
return bufReady.size() + bufCurrent.size();
}
/**
* @return the transaction ID of the first transaction ready to be flushed
*/
public long getFirstReadyTxId() {
assert bufReady.firstTxId > 0;
return bufReady.firstTxId;
}
/**
* @return the number of transactions that are ready to be flushed
*/
public int countReadyTxns() {
return bufReady.numTxns;
}
/**
* @return the number of bytes that are ready to be flushed
*/
public int countReadyBytes() {
return bufReady.size();
}
private static class TxnBuffer extends DataOutputBuffer {
long firstTxId;
int numTxns;
private final Writer writer;
public TxnBuffer(int initBufferSize) {
super(initBufferSize);
writer = new FSEditLogOp.Writer(this);
reset();
}
public void writeOp(FSEditLogOp op) throws IOException {
if (firstTxId == HdfsServerConstants.INVALID_TXID) {
firstTxId = op.txid;
} else {
assert op.txid > firstTxId;
}
writer.writeOp(op);
numTxns++;
}
@Override
public DataOutputBuffer reset() {
super.reset();
firstTxId = HdfsServerConstants.INVALID_TXID;
numTxns = 0;
return this;
}
}
}
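
/**
 * Illustrative sketch (for exposition only): shows the swap-then-flush cycle.
 * Raw bytes are written instead of real FSEditLogOp instances so the example
 * stays self-contained.
 */
class EditsDoubleBufferExample {
  public static void main(String[] args) throws IOException {
    EditsDoubleBuffer buffers = new EditsDoubleBuffer(512);
    byte[] edit = new byte[] { 1, 2, 3, 4 };
    buffers.writeRaw(edit, 0, edit.length); // goes into the current buffer
    buffers.setReadyToFlush();              // swap current and ready buffers
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    buffers.flushTo(out);                   // drain and reset the ready buffer
    System.out.println("flushed " + out.size() + " bytes, "
        + buffers.countBufferedBytes() + " still buffered");
    buffers.close();                        // succeeds: nothing left unflushed
  }
}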
| 4,655 | 27.564417 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachedBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
import org.apache.hadoop.util.IntrusiveCollection;
import org.apache.hadoop.util.LightWeightGSet;
import org.apache.hadoop.util.IntrusiveCollection.Element;
import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
/**
* Represents a cached block.
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
public final class CachedBlock implements Element,
LightWeightGSet.LinkedElement {
private static final Object[] EMPTY_ARRAY = new Object[0];
/**
* Block id.
*/
private final long blockId;
/**
   * Used to implement {@link LightWeightGSet.LinkedElement}.
*/
private LinkedElement nextElement;
/**
   * Bit 0: mark.
   * Bits 1-15: cache replication factor.
*/
private short replicationAndMark;
/**
* Used to implement the CachedBlocksList.
*
* Since this CachedBlock can be in multiple CachedBlocksList objects,
* we need to be able to store multiple 'prev' and 'next' pointers.
* The triplets array does this.
*
   * Each triplet contains a CachedBlocksList object followed by a
* prev pointer, followed by a next pointer.
*/
private Object[] triplets;
public CachedBlock(long blockId, short replication, boolean mark) {
this.blockId = blockId;
this.triplets = EMPTY_ARRAY;
setReplicationAndMark(replication, mark);
}
public long getBlockId() {
return blockId;
}
@Override
public int hashCode() {
return (int)(blockId^(blockId>>>32));
}
@Override
public boolean equals(Object o) {
if (o == null) { return false; }
if (o == this) { return true; }
if (o.getClass() != this.getClass()) {
return false;
}
CachedBlock other = (CachedBlock)o;
return other.blockId == blockId;
}
public void setReplicationAndMark(short replication, boolean mark) {
assert replication >= 0;
replicationAndMark = (short)((replication << 1) | (mark ? 0x1 : 0x0));
}
public boolean getMark() {
return ((replicationAndMark & 0x1) != 0);
}
public short getReplication() {
return (short) (replicationAndMark >>> 1);
}
/**
* Return true if this CachedBlock is present on the given list.
*/
public boolean isPresent(CachedBlocksList cachedBlocksList) {
for (int i = 0; i < triplets.length; i += 3) {
CachedBlocksList list = (CachedBlocksList)triplets[i];
if (list == cachedBlocksList) {
return true;
}
}
return false;
}
/**
   * Get a list of the datanodes on which this block is cached,
   * planned to be cached, or planned to be uncached.
*
* @param type If null, this parameter is ignored.
* If it is non-null, we match only datanodes which
* have it on this list.
* See {@link DatanodeDescriptor.CachedBlocksList.Type}
* for a description of all the lists.
*
* @return The list of datanodes. Modifying this list does not
* alter the state of the CachedBlock.
*/
public List<DatanodeDescriptor> getDatanodes(Type type) {
List<DatanodeDescriptor> nodes = new LinkedList<DatanodeDescriptor>();
for (int i = 0; i < triplets.length; i += 3) {
CachedBlocksList list = (CachedBlocksList)triplets[i];
if ((type == null) || (list.getType() == type)) {
nodes.add(list.getDatanode());
}
}
return nodes;
}
@Override
public void insertInternal(IntrusiveCollection<? extends Element> list, Element prev,
Element next) {
for (int i = 0; i < triplets.length; i += 3) {
if (triplets[i] == list) {
throw new RuntimeException("Trying to re-insert an element that " +
"is already in the list.");
}
}
Object newTriplets[] = Arrays.copyOf(triplets, triplets.length + 3);
newTriplets[triplets.length] = list;
newTriplets[triplets.length + 1] = prev;
newTriplets[triplets.length + 2] = next;
triplets = newTriplets;
}
@Override
public void setPrev(IntrusiveCollection<? extends Element> list, Element prev) {
for (int i = 0; i < triplets.length; i += 3) {
if (triplets[i] == list) {
triplets[i + 1] = prev;
return;
}
}
throw new RuntimeException("Called setPrev on an element that wasn't " +
"in the list.");
}
@Override
public void setNext(IntrusiveCollection<? extends Element> list, Element next) {
for (int i = 0; i < triplets.length; i += 3) {
if (triplets[i] == list) {
triplets[i + 2] = next;
return;
}
}
throw new RuntimeException("Called setNext on an element that wasn't " +
"in the list.");
}
@Override
public void removeInternal(IntrusiveCollection<? extends Element> list) {
for (int i = 0; i < triplets.length; i += 3) {
if (triplets[i] == list) {
Object[] newTriplets = new Object[triplets.length - 3];
System.arraycopy(triplets, 0, newTriplets, 0, i);
System.arraycopy(triplets, i + 3, newTriplets, i,
triplets.length - (i + 3));
triplets = newTriplets;
return;
}
}
throw new RuntimeException("Called remove on an element that wasn't " +
"in the list.");
}
@Override
public Element getPrev(IntrusiveCollection<? extends Element> list) {
for (int i = 0; i < triplets.length; i += 3) {
if (triplets[i] == list) {
return (Element)triplets[i + 1];
}
}
throw new RuntimeException("Called getPrev on an element that wasn't " +
"in the list.");
}
@Override
public Element getNext(IntrusiveCollection<? extends Element> list) {
for (int i = 0; i < triplets.length; i += 3) {
if (triplets[i] == list) {
return (Element)triplets[i + 2];
}
}
throw new RuntimeException("Called getNext on an element that wasn't " +
"in the list.");
}
@Override
public boolean isInList(IntrusiveCollection<? extends Element> list) {
for (int i = 0; i < triplets.length; i += 3) {
if (triplets[i] == list) {
return true;
}
}
return false;
}
@Override
public String toString() {
return new StringBuilder().append("{").
append("blockId=").append(blockId).append(", ").
append("replication=").append(getReplication()).append(", ").
append("mark=").append(getMark()).append("}").
toString();
}
@Override // LightWeightGSet.LinkedElement
public void setNext(LinkedElement next) {
this.nextElement = next;
}
@Override // LightWeightGSet.LinkedElement
public LinkedElement getNext() {
return nextElement;
}
}
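
/**
 * Illustrative sketch (for exposition only): shows how the cache replication
 * factor and the mark bit are packed into the single replicationAndMark
 * field. The block id and replication below are arbitrary.
 */
class CachedBlockExample {
  public static void main(String[] args) {
    CachedBlock block = new CachedBlock(1234L, (short) 3, true);
    System.out.println(block); // {blockId=1234, replication=3, mark=true}
    block.setReplicationAndMark((short) 2, false);
    System.out.println(block.getReplication() + ", " + block.getMark());
  }
}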
| 7,875 | 30.253968 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseExpiredException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* The lease that was being used to create this file has expired.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class LeaseExpiredException extends IOException {
private static final long serialVersionUID = 1L;
public LeaseExpiredException(String msg) {
super(msg);
}
}
| 1,312 | 33.552632 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URISyntaxException;
import java.net.URL;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.Time;
import org.apache.http.client.utils.URIBuilder;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import org.mortbay.jetty.EofException;
/**
 * This class provides methods for fetching a specified file from the NameNode.
*/
@InterfaceAudience.Private
public class TransferFsImage {
public final static String CONTENT_LENGTH = "Content-Length";
public final static String FILE_LENGTH = "File-Length";
public final static String MD5_HEADER = "X-MD5-Digest";
private final static String CONTENT_TYPE = "Content-Type";
private final static String CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding";
private final static int IO_FILE_BUFFER_SIZE;
@VisibleForTesting
static int timeout = 0;
private static final URLConnectionFactory connectionFactory;
private static final boolean isSpnegoEnabled;
static {
Configuration conf = new Configuration();
connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
IO_FILE_BUFFER_SIZE = DFSUtil.getIoFileBufferSize(conf);
}
private static final Log LOG = LogFactory.getLog(TransferFsImage.class);
public static void downloadMostRecentImageToDirectory(URL infoServer,
File dir) throws IOException {
String fileId = ImageServlet.getParamStringForMostRecentImage();
getFileClient(infoServer, fileId, Lists.newArrayList(dir),
null, false);
}
public static MD5Hash downloadImageToStorage(URL fsName, long imageTxId,
Storage dstStorage, boolean needDigest) throws IOException {
String fileid = ImageServlet.getParamStringForImage(null,
imageTxId, dstStorage);
String fileName = NNStorage.getCheckpointImageFileName(imageTxId);
List<File> dstFiles = dstStorage.getFiles(
NameNodeDirType.IMAGE, fileName);
if (dstFiles.isEmpty()) {
throw new IOException("No targets in destination storage!");
}
MD5Hash hash = getFileClient(fsName, fileid, dstFiles, dstStorage, needDigest);
LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size " +
dstFiles.get(0).length() + " bytes.");
return hash;
}
static MD5Hash handleUploadImageRequest(HttpServletRequest request,
long imageTxId, Storage dstStorage, InputStream stream,
long advertisedSize, DataTransferThrottler throttler) throws IOException {
String fileName = NNStorage.getCheckpointImageFileName(imageTxId);
List<File> dstFiles = dstStorage.getFiles(NameNodeDirType.IMAGE, fileName);
if (dstFiles.isEmpty()) {
throw new IOException("No targets in destination storage!");
}
MD5Hash advertisedDigest = parseMD5Header(request);
MD5Hash hash = receiveFile(fileName, dstFiles, dstStorage, true,
advertisedSize, advertisedDigest, fileName, stream, throttler);
LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size "
+ dstFiles.get(0).length() + " bytes.");
return hash;
}
static void downloadEditsToStorage(URL fsName, RemoteEditLog log,
NNStorage dstStorage) throws IOException {
assert log.getStartTxId() > 0 && log.getEndTxId() > 0 :
"bad log: " + log;
String fileid = ImageServlet.getParamStringForLog(
log, dstStorage);
String finalFileName = NNStorage.getFinalizedEditsFileName(
log.getStartTxId(), log.getEndTxId());
List<File> finalFiles = dstStorage.getFiles(NameNodeDirType.EDITS,
finalFileName);
assert !finalFiles.isEmpty() : "No checkpoint targets.";
for (File f : finalFiles) {
if (f.exists() && FileUtil.canRead(f)) {
LOG.info("Skipping download of remote edit log " +
log + " since it already is stored locally at " + f);
return;
} else if (LOG.isDebugEnabled()) {
LOG.debug("Dest file: " + f);
}
}
final long milliTime = Time.monotonicNow();
String tmpFileName = NNStorage.getTemporaryEditsFileName(
log.getStartTxId(), log.getEndTxId(), milliTime);
List<File> tmpFiles = dstStorage.getFiles(NameNodeDirType.EDITS,
tmpFileName);
getFileClient(fsName, fileid, tmpFiles, dstStorage, false);
LOG.info("Downloaded file " + tmpFiles.get(0).getName() + " size " +
finalFiles.get(0).length() + " bytes.");
CheckpointFaultInjector.getInstance().beforeEditsRename();
for (StorageDirectory sd : dstStorage.dirIterable(NameNodeDirType.EDITS)) {
File tmpFile = NNStorage.getTemporaryEditsFile(sd,
log.getStartTxId(), log.getEndTxId(), milliTime);
File finalizedFile = NNStorage.getFinalizedEditsFile(sd,
log.getStartTxId(), log.getEndTxId());
if (LOG.isDebugEnabled()) {
LOG.debug("Renaming " + tmpFile + " to " + finalizedFile);
}
boolean success = tmpFile.renameTo(finalizedFile);
if (!success) {
LOG.warn("Unable to rename edits file from " + tmpFile
+ " to " + finalizedFile);
}
}
}
/**
* Uploads an image from this node's storage to the remote NameNode via HTTP PUT.
*
* @param fsName the http address for the remote NN
* @param conf Configuration
* @param storage the storage directory to transfer the image from
* @param nnf the NameNodeFile type of the image
* @param txid the transaction ID of the image to be uploaded
* @throws IOException if there is an I/O error
*/
public static void uploadImageFromStorage(URL fsName, Configuration conf,
NNStorage storage, NameNodeFile nnf, long txid) throws IOException {
uploadImageFromStorage(fsName, conf, storage, nnf, txid, null);
}
/**
* Uploads an image from this node's storage to the remote NameNode via
* HTTP PUT. Allows for optional external cancellation.
*
* @param fsName the http address for the remote NN
* @param conf Configuration
* @param storage the storage directory to transfer the image from
* @param nnf the NameNodeFile type of the image
* @param txid the transaction ID of the image to be uploaded
* @param canceler optional canceler to check for abort of upload
* @throws IOException if there is an I/O error or cancellation
*/
public static void uploadImageFromStorage(URL fsName, Configuration conf,
NNStorage storage, NameNodeFile nnf, long txid, Canceler canceler)
throws IOException {
URL url = new URL(fsName, ImageServlet.PATH_SPEC);
long startTime = Time.monotonicNow();
try {
uploadImage(url, conf, storage, nnf, txid, canceler);
} catch (HttpPutFailedException e) {
if (e.getResponseCode() == HttpServletResponse.SC_CONFLICT) {
// this is OK - this means that a previous attempt to upload
// this checkpoint succeeded even though we thought it failed.
LOG.info("Image upload with txid " + txid +
" conflicted with a previous image upload to the " +
"same NameNode. Continuing...", e);
return;
} else {
throw e;
}
}
double xferSec = Math.max(
((float) (Time.monotonicNow() - startTime)) / 1000.0, 0.001);
LOG.info("Uploaded image with txid " + txid + " to namenode at " + fsName
+ " in " + xferSec + " seconds");
}
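// Illustrative usage sketch (added commentary, not part of the original source):
// a checkpointing node that has written fsimage_<txid> into its own NNStorage
// could push it to the active NameNode roughly as follows; the names
// activeNnHttpUrl, conf, myStorage and checkpointTxId are hypothetical.
//
//   URL activeNnHttpUrl = new URL("http://nn-host:50070");
//   TransferFsImage.uploadImageFromStorage(activeNnHttpUrl, conf,
//       myStorage, NameNodeFile.IMAGE, checkpointTxId, new Canceler());
//
// A 409 (Conflict) response is treated as success, since it means an earlier
// attempt already delivered the same checkpoint.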
/*
* Uploads the image file using the HTTP PUT method.
*/
private static void uploadImage(URL url, Configuration conf,
NNStorage storage, NameNodeFile nnf, long txId, Canceler canceler)
throws IOException {
File imageFile = storage.findImageFile(nnf, txId);
if (imageFile == null) {
throw new IOException("Could not find image with txid " + txId);
}
HttpURLConnection connection = null;
try {
URIBuilder uriBuilder = new URIBuilder(url.toURI());
// write all params for image upload request as query itself.
// Request body contains the image to be uploaded.
Map<String, String> params = ImageServlet.getParamsForPutImage(storage,
txId, imageFile.length(), nnf);
for (Entry<String, String> entry : params.entrySet()) {
uriBuilder.addParameter(entry.getKey(), entry.getValue());
}
URL urlWithParams = uriBuilder.build().toURL();
connection = (HttpURLConnection) connectionFactory.openConnection(
urlWithParams, UserGroupInformation.isSecurityEnabled());
// Set the request to PUT
connection.setRequestMethod("PUT");
connection.setDoOutput(true);
int chunkSize = conf.getInt(
DFSConfigKeys.DFS_IMAGE_TRANSFER_CHUNKSIZE_KEY,
DFSConfigKeys.DFS_IMAGE_TRANSFER_CHUNKSIZE_DEFAULT);
if (imageFile.length() > chunkSize) {
// Use chunked streaming mode to support uploads of 2GB+ files and to
// avoid internal buffering.
// This mode should be used only if more than chunkSize bytes are to be
// uploaded; otherwise the upload may not complete in some cases.
connection.setChunkedStreamingMode(chunkSize);
}
setTimeout(connection);
// set headers for verification
ImageServlet.setVerificationHeadersForPut(connection, imageFile);
// Write the file to output stream.
writeFileToPutRequest(conf, connection, imageFile, canceler);
int responseCode = connection.getResponseCode();
if (responseCode != HttpURLConnection.HTTP_OK) {
throw new HttpPutFailedException(String.format(
"Image uploading failed, status: %d, url: %s, message: %s",
responseCode, urlWithParams, connection.getResponseMessage()),
responseCode);
}
} catch (AuthenticationException e) {
throw new IOException(e);
} catch (URISyntaxException e) {
throw new IOException(e);
} finally {
if (connection != null) {
connection.disconnect();
}
}
}
private static void writeFileToPutRequest(Configuration conf,
HttpURLConnection connection, File imageFile, Canceler canceler)
throws FileNotFoundException, IOException {
connection.setRequestProperty(CONTENT_TYPE, "application/octet-stream");
connection.setRequestProperty(CONTENT_TRANSFER_ENCODING, "binary");
OutputStream output = connection.getOutputStream();
FileInputStream input = new FileInputStream(imageFile);
try {
copyFileToStream(output, imageFile, input,
ImageServlet.getThrottler(conf), canceler);
} finally {
IOUtils.closeStream(input);
IOUtils.closeStream(output);
}
}
/**
* A server-side method to respond to a getfile HTTP request.
* Copies the contents of the local file into the output stream.
*/
public static void copyFileToStream(OutputStream out, File localfile,
FileInputStream infile, DataTransferThrottler throttler)
throws IOException {
copyFileToStream(out, localfile, infile, throttler, null);
}
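// Illustrative server-side sketch (added commentary, not part of the original
// source): a servlet serving a local image file could stream it with optional
// throttling roughly as follows; response, imageFile and throttler are
// hypothetical names.
//
//   FileInputStream in = new FileInputStream(imageFile);
//   try {
//     copyFileToStream(response.getOutputStream(), imageFile, in, throttler);
//   } finally {
//     IOUtils.closeStream(in);
//   }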
private static void copyFileToStream(OutputStream out, File localfile,
FileInputStream infile, DataTransferThrottler throttler,
Canceler canceler) throws IOException {
byte buf[] = new byte[IO_FILE_BUFFER_SIZE];
try {
CheckpointFaultInjector.getInstance()
.aboutToSendFile(localfile);
if (CheckpointFaultInjector.getInstance().
shouldSendShortFile(localfile)) {
// Test sending image shorter than localfile
long len = localfile.length();
buf = new byte[(int)Math.min(len/2, IO_FILE_BUFFER_SIZE)];
// This will read at most half of the image
// and the rest of the image will be sent over the wire
infile.read(buf);
}
int num = 1;
while (num > 0) {
if (canceler != null && canceler.isCancelled()) {
throw new SaveNamespaceCancelledException(
canceler.getCancellationReason());
}
num = infile.read(buf);
if (num <= 0) {
break;
}
if (CheckpointFaultInjector.getInstance()
.shouldCorruptAByte(localfile)) {
// Simulate a corrupted byte on the wire
LOG.warn("SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!");
buf[0]++;
}
out.write(buf, 0, num);
if (throttler != null) {
throttler.throttle(num, canceler);
}
}
} catch (EofException e) {
LOG.info("Connection closed by client");
out = null; // so we don't close in the finally
} finally {
if (out != null) {
out.close();
}
}
}
/**
* Client-side method to fetch a file from a server.
* Copies the response from the URL to a list of local files.
* @param dstStorage if an error occurs writing to one of the files,
* this storage object will be notified.
* @return a digest of the received file if getChecksum is true
*/
static MD5Hash getFileClient(URL infoServer,
String queryString, List<File> localPaths,
Storage dstStorage, boolean getChecksum) throws IOException {
URL url = new URL(infoServer, ImageServlet.PATH_SPEC + "?" + queryString);
LOG.info("Opening connection to " + url);
return doGetUrl(url, localPaths, dstStorage, getChecksum);
}
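// Illustrative client-side sketch (added commentary, not part of the original
// source): downloadMostRecentImageToDirectory() above goes through this path.
// A direct call with hypothetical names would look roughly like:
//
//   String query = ImageServlet.getParamStringForMostRecentImage();
//   MD5Hash digest = getFileClient(nnHttpUrl, query,
//       Lists.newArrayList(new File("/tmp/fsimage-copy")), null, true);
//
// Passing null for dstStorage skips storage error reporting, and
// getChecksum=true makes the MD5 of the received bytes available.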
public static MD5Hash doGetUrl(URL url, List<File> localPaths,
Storage dstStorage, boolean getChecksum) throws IOException {
HttpURLConnection connection;
try {
connection = (HttpURLConnection)
connectionFactory.openConnection(url, isSpnegoEnabled);
} catch (AuthenticationException e) {
throw new IOException(e);
}
setTimeout(connection);
if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
throw new HttpGetFailedException(
"Image transfer servlet at " + url +
" failed with status code " + connection.getResponseCode() +
"\nResponse message:\n" + connection.getResponseMessage(),
connection);
}
long advertisedSize;
String contentLength = connection.getHeaderField(CONTENT_LENGTH);
if (contentLength != null) {
advertisedSize = Long.parseLong(contentLength);
} else {
throw new IOException(CONTENT_LENGTH + " header is not provided " +
"by the namenode when trying to fetch " + url);
}
MD5Hash advertisedDigest = parseMD5Header(connection);
String fsImageName = connection
.getHeaderField(ImageServlet.HADOOP_IMAGE_EDITS_HEADER);
InputStream stream = connection.getInputStream();
return receiveFile(url.toExternalForm(), localPaths, dstStorage,
getChecksum, advertisedSize, advertisedDigest, fsImageName, stream,
null);
}
private static void setTimeout(HttpURLConnection connection) {
if (timeout <= 0) {
Configuration conf = new HdfsConfiguration();
timeout = conf.getInt(DFSConfigKeys.DFS_IMAGE_TRANSFER_TIMEOUT_KEY,
DFSConfigKeys.DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT);
LOG.info("Image Transfer timeout configured to " + timeout
+ " milliseconds");
}
if (timeout > 0) {
connection.setConnectTimeout(timeout);
connection.setReadTimeout(timeout);
}
}
private static MD5Hash receiveFile(String url, List<File> localPaths,
Storage dstStorage, boolean getChecksum, long advertisedSize,
MD5Hash advertisedDigest, String fsImageName, InputStream stream,
DataTransferThrottler throttler) throws IOException {
long startTime = Time.monotonicNow();
if (localPaths != null) {
// If the local paths refer to directories, use the server-provided header
// as the filename within that directory
List<File> newLocalPaths = new ArrayList<File>();
for (File localPath : localPaths) {
if (localPath.isDirectory()) {
if (fsImageName == null) {
throw new IOException("No filename header provided by server");
}
newLocalPaths.add(new File(localPath, fsImageName));
} else {
newLocalPaths.add(localPath);
}
}
localPaths = newLocalPaths;
}
long received = 0;
MessageDigest digester = null;
if (getChecksum) {
digester = MD5Hash.getDigester();
stream = new DigestInputStream(stream, digester);
}
boolean finishedReceiving = false;
List<FileOutputStream> outputStreams = Lists.newArrayList();
try {
if (localPaths != null) {
for (File f : localPaths) {
try {
if (f.exists()) {
LOG.warn("Overwriting existing file " + f
+ " with file downloaded from " + url);
}
outputStreams.add(new FileOutputStream(f));
} catch (IOException ioe) {
LOG.warn("Unable to download file " + f, ioe);
// This will be null if we're downloading the fsimage to a file
// outside of an NNStorage directory.
if (dstStorage != null &&
(dstStorage instanceof StorageErrorReporter)) {
((StorageErrorReporter)dstStorage).reportErrorOnFile(f);
}
}
}
if (outputStreams.isEmpty()) {
throw new IOException(
"Unable to download to any storage directory");
}
}
int num = 1;
byte[] buf = new byte[IO_FILE_BUFFER_SIZE];
while (num > 0) {
num = stream.read(buf);
if (num > 0) {
received += num;
for (FileOutputStream fos : outputStreams) {
fos.write(buf, 0, num);
}
if (throttler != null) {
throttler.throttle(num);
}
}
}
finishedReceiving = true;
} finally {
stream.close();
for (FileOutputStream fos : outputStreams) {
fos.getChannel().force(true);
fos.close();
}
// Something went wrong and did not finish reading.
// Remove the temporary files.
if (!finishedReceiving) {
deleteTmpFiles(localPaths);
}
if (finishedReceiving && received != advertisedSize) {
// only throw this exception if we think we read all of it on our end
// -- otherwise a client-side IOException would be masked by this
// exception that makes it look like a server-side problem!
deleteTmpFiles(localPaths);
throw new IOException("File " + url + " received length " + received +
" is not of the advertised size " +
advertisedSize);
}
}
double xferSec = Math.max(
((float)(Time.monotonicNow() - startTime)) / 1000.0, 0.001);
long xferKb = received / 1024;
LOG.info(String.format("Transfer took %.2fs at %.2f KB/s",
xferSec, xferKb / xferSec));
if (digester != null) {
MD5Hash computedDigest = new MD5Hash(digester.digest());
if (advertisedDigest != null &&
!computedDigest.equals(advertisedDigest)) {
deleteTmpFiles(localPaths);
throw new IOException("File " + url + " computed digest " +
computedDigest + " does not match advertised digest " +
advertisedDigest);
}
return computedDigest;
} else {
return null;
}
}
private static void deleteTmpFiles(List<File> files) {
if (files == null) {
return;
}
LOG.info("Deleting temporary files: " + files);
for (File file : files) {
if (!file.delete()) {
LOG.warn("Deleting " + file + " has failed");
}
}
}
private static MD5Hash parseMD5Header(HttpURLConnection connection) {
String header = connection.getHeaderField(MD5_HEADER);
return (header != null) ? new MD5Hash(header) : null;
}
private static MD5Hash parseMD5Header(HttpServletRequest request) {
String header = request.getHeader(MD5_HEADER);
return (header != null) ? new MD5Hash(header) : null;
}
public static class HttpGetFailedException extends IOException {
private static final long serialVersionUID = 1L;
private final int responseCode;
HttpGetFailedException(String msg, HttpURLConnection connection) throws IOException {
super(msg);
this.responseCode = connection.getResponseCode();
}
public int getResponseCode() {
return responseCode;
}
}
public static class HttpPutFailedException extends IOException {
private static final long serialVersionUID = 1L;
private final int responseCode;
HttpPutFailedException(String msg, int responseCode) throws IOException {
super(msg);
this.responseCode = responseCode;
}
public int getResponseCode() {
return responseCode;
}
}
}
| 23,216 | 36.087859 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.util.Time.now;
import java.io.Closeable;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.hdfs.util.EnumCounters;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.Time;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
/**
* FSImage handles checkpointing and logging of the namespace edits.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class FSImage implements Closeable {
public static final Log LOG = LogFactory.getLog(FSImage.class.getName());
protected FSEditLog editLog = null;
private boolean isUpgradeFinalized = false;
protected NNStorage storage;
/**
* The last transaction ID that was either loaded from an image
* or applied by loading edits files.
*/
protected long lastAppliedTxId = 0;
final private Configuration conf;
protected NNStorageRetentionManager archivalManager;
/* Used to make sure there are no concurrent checkpoints for a given txid
* The checkpoint here could be one of the following operations.
* a. checkpoint when NN is in standby.
* b. admin saveNameSpace operation.
* c. download checkpoint file from any remote checkpointer.
*/
private final Set<Long> currentlyCheckpointing =
Collections.<Long>synchronizedSet(new HashSet<Long>());
/**
* Construct an FSImage
* @param conf Configuration
* @throws IOException if default directories are invalid.
*/
public FSImage(Configuration conf) throws IOException {
this(conf,
FSNamesystem.getNamespaceDirs(conf),
FSNamesystem.getNamespaceEditsDirs(conf));
}
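// Illustrative sketch (added commentary, not part of the original source):
// constructing an FSImage directly picks up the image and edits directories
// from the configuration; the directory values below are hypothetical.
//
//   Configuration conf = new HdfsConfiguration();
//   conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
//       "/data/1/dfs/name,/data/2/dfs/name");
//   FSImage image = new FSImage(conf);  // storage and edit log initialized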
/**
* Construct the FSImage. Set the default checkpoint directories.
*
* Set up storage and initialize the edit log.
*
* @param conf Configuration
* @param imageDirs Directories the image can be stored in.
* @param editsDirs Directories the editlog can be stored in.
* @throws IOException if directories are invalid.
*/
protected FSImage(Configuration conf,
Collection<URI> imageDirs,
List<URI> editsDirs)
throws IOException {
this.conf = conf;
storage = new NNStorage(conf, imageDirs, editsDirs);
if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT)) {
storage.setRestoreFailedStorage(true);
}
this.editLog = new FSEditLog(conf, storage, editsDirs);
archivalManager = new NNStorageRetentionManager(conf, storage, editLog);
}
void format(FSNamesystem fsn, String clusterId) throws IOException {
long fileCount = fsn.getTotalFiles();
// Expect 1 file, which is the root inode
Preconditions.checkState(fileCount == 1,
"FSImage.format should be called with an uninitialized namesystem, has " +
fileCount + " files");
NamespaceInfo ns = NNStorage.newNamespaceInfo();
LOG.info("Allocated new BlockPoolId: " + ns.getBlockPoolID());
ns.clusterID = clusterId;
storage.format(ns);
editLog.formatNonFileJournals(ns);
saveFSImageInAllDirs(fsn, 0);
}
/**
* Check whether the storage directories and non-file journals exist.
* If running in interactive mode, will prompt the user for each
* directory to allow them to format anyway. Otherwise, returns
* false, unless 'force' is specified.
*
* @param force if true, format regardless of whether dirs exist
* @param interactive prompt the user when a dir exists
* @return true if formatting should proceed
* @throws IOException if some storage cannot be accessed
*/
boolean confirmFormat(boolean force, boolean interactive) throws IOException {
List<FormatConfirmable> confirms = Lists.newArrayList();
for (StorageDirectory sd : storage.dirIterable(null)) {
confirms.add(sd);
}
confirms.addAll(editLog.getFormatConfirmables());
return Storage.confirmFormat(confirms, force, interactive);
}
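// Illustrative sketch (added commentary, not part of the original source):
// format-time confirmation covers both the local storage directories and the
// edit log's non-file journals. With hypothetical flags:
//
//   if (image.confirmFormat(false /* force */, true /* interactive */)) {
//     image.format(fsn, clusterId);  // prompts only for dirs that already exist
//   }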
/**
* Analyze storage directories.
* Recover from previous transitions if required.
* Perform fs state transition if necessary depending on the namespace info.
* Read storage info.
*
* @throws IOException
* @return true if the image needs to be saved or false otherwise
*/
boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target,
MetaRecoveryContext recovery)
throws IOException {
assert startOpt != StartupOption.FORMAT :
"NameNode formatting should be performed before reading the image";
Collection<URI> imageDirs = storage.getImageDirectories();
Collection<URI> editsDirs = editLog.getEditURIs();
// none of the data dirs exist
if((imageDirs.size() == 0 || editsDirs.size() == 0)
&& startOpt != StartupOption.IMPORT)
throw new IOException(
"All specified directories are not accessible or do not exist.");
// 1. For each data directory calculate its state and
// check whether all is consistent before transitioning.
Map<StorageDirectory, StorageState> dataDirStates =
new HashMap<StorageDirectory, StorageState>();
boolean isFormatted = recoverStorageDirs(startOpt, storage, dataDirStates);
if (LOG.isTraceEnabled()) {
LOG.trace("Data dir states:\n " +
Joiner.on("\n ").withKeyValueSeparator(": ")
.join(dataDirStates));
}
if (!isFormatted && startOpt != StartupOption.ROLLBACK
&& startOpt != StartupOption.IMPORT) {
throw new IOException("NameNode is not formatted.");
}
int layoutVersion = storage.getLayoutVersion();
if (startOpt == StartupOption.METADATAVERSION) {
System.out.println("HDFS Image Version: " + layoutVersion);
System.out.println("Software format version: " +
HdfsServerConstants.NAMENODE_LAYOUT_VERSION);
return false;
}
if (layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION) {
NNStorage.checkVersionUpgradable(storage.getLayoutVersion());
}
if (startOpt != StartupOption.UPGRADE
&& startOpt != StartupOption.UPGRADEONLY
&& !RollingUpgradeStartupOption.STARTED.matches(startOpt)
&& layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION
&& layoutVersion != HdfsServerConstants.NAMENODE_LAYOUT_VERSION) {
throw new IOException(
"\nFile system image contains an old layout version "
+ storage.getLayoutVersion() + ".\nAn upgrade to version "
+ HdfsServerConstants.NAMENODE_LAYOUT_VERSION + " is required.\n"
+ "Please restart NameNode with the \""
+ RollingUpgradeStartupOption.STARTED.getOptionString()
+ "\" option if a rolling upgrade is already started;"
+ " or restart NameNode with the \""
+ StartupOption.UPGRADE.getName() + "\" option to start"
+ " a new upgrade.");
}
storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
// 2. Format unformatted dirs.
for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
StorageState curState = dataDirStates.get(sd);
switch(curState) {
case NON_EXISTENT:
throw new IOException(StorageState.NON_EXISTENT +
" state cannot be here");
case NOT_FORMATTED:
LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
LOG.info("Formatting ...");
sd.clearDirectory(); // create empty current dir
break;
default:
break;
}
}
// 3. Do transitions
switch(startOpt) {
case UPGRADE:
case UPGRADEONLY:
doUpgrade(target);
return false; // upgrade saved image already
case IMPORT:
doImportCheckpoint(target);
return false; // import checkpoint saved image already
case ROLLBACK:
throw new AssertionError("Rollback is now a standalone command, "
+ "NameNode should not be starting with this option.");
case REGULAR:
default:
// just load the image
}
return loadFSImage(target, startOpt, recovery);
}
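// Illustrative sketch (added commentary, not part of the original source):
// during a regular startup the namespace is loaded roughly as follows, where
// fsn is a hypothetical FSNamesystem under construction:
//
//   boolean needToSave =
//       fsImage.recoverTransitionRead(StartupOption.REGULAR, fsn, null);
//   if (needToSave) {
//     fsImage.saveNamespace(fsn);  // re-checkpoint if the image is stale
//   }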
/**
* For each storage directory, performs recovery of incomplete transitions
* (e.g. upgrade, rollback, checkpoint) and inserts the directory's storage
* state into the dataDirStates map.
* @param dataDirStates output of storage directory states
* @return true if there is at least one valid formatted storage directory
*/
public static boolean recoverStorageDirs(StartupOption startOpt,
NNStorage storage, Map<StorageDirectory, StorageState> dataDirStates)
throws IOException {
boolean isFormatted = false;
// This loop needs to be over all storage dirs, even shared dirs, to make
// sure that we properly examine their state, but we make sure we don't
// mutate the shared dir below in the actual loop.
for (Iterator<StorageDirectory> it =
storage.dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
StorageState curState;
if (startOpt == StartupOption.METADATAVERSION) {
/* All we need is the layout version. */
storage.readProperties(sd);
return true;
}
try {
curState = sd.analyzeStorage(startOpt, storage);
// sd is locked but not opened
switch(curState) {
case NON_EXISTENT:
// name-node fails if any of the configured storage dirs are missing
throw new InconsistentFSStateException(sd.getRoot(),
"storage directory does not exist or is not accessible.");
case NOT_FORMATTED:
break;
case NORMAL:
break;
default: // recovery is possible
sd.doRecover(curState);
}
if (curState != StorageState.NOT_FORMATTED
&& startOpt != StartupOption.ROLLBACK) {
// read and verify consistency with other directories
storage.readProperties(sd, startOpt);
isFormatted = true;
}
if (startOpt == StartupOption.IMPORT && isFormatted)
// import of a checkpoint is allowed only into empty image directories
throw new IOException("Cannot import image from a checkpoint. "
+ " NameNode already contains an image in " + sd.getRoot());
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
dataDirStates.put(sd,curState);
}
return isFormatted;
}
/** Check if upgrade is in progress. */
public static void checkUpgrade(NNStorage storage) throws IOException {
// Upgrade or rolling upgrade is allowed only if there are
// no previous fs states in any of the directories
for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
StorageDirectory sd = it.next();
if (sd.getPreviousDir().exists())
throw new InconsistentFSStateException(sd.getRoot(),
"previous fs state should not exist during upgrade. "
+ "Finalize or rollback first.");
}
}
void checkUpgrade() throws IOException {
checkUpgrade(storage);
}
/**
* @return true if there is a rollback fsimage (for rolling upgrade) in the
* NameNode directory.
*/
public boolean hasRollbackFSImage() throws IOException {
final FSImageStorageInspector inspector = new FSImageTransactionalStorageInspector(
EnumSet.of(NameNodeFile.IMAGE_ROLLBACK));
storage.inspectStorageDirs(inspector);
try {
List<FSImageFile> images = inspector.getLatestImages();
return images != null && !images.isEmpty();
} catch (FileNotFoundException e) {
return false;
}
}
void doUpgrade(FSNamesystem target) throws IOException {
checkUpgrade();
// load the latest image
this.loadFSImage(target, StartupOption.UPGRADE, null);
// Do upgrade for each directory
target.checkRollingUpgrade("upgrade namenode");
long oldCTime = storage.getCTime();
storage.cTime = now(); // generate new cTime for the state
int oldLV = storage.getLayoutVersion();
storage.layoutVersion = HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
List<StorageDirectory> errorSDs =
Collections.synchronizedList(new ArrayList<StorageDirectory>());
assert !editLog.isSegmentOpen() : "Edits log must not be open.";
LOG.info("Starting upgrade of local storage directories."
+ "\n old LV = " + oldLV
+ "; old CTime = " + oldCTime
+ ".\n new LV = " + storage.getLayoutVersion()
+ "; new CTime = " + storage.getCTime());
// Do upgrade for each directory
for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
StorageDirectory sd = it.next();
try {
NNUpgradeUtil.doPreUpgrade(conf, sd);
} catch (Exception e) {
LOG.error("Failed to move aside pre-upgrade storage " +
"in image directory " + sd.getRoot(), e);
errorSDs.add(sd);
continue;
}
}
if (target.isHaEnabled()) {
editLog.doPreUpgradeOfSharedLog();
}
storage.reportErrorsOnDirectories(errorSDs);
errorSDs.clear();
saveFSImageInAllDirs(target, editLog.getLastWrittenTxId());
// upgrade shared edit storage first
if (target.isHaEnabled()) {
editLog.doUpgradeOfSharedLog();
}
for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
StorageDirectory sd = it.next();
try {
NNUpgradeUtil.doUpgrade(sd, storage);
} catch (IOException ioe) {
errorSDs.add(sd);
continue;
}
}
storage.reportErrorsOnDirectories(errorSDs);
isUpgradeFinalized = false;
if (!storage.getRemovedStorageDirs().isEmpty()) {
// during upgrade, it's a fatal error to fail any storage directory
throw new IOException("Upgrade failed in "
+ storage.getRemovedStorageDirs().size()
+ " storage directory(ies), previously logged.");
}
}
void doRollback(FSNamesystem fsns) throws IOException {
// Rollback is allowed only if there is
// a previous fs state in at least one of the storage directories.
// Directories that don't have a previous state are not rolled back.
boolean canRollback = false;
FSImage prevState = new FSImage(conf);
try {
prevState.getStorage().layoutVersion = HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
StorageDirectory sd = it.next();
if (!NNUpgradeUtil.canRollBack(sd, storage, prevState.getStorage(),
HdfsServerConstants.NAMENODE_LAYOUT_VERSION)) {
continue;
}
LOG.info("Can perform rollback for " + sd);
canRollback = true;
}
if (fsns.isHaEnabled()) {
// If HA is enabled, check if the shared log can be rolled back as well.
editLog.initJournalsForWrite();
boolean canRollBackSharedEditLog = editLog.canRollBackSharedLog(
prevState.getStorage(), HdfsServerConstants.NAMENODE_LAYOUT_VERSION);
if (canRollBackSharedEditLog) {
LOG.info("Can perform rollback for shared edit log.");
canRollback = true;
}
}
if (!canRollback)
throw new IOException("Cannot rollback. None of the storage "
+ "directories contain previous fs state.");
// Now that we know all directories are going to be consistent
// Do rollback for each directory containing previous state
for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
StorageDirectory sd = it.next();
LOG.info("Rolling back storage directory " + sd.getRoot()
+ ".\n new LV = " + prevState.getStorage().getLayoutVersion()
+ "; new CTime = " + prevState.getStorage().getCTime());
NNUpgradeUtil.doRollBack(sd);
}
if (fsns.isHaEnabled()) {
// If HA is enabled, try to roll back the shared log as well.
editLog.doRollback();
}
isUpgradeFinalized = true;
} finally {
prevState.close();
}
}
/**
* Load image from a checkpoint directory and save it into the current one.
* @param target the NameSystem to import into
* @throws IOException
*/
void doImportCheckpoint(FSNamesystem target) throws IOException {
Collection<URI> checkpointDirs =
FSImage.getCheckpointDirs(conf, null);
List<URI> checkpointEditsDirs =
FSImage.getCheckpointEditsDirs(conf, null);
if (checkpointDirs == null || checkpointDirs.isEmpty()) {
throw new IOException("Cannot import image from a checkpoint. "
+ "\"dfs.namenode.checkpoint.dir\" is not set." );
}
if (checkpointEditsDirs == null || checkpointEditsDirs.isEmpty()) {
throw new IOException("Cannot import image from a checkpoint. "
+ "\"dfs.namenode.checkpoint.dir\" is not set." );
}
FSImage realImage = target.getFSImage();
FSImage ckptImage = new FSImage(conf,
checkpointDirs, checkpointEditsDirs);
// load from the checkpoint dirs
try {
ckptImage.recoverTransitionRead(StartupOption.REGULAR, target, null);
} finally {
ckptImage.close();
}
// return back the real image
realImage.getStorage().setStorageInfo(ckptImage.getStorage());
realImage.getEditLog().setNextTxId(ckptImage.getEditLog().getLastWrittenTxId()+1);
realImage.initEditLog(StartupOption.IMPORT);
realImage.getStorage().setBlockPoolID(ckptImage.getBlockPoolID());
// and save it but keep the same checkpointTime
saveNamespace(target);
getStorage().writeAll();
}
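// Illustrative sketch (added commentary, not part of the original source):
// importing a checkpoint is reached via recoverTransitionRead(StartupOption.IMPORT,
// ...) when the NameNode is started with the -importCheckpoint option, and the
// source location comes from configuration such as (hypothetical value):
//
//   <property>
//     <name>dfs.namenode.checkpoint.dir</name>
//     <value>/backup/dfs/namesecondary</value>
//   </property>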
void finalizeUpgrade(boolean finalizeEditLog) throws IOException {
LOG.info("Finalizing upgrade for local dirs. " +
(storage.getLayoutVersion() == 0 ? "" :
"\n cur LV = " + storage.getLayoutVersion()
+ "; cur CTime = " + storage.getCTime()));
for (Iterator<StorageDirectory> it = storage.dirIterator(false); it.hasNext();) {
StorageDirectory sd = it.next();
NNUpgradeUtil.doFinalize(sd);
}
if (finalizeEditLog) {
// We only do this in the case that HA is enabled and we're active. In any
// other case the NN will have done the upgrade of the edits directories
// already by virtue of the fact that they're local.
editLog.doFinalizeOfSharedLog();
}
isUpgradeFinalized = true;
}
boolean isUpgradeFinalized() {
return isUpgradeFinalized;
}
public FSEditLog getEditLog() {
return editLog;
}
void openEditLogForWrite(int layoutVersion) throws IOException {
assert editLog != null : "editLog must be initialized";
editLog.openForWrite(layoutVersion);
storage.writeTransactionIdFileToStorage(editLog.getCurSegmentTxId());
}
/**
* Toss the current image and namesystem, reloading from the specified
* file.
*/
void reloadFromImageFile(File file, FSNamesystem target) throws IOException {
target.clear();
LOG.debug("Reloading namespace from " + file);
loadFSImage(file, target, null, false);
}
/**
* Choose latest image from one of the directories,
* load it and merge with the edits.
*
* Saving and loading fsimage should never trigger symlink resolution.
* The paths that are persisted do not have *intermediate* symlinks
* because intermediate symlinks are resolved at the time files,
* directories, and symlinks are created. All paths accessed while
* loading or saving fsimage should therefore only see symlinks as
* the final path component, and the functions called below do not
* resolve symlinks that are the final path component.
*
* @return whether the image should be saved
* @throws IOException
*/
private boolean loadFSImage(FSNamesystem target, StartupOption startOpt,
MetaRecoveryContext recovery)
throws IOException {
final boolean rollingRollback
= RollingUpgradeStartupOption.ROLLBACK.matches(startOpt);
final EnumSet<NameNodeFile> nnfs;
if (rollingRollback) {
// if it is rollback of rolling upgrade, only load from the rollback image
nnfs = EnumSet.of(NameNodeFile.IMAGE_ROLLBACK);
} else {
// otherwise we can load from both IMAGE and IMAGE_ROLLBACK
nnfs = EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK);
}
final FSImageStorageInspector inspector = storage
.readAndInspectDirs(nnfs, startOpt);
isUpgradeFinalized = inspector.isUpgradeFinalized();
List<FSImageFile> imageFiles = inspector.getLatestImages();
StartupProgress prog = NameNode.getStartupProgress();
prog.beginPhase(Phase.LOADING_FSIMAGE);
File phaseFile = imageFiles.get(0).getFile();
prog.setFile(Phase.LOADING_FSIMAGE, phaseFile.getAbsolutePath());
prog.setSize(Phase.LOADING_FSIMAGE, phaseFile.length());
boolean needToSave = inspector.needToSave();
Iterable<EditLogInputStream> editStreams = null;
initEditLog(startOpt);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
// If we're open for write, we're either non-HA or we're the active NN, so
// we better be able to load all the edits. If we're the standby NN, it's
// OK to not be able to read all of edits right now.
// In the meantime, for HA upgrade, we will still write the editlog and thus
// need this toAtLeastTxId to be set to the max-seen txid.
// For rollback in rolling upgrade, we need to set the toAtLeastTxId to
// the txid right before the upgrade marker.
long toAtLeastTxId = editLog.isOpenForWrite() ? inspector
.getMaxSeenTxId() : 0;
if (rollingRollback) {
// note that the first image in imageFiles is the special checkpoint
// for the rolling upgrade
toAtLeastTxId = imageFiles.get(0).getCheckpointTxId() + 2;
}
editStreams = editLog.selectInputStreams(
imageFiles.get(0).getCheckpointTxId() + 1,
toAtLeastTxId, recovery, false);
} else {
editStreams = FSImagePreTransactionalStorageInspector
.getEditLogStreams(storage);
}
int maxOpSize = conf.getInt(DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_KEY,
DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT);
for (EditLogInputStream elis : editStreams) {
elis.setMaxOpSize(maxOpSize);
}
for (EditLogInputStream l : editStreams) {
LOG.debug("Planning to load edit log stream: " + l);
}
if (!editStreams.iterator().hasNext()) {
LOG.info("No edit log streams selected.");
}
FSImageFile imageFile = null;
for (int i = 0; i < imageFiles.size(); i++) {
try {
imageFile = imageFiles.get(i);
loadFSImageFile(target, recovery, imageFile, startOpt);
break;
} catch (IOException ioe) {
LOG.error("Failed to load image from " + imageFile, ioe);
target.clear();
imageFile = null;
}
}
// Failed to load any images, error out
if (imageFile == null) {
FSEditLog.closeAllStreams(editStreams);
throw new IOException("Failed to load an FSImage file!");
}
prog.endPhase(Phase.LOADING_FSIMAGE);
if (!rollingRollback) {
long txnsAdvanced = loadEdits(editStreams, target, startOpt, recovery);
needToSave |= needsResaveBasedOnStaleCheckpoint(imageFile.getFile(),
txnsAdvanced);
if (RollingUpgradeStartupOption.DOWNGRADE.matches(startOpt)) {
// rename rollback image if it is downgrade
renameCheckpoint(NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE);
}
} else {
// Trigger the rollback for rolling upgrade. Here lastAppliedTxId equals
// the last txid in the rollback fsimage.
rollingRollback(lastAppliedTxId + 1, imageFiles.get(0).getCheckpointTxId());
needToSave = false;
}
editLog.setNextTxId(lastAppliedTxId + 1);
return needToSave;
}
/** rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
throws IOException {
// discard unnecessary editlog segments starting from the given id
this.editLog.discardSegments(discardSegmentTxId);
// rename the special checkpoint
renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
true);
// purge all the checkpoints after the marker
archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
// HDFS-7939: purge all old fsimage_rollback_*
archivalManager.purgeCheckpoints(NameNodeFile.IMAGE_ROLLBACK);
String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
if (HAUtil.isHAEnabled(conf, nameserviceId)) {
// close the editlog since it is currently open for write
this.editLog.close();
// reopen the editlog for read
this.editLog.initSharedJournalsForRead();
}
}
void loadFSImageFile(FSNamesystem target, MetaRecoveryContext recovery,
FSImageFile imageFile, StartupOption startupOption) throws IOException {
LOG.debug("Planning to load image :\n" + imageFile);
StorageDirectory sdForProperties = imageFile.sd;
storage.readProperties(sdForProperties, startupOption);
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
// For txid-based layout, we should have a .md5 file
// next to the image file
boolean isRollingRollback = RollingUpgradeStartupOption.ROLLBACK
.matches(startupOption);
loadFSImage(imageFile.getFile(), target, recovery, isRollingRollback);
} else if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.FSIMAGE_CHECKSUM, getLayoutVersion())) {
// In 0.22, we have the checksum stored in the VERSION file.
String md5 = storage.getDeprecatedProperty(
NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY);
if (md5 == null) {
throw new InconsistentFSStateException(sdForProperties.getRoot(),
"Message digest property " +
NNStorage.DEPRECATED_MESSAGE_DIGEST_PROPERTY +
" not set for storage directory " + sdForProperties.getRoot());
}
loadFSImage(imageFile.getFile(), new MD5Hash(md5), target, recovery,
false);
} else {
// We don't have any record of the md5sum
loadFSImage(imageFile.getFile(), null, target, recovery, false);
}
}
public void initEditLog(StartupOption startOpt) throws IOException {
Preconditions.checkState(getNamespaceID() != 0,
"Must know namespace ID before initting edit log");
String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
if (!HAUtil.isHAEnabled(conf, nameserviceId)) {
// If this NN is not HA
editLog.initJournalsForWrite();
editLog.recoverUnclosedStreams();
} else if (HAUtil.isHAEnabled(conf, nameserviceId)
&& (startOpt == StartupOption.UPGRADE
|| startOpt == StartupOption.UPGRADEONLY
|| RollingUpgradeStartupOption.ROLLBACK.matches(startOpt))) {
// This NN is HA, but we're doing an upgrade or a rollback of rolling
// upgrade so init the edit log for write.
editLog.initJournalsForWrite();
if (startOpt == StartupOption.UPGRADE
|| startOpt == StartupOption.UPGRADEONLY) {
long sharedLogCTime = editLog.getSharedLogCTime();
if (this.storage.getCTime() < sharedLogCTime) {
throw new IOException("It looks like the shared log is already " +
"being upgraded but this NN has not been upgraded yet. You " +
"should restart this NameNode with the '" +
StartupOption.BOOTSTRAPSTANDBY.getName() + "' option to bring " +
"this NN in sync with the other.");
}
}
editLog.recoverUnclosedStreams();
} else {
// This NN is HA and we're not doing an upgrade.
editLog.initSharedJournalsForRead();
}
}
/**
* @param imageFile the image file that was loaded
* @param numEditsLoaded the number of edits loaded from edits logs
* @return true if the NameNode should automatically save the namespace
* when it is started, due to the latest checkpoint being too old.
*/
private boolean needsResaveBasedOnStaleCheckpoint(
File imageFile, long numEditsLoaded) {
final long checkpointPeriod = conf.getLong(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
final long checkpointTxnCount = conf.getLong(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
long checkpointAge = Time.now() - imageFile.lastModified();
return (checkpointAge > checkpointPeriod * 1000) ||
(numEditsLoaded > checkpointTxnCount);
}
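// Worked example (added commentary, not part of the original source): with the
// typical defaults of 3600 seconds for dfs.namenode.checkpoint.period and
// 1,000,000 for dfs.namenode.checkpoint.txns, an image last modified two hours
// ago triggers a resave regardless of the edit count:
//
//   checkpointAge = now - lastModified = 7,200,000 ms > 3600 * 1000 ms  => resave
//
// and a ten-minute-old image still triggers a resave if more than 1,000,000
// edits were replayed on top of it.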
/**
* Load the specified list of edit files into the image.
*/
public long loadEdits(Iterable<EditLogInputStream> editStreams,
FSNamesystem target) throws IOException {
return loadEdits(editStreams, target, null, null);
}
private long loadEdits(Iterable<EditLogInputStream> editStreams,
FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery)
throws IOException {
LOG.debug("About to load edits:\n " + Joiner.on("\n ").join(editStreams));
StartupProgress prog = NameNode.getStartupProgress();
prog.beginPhase(Phase.LOADING_EDITS);
long prevLastAppliedTxId = lastAppliedTxId;
try {
FSEditLogLoader loader = new FSEditLogLoader(target, lastAppliedTxId);
// Load latest edits
for (EditLogInputStream editIn : editStreams) {
LOG.info("Reading " + editIn + " expecting start txid #" +
(lastAppliedTxId + 1));
try {
loader.loadFSEdits(editIn, lastAppliedTxId + 1, startOpt, recovery);
} finally {
// Update lastAppliedTxId even in case of error, since some ops may
// have been successfully applied before the error.
lastAppliedTxId = loader.getLastAppliedTxId();
}
// If we are in recovery mode, we may have skipped over some txids.
if (editIn.getLastTxId() != HdfsServerConstants.INVALID_TXID) {
lastAppliedTxId = editIn.getLastTxId();
}
}
} finally {
FSEditLog.closeAllStreams(editStreams);
// update the counts
updateCountForQuota(target.getBlockManager().getStoragePolicySuite(),
target.dir.rootDir);
}
prog.endPhase(Phase.LOADING_EDITS);
return lastAppliedTxId - prevLastAppliedTxId;
}
/**
* Update the count of each directory with quota in the namespace.
* A directory's count is defined as the total number of inodes in the tree
* rooted at the directory.
*
* This is an update of existing state of the filesystem and does not
* throw QuotaExceededException.
*/
static void updateCountForQuota(BlockStoragePolicySuite bsps,
INodeDirectory root) {
updateCountForQuotaRecursively(bsps, root.getStoragePolicyID(), root,
new QuotaCounts.Builder().build());
}
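// Illustrative sketch (added commentary, not part of the original source):
// after edits are replayed, loadEdits() recomputes quota usage once from the
// root:
//
//   updateCountForQuota(target.getBlockManager().getStoragePolicySuite(),
//       target.dir.rootDir);
//
// The recursion below accumulates namespace, storagespace and per-storage-type
// counts bottom-up and only logs (never throws) when a quota is found violated.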
private static void updateCountForQuotaRecursively(BlockStoragePolicySuite bsps,
byte blockStoragePolicyId, INodeDirectory dir, QuotaCounts counts) {
final long parentNamespace = counts.getNameSpace();
final long parentStoragespace = counts.getStorageSpace();
final EnumCounters<StorageType> parentTypeSpaces = counts.getTypeSpaces();
dir.computeQuotaUsage4CurrentDirectory(bsps, blockStoragePolicyId, counts);
for (INode child : dir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
final byte childPolicyId = child.getStoragePolicyIDForQuota(blockStoragePolicyId);
if (child.isDirectory()) {
updateCountForQuotaRecursively(bsps, childPolicyId,
child.asDirectory(), counts);
} else {
// file or symlink: count here to reduce recursive calls.
counts.add(child.computeQuotaUsage(bsps, childPolicyId, false,
Snapshot.CURRENT_STATE_ID));
}
}
if (dir.isQuotaSet()) {
// check if quota is violated. It indicates a software bug.
final QuotaCounts q = dir.getQuotaCounts();
final long namespace = counts.getNameSpace() - parentNamespace;
final long nsQuota = q.getNameSpace();
if (Quota.isViolated(nsQuota, namespace)) {
LOG.warn("Namespace quota violation in image for "
+ dir.getFullPathName()
+ " quota = " + nsQuota + " < consumed = " + namespace);
}
final long ssConsumed = counts.getStorageSpace() - parentStoragespace;
final long ssQuota = q.getStorageSpace();
if (Quota.isViolated(ssQuota, ssConsumed)) {
LOG.warn("Storagespace quota violation in image for "
+ dir.getFullPathName()
+ " quota = " + ssQuota + " < consumed = " + ssConsumed);
}
final EnumCounters<StorageType> typeSpaces =
new EnumCounters<StorageType>(StorageType.class);
for (StorageType t : StorageType.getTypesSupportingQuota()) {
final long typeSpace = counts.getTypeSpaces().get(t) -
parentTypeSpaces.get(t);
final long typeQuota = q.getTypeSpaces().get(t);
if (Quota.isViolated(typeQuota, typeSpace)) {
LOG.warn("Storage type quota violation in image for "
+ dir.getFullPathName()
+ " type = " + t.toString() + " quota = "
+ typeQuota + " < consumed " + typeSpace);
}
}
dir.getDirectoryWithQuotaFeature().setSpaceConsumed(namespace, ssConsumed,
typeSpaces);
}
}
/**
* Load the image namespace from the given image file, verifying
* it against the MD5 sum stored in its associated .md5 file.
*/
private void loadFSImage(File imageFile, FSNamesystem target,
MetaRecoveryContext recovery, boolean requireSameLayoutVersion)
throws IOException {
MD5Hash expectedMD5 = MD5FileUtils.readStoredMd5ForFile(imageFile);
if (expectedMD5 == null) {
throw new IOException("No MD5 file found corresponding to image file "
+ imageFile);
}
loadFSImage(imageFile, expectedMD5, target, recovery,
requireSameLayoutVersion);
}
/**
* Load in the filesystem image from file. It's a big list of
* filenames and blocks.
*/
private void loadFSImage(File curFile, MD5Hash expectedMd5,
FSNamesystem target, MetaRecoveryContext recovery,
boolean requireSameLayoutVersion) throws IOException {
// BlockPoolId is required when the FsImageLoader loads the rolling upgrade
// information. Make sure the ID is properly set.
target.setBlockPoolId(this.getBlockPoolID());
FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, target);
loader.load(curFile, requireSameLayoutVersion);
// Check that the image digest we loaded matches up with what
// we expected
MD5Hash readImageMd5 = loader.getLoadedImageMd5();
if (expectedMd5 != null &&
!expectedMd5.equals(readImageMd5)) {
throw new IOException("Image file " + curFile +
" is corrupt with MD5 checksum of " + readImageMd5 +
" but expecting " + expectedMd5);
}
long txId = loader.getLoadedImageTxId();
LOG.info("Loaded image for txid " + txId + " from " + curFile);
lastAppliedTxId = txId;
storage.setMostRecentCheckpointInfo(txId, curFile.lastModified());
}
/**
* Save the contents of the FS image to the file.
*/
void saveFSImage(SaveNamespaceContext context, StorageDirectory sd,
NameNodeFile dstType) throws IOException {
long txid = context.getTxId();
File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
File dstFile = NNStorage.getStorageFile(sd, dstType, txid);
FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context);
FSImageCompression compression = FSImageCompression.createCompression(conf);
saver.save(newFile, compression);
MD5FileUtils.saveMD5File(dstFile, saver.getSavedDigest());
storage.setMostRecentCheckpointInfo(txid, Time.now());
}
/**
* Save FSimage in the legacy format. This is not for NN consumption,
* but for tools like OIV.
*/
public void saveLegacyOIVImage(FSNamesystem source, String targetDir,
Canceler canceler) throws IOException {
FSImageCompression compression =
FSImageCompression.createCompression(conf);
long txid = getLastAppliedOrWrittenTxId();
SaveNamespaceContext ctx = new SaveNamespaceContext(source, txid,
canceler);
FSImageFormat.Saver saver = new FSImageFormat.Saver(ctx);
String imageFileName = NNStorage.getLegacyOIVImageFileName(txid);
File imageFile = new File(targetDir, imageFileName);
saver.save(imageFile, compression);
archivalManager.purgeOldLegacyOIVImages(targetDir, txid);
}
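// Illustrative sketch (added commentary, not part of the original source):
// a node configured to keep legacy OIV images could write one alongside its
// normal checkpoints; the target directory below is hypothetical.
//
//   fsImage.saveLegacyOIVImage(namesystem, "/data/oiv-images", new Canceler());
//
// Older legacy images in that directory are purged automatically after each save.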
/**
* FSImageSaver is run in a separate thread when saving the
* FSImage. There is one thread for each copy of the image.
*
* FSImageSaver assumes that it was launched from a thread that holds
* the FSNamesystem lock and waits for the FSImageSaver thread
* to finish.
* This way we are guaranteed that the namespace is not being updated
* while multiple instances of FSImageSaver are traversing it
* and writing it out.
*/
private class FSImageSaver implements Runnable {
private final SaveNamespaceContext context;
private final StorageDirectory sd;
private final NameNodeFile nnf;
public FSImageSaver(SaveNamespaceContext context, StorageDirectory sd,
NameNodeFile nnf) {
this.context = context;
this.sd = sd;
this.nnf = nnf;
}
@Override
public void run() {
try {
saveFSImage(context, sd, nnf);
} catch (SaveNamespaceCancelledException snce) {
LOG.info("Cancelled image saving for " + sd.getRoot() +
": " + snce.getMessage());
// don't report an error on the storage dir!
} catch (Throwable t) {
LOG.error("Unable to save image for " + sd.getRoot(), t);
context.reportErrorOnStorageDirectory(sd);
}
}
@Override
public String toString() {
return "FSImageSaver for " + sd.getRoot() +
" of type " + sd.getStorageDirType();
}
}
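// Illustrative sketch (added commentary, not part of the original source):
// saveFSImageInAllDirs() below starts one FSImageSaver per IMAGE storage
// directory and then joins them, roughly:
//
//   FSImageSaver saver = new FSImageSaver(ctx, sd, nnf);
//   Thread saveThread = new Thread(saver, saver.toString());
//   saveThreads.add(saveThread);
//   saveThread.start();
//   ...
//   waitForThreads(saveThreads);  // caller holds the namesystem lock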
private void waitForThreads(List<Thread> threads) {
for (Thread thread : threads) {
while (thread.isAlive()) {
try {
thread.join();
} catch (InterruptedException iex) {
LOG.error("Caught interrupted exception while waiting for thread " +
thread.getName() + " to finish. Retrying join");
}
}
}
}
/**
* Update version of all storage directories.
*/
public synchronized void updateStorageVersion() throws IOException {
storage.writeAll();
}
/**
* @see #saveNamespace(FSNamesystem, NameNodeFile, Canceler)
*/
public synchronized void saveNamespace(FSNamesystem source)
throws IOException {
saveNamespace(source, NameNodeFile.IMAGE, null);
}
/**
* Save the contents of the FS image to a new image file in each of the
* current storage directories.
*/
public synchronized void saveNamespace(FSNamesystem source, NameNodeFile nnf,
Canceler canceler) throws IOException {
assert editLog != null : "editLog must be initialized";
LOG.info("Save namespace ...");
storage.attemptRestoreRemovedStorage();
boolean editLogWasOpen = editLog.isSegmentOpen();
if (editLogWasOpen) {
editLog.endCurrentLogSegment(true);
}
long imageTxId = getLastAppliedOrWrittenTxId();
if (!addToCheckpointing(imageTxId)) {
throw new IOException(
"FS image is being downloaded from another NN at txid " + imageTxId);
}
try {
try {
saveFSImageInAllDirs(source, nnf, imageTxId, canceler);
if (!source.isRollingUpgrade()) {
storage.writeAll();
}
} finally {
if (editLogWasOpen) {
editLog.startLogSegment(imageTxId + 1, true,
source.getEffectiveLayoutVersion());
// Take this opportunity to note the current transaction.
// Even if the namespace save was cancelled, this marker
// is only used to determine what transaction ID is required
// for startup. So, it doesn't hurt to update it unnecessarily.
storage.writeTransactionIdFileToStorage(imageTxId + 1);
}
}
} finally {
removeFromCheckpointing(imageTxId);
}
}
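// Illustrative sketch (added commentary, not part of the original source):
// this is roughly the path taken by "hdfs dfsadmin -saveNamespace" (with the
// NameNode in safe mode); a caller holding the namesystem lock would simply do:
//
//   fsImage.saveNamespace(source);  // defaults to NameNodeFile.IMAGE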
/**
* @see #saveFSImageInAllDirs(FSNamesystem, NameNodeFile, long, Canceler)
*/
protected synchronized void saveFSImageInAllDirs(FSNamesystem source, long txid)
throws IOException {
if (!addToCheckpointing(txid)) {
throw new IOException(("FS image is being downloaded from another NN"));
}
try {
saveFSImageInAllDirs(source, NameNodeFile.IMAGE, txid, null);
} finally {
removeFromCheckpointing(txid);
}
}
public boolean addToCheckpointing(long txid) {
return currentlyCheckpointing.add(txid);
}
public void removeFromCheckpointing(long txid) {
currentlyCheckpointing.remove(txid);
}
private synchronized void saveFSImageInAllDirs(FSNamesystem source,
NameNodeFile nnf, long txid, Canceler canceler) throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
prog.beginPhase(Phase.SAVING_CHECKPOINT);
if (storage.getNumStorageDirs(NameNodeDirType.IMAGE) == 0) {
throw new IOException("No image directories available!");
}
if (canceler == null) {
canceler = new Canceler();
}
SaveNamespaceContext ctx = new SaveNamespaceContext(
source, txid, canceler);
try {
List<Thread> saveThreads = new ArrayList<Thread>();
// save images into current
for (Iterator<StorageDirectory> it
= storage.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
StorageDirectory sd = it.next();
FSImageSaver saver = new FSImageSaver(ctx, sd, nnf);
Thread saveThread = new Thread(saver, saver.toString());
saveThreads.add(saveThread);
saveThread.start();
}
waitForThreads(saveThreads);
saveThreads.clear();
storage.reportErrorsOnDirectories(ctx.getErrorSDs());
if (storage.getNumStorageDirs(NameNodeDirType.IMAGE) == 0) {
throw new IOException(
"Failed to save in any storage directories while saving namespace.");
}
if (canceler.isCancelled()) {
deleteCancelledCheckpoint(txid);
ctx.checkCancelled(); // throws
assert false : "should have thrown above!";
}
renameCheckpoint(txid, NameNodeFile.IMAGE_NEW, nnf, false);
// Since we now have a new checkpoint, we can clean up some
// old edit logs and checkpoints.
purgeOldStorage(nnf);
archivalManager.purgeCheckpoints(NameNodeFile.IMAGE_NEW);
} finally {
// Notify any threads waiting on the checkpoint to be canceled
// that it is complete.
ctx.markComplete();
ctx = null;
}
prog.endPhase(Phase.SAVING_CHECKPOINT);
}
/**
* Purge any files in the storage directories that are no longer
* necessary.
*/
void purgeOldStorage(NameNodeFile nnf) {
try {
archivalManager.purgeOldStorage(nnf);
} catch (Exception e) {
LOG.warn("Unable to purge old storage " + nnf.getName(), e);
}
}
/**
* Rename FSImage with the specific txid
*/
private void renameCheckpoint(long txid, NameNodeFile fromNnf,
NameNodeFile toNnf, boolean renameMD5) throws IOException {
ArrayList<StorageDirectory> al = null;
for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
try {
renameImageFileInDir(sd, fromNnf, toNnf, txid, renameMD5);
} catch (IOException ioe) {
LOG.warn("Unable to rename checkpoint in " + sd, ioe);
if (al == null) {
al = Lists.newArrayList();
}
al.add(sd);
}
}
    if (al != null) {
      storage.reportErrorsOnDirectories(al);
    }
}
/**
* Rename all the fsimage files with the specific NameNodeFile type. The
* associated checksum files will also be renamed.
*/
void renameCheckpoint(NameNodeFile fromNnf, NameNodeFile toNnf)
throws IOException {
ArrayList<StorageDirectory> al = null;
FSImageTransactionalStorageInspector inspector =
new FSImageTransactionalStorageInspector(EnumSet.of(fromNnf));
storage.inspectStorageDirs(inspector);
for (FSImageFile image : inspector.getFoundImages()) {
try {
renameImageFileInDir(image.sd, fromNnf, toNnf, image.txId, true);
} catch (IOException ioe) {
LOG.warn("Unable to rename checkpoint in " + image.sd, ioe);
if (al == null) {
al = Lists.newArrayList();
}
al.add(image.sd);
}
}
if(al != null) {
storage.reportErrorsOnDirectories(al);
}
}
/**
* Deletes the checkpoint file in every storage directory,
* since the checkpoint was cancelled.
*/
private void deleteCancelledCheckpoint(long txid) throws IOException {
ArrayList<StorageDirectory> al = Lists.newArrayList();
for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
File ckpt = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid);
if (ckpt.exists() && !ckpt.delete()) {
LOG.warn("Unable to delete cancelled checkpoint in " + sd);
al.add(sd);
}
}
storage.reportErrorsOnDirectories(al);
}
private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf,
NameNodeFile toNnf, long txid, boolean renameMD5) throws IOException {
final File fromFile = NNStorage.getStorageFile(sd, fromNnf, txid);
final File toFile = NNStorage.getStorageFile(sd, toNnf, txid);
// renameTo fails on Windows if the destination file already exists.
if(LOG.isDebugEnabled()) {
LOG.debug("renaming " + fromFile.getAbsolutePath()
+ " to " + toFile.getAbsolutePath());
}
if (!fromFile.renameTo(toFile)) {
if (!toFile.delete() || !fromFile.renameTo(toFile)) {
throw new IOException("renaming " + fromFile.getAbsolutePath() + " to " +
toFile.getAbsolutePath() + " FAILED");
}
}
if (renameMD5) {
MD5FileUtils.renameMD5File(fromFile, toFile);
}
}
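  /*
   * A minimal standalone sketch of the delete-then-rename fallback used above,
   * assuming plain java.io.File arguments (the helper name is illustrative):
   *
   *   static void renameReplacing(File from, File to) throws IOException {
   *     if (!from.renameTo(to)) {
   *       // File.renameTo() fails on Windows when "to" already exists,
   *       // so remove the stale destination and retry once.
   *       if (!to.delete() || !from.renameTo(to)) {
   *         throw new IOException("rename " + from + " -> " + to + " failed");
   *       }
   *     }
   *   }
   */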
CheckpointSignature rollEditLog(int layoutVersion) throws IOException {
getEditLog().rollEditLog(layoutVersion);
// Record this log segment ID in all of the storage directories, so
// we won't miss this log segment on a restart if the edits directories
// go missing.
storage.writeTransactionIdFileToStorage(getEditLog().getCurSegmentTxId());
return new CheckpointSignature(this);
}
/**
* Start checkpoint.
* <p>
* If backup storage contains image that is newer than or incompatible with
* what the active name-node has, then the backup node should shutdown.<br>
* If the backup image is older than the active one then it should
* be discarded and downloaded from the active node.<br>
* If the images are the same then the backup image will be used as current.
*
* @param bnReg the backup node registration.
* @param nnReg this (active) name-node registration.
* @return {@link NamenodeCommand} if backup node should shutdown or
* {@link CheckpointCommand} prescribing what backup node should
* do with its image.
* @throws IOException
*/
NamenodeCommand startCheckpoint(NamenodeRegistration bnReg, // backup node
      NamenodeRegistration nnReg, // active name-node
      int layoutVersion)
throws IOException {
LOG.info("Start checkpoint at txid " + getEditLog().getLastWrittenTxId());
String msg = null;
// Verify that checkpoint is allowed
if(bnReg.getNamespaceID() != storage.getNamespaceID())
msg = "Name node " + bnReg.getAddress()
+ " has incompatible namespace id: " + bnReg.getNamespaceID()
+ " expected: " + storage.getNamespaceID();
else if(bnReg.isRole(NamenodeRole.NAMENODE))
msg = "Name node " + bnReg.getAddress()
+ " role " + bnReg.getRole() + ": checkpoint is not allowed.";
else if(bnReg.getLayoutVersion() < storage.getLayoutVersion()
|| (bnReg.getLayoutVersion() == storage.getLayoutVersion()
&& bnReg.getCTime() > storage.getCTime()))
// remote node has newer image age
msg = "Name node " + bnReg.getAddress()
+ " has newer image layout version: LV = " +bnReg.getLayoutVersion()
+ " cTime = " + bnReg.getCTime()
+ ". Current version: LV = " + storage.getLayoutVersion()
+ " cTime = " + storage.getCTime();
if(msg != null) {
LOG.error(msg);
return new NamenodeCommand(NamenodeProtocol.ACT_SHUTDOWN);
}
boolean needToReturnImg = true;
if(storage.getNumStorageDirs(NameNodeDirType.IMAGE) == 0)
// do not return image if there are no image directories
needToReturnImg = false;
CheckpointSignature sig = rollEditLog(layoutVersion);
return new CheckpointCommand(sig, needToReturnImg);
}
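  /*
   * Worked example for the layout-version check above (HDFS layout versions
   * are negative and decrease as features are added, so a smaller value means
   * a newer layout): if this NameNode's storage has LV = -60 and the backup
   * node registers with LV = -63, then -63 < -60, the backup is running a
   * newer layout than the active node, and it is told to shut down. The
   * concrete numbers are illustrative only.
   */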
/**
* End checkpoint.
* <p>
* Validate the current storage info with the given signature.
*
* @param sig to validate the current storage info against
* @throws IOException if the checkpoint fields are inconsistent
*/
void endCheckpoint(CheckpointSignature sig) throws IOException {
LOG.info("End checkpoint at txid " + getEditLog().getLastWrittenTxId());
sig.validateStorageInfo(this);
}
/**
* This is called by the 2NN after having downloaded an image, and by
* the NN after having received a new image from the 2NN. It
   * renames the image from its temporary checkpoint name (fsimage.ckpt_N)
   * to fsimage_N and also saves the related .md5 file into place.
*/
public synchronized void saveDigestAndRenameCheckpointImage(NameNodeFile nnf,
long txid, MD5Hash digest) throws IOException {
// Write and rename MD5 file
List<StorageDirectory> badSds = Lists.newArrayList();
for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
File imageFile = NNStorage.getImageFile(sd, nnf, txid);
try {
MD5FileUtils.saveMD5File(imageFile, digest);
} catch (IOException ioe) {
badSds.add(sd);
}
}
storage.reportErrorsOnDirectories(badSds);
CheckpointFaultInjector.getInstance().afterMD5Rename();
// Rename image from tmp file
renameCheckpoint(txid, NameNodeFile.IMAGE_NEW, nnf, false);
// So long as this is the newest image available,
// advertise it as such to other checkpointers
// from now on
if (txid > storage.getMostRecentCheckpointTxId()) {
storage.setMostRecentCheckpointInfo(txid, Time.now());
}
}
@Override
synchronized public void close() throws IOException {
if (editLog != null) { // 2NN doesn't have any edit log
getEditLog().close();
}
storage.close();
}
/**
* Retrieve checkpoint dirs from configuration.
*
* @param conf the Configuration
   * @param defaultValue the default value to use when the property is not
   *          set; ignored if null
* @return a Collection of URIs representing the values in
* dfs.namenode.checkpoint.dir configuration property
*/
static Collection<URI> getCheckpointDirs(Configuration conf,
String defaultValue) {
Collection<String> dirNames = conf.getTrimmedStringCollection(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
if (dirNames.size() == 0 && defaultValue != null) {
dirNames.add(defaultValue);
}
return Util.stringCollectionAsURIs(dirNames);
}
static List<URI> getCheckpointEditsDirs(Configuration conf,
String defaultName) {
Collection<String> dirNames = conf.getTrimmedStringCollection(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
if (dirNames.size() == 0 && defaultName != null) {
dirNames.add(defaultName);
}
return Util.stringCollectionAsURIs(dirNames);
}
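  /*
   * Hypothetical usage sketch for the two helpers above (the paths and the
   * default value are made up for illustration):
   *
   *   Configuration conf = new Configuration();
   *   conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
   *       "file:///data/ckpt1,file:///data/ckpt2");
   *   Collection<URI> dirs = getCheckpointDirs(conf, "file:///tmp/ckpt");
   *   // -> [file:///data/ckpt1, file:///data/ckpt2]; the default argument is
   *   //    only used when the configuration key is not set at all.
   */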
public NNStorage getStorage() {
return storage;
}
public int getLayoutVersion() {
return storage.getLayoutVersion();
}
public int getNamespaceID() {
return storage.getNamespaceID();
}
public String getClusterID() {
return storage.getClusterID();
}
public String getBlockPoolID() {
return storage.getBlockPoolID();
}
public synchronized long getLastAppliedTxId() {
return lastAppliedTxId;
}
public long getLastAppliedOrWrittenTxId() {
return Math.max(lastAppliedTxId,
editLog != null ? editLog.getLastWrittenTxId() : 0);
}
public void updateLastAppliedTxIdFromWritten() {
this.lastAppliedTxId = editLog.getLastWrittenTxId();
}
// Should be OK for this to not be synchronized since all of the places which
// mutate this value are themselves synchronized so it shouldn't be possible
// to see this flop back and forth. In the worst case this will just return an
// old value.
public long getMostRecentCheckpointTxId() {
return storage.getMostRecentCheckpointTxId();
}
}
| 57,008 | 37.389899 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.EnumCounters;
/**
* Quota feature for {@link INodeDirectory}.
*/
public final class DirectoryWithQuotaFeature implements INode.Feature {
public static final long DEFAULT_NAMESPACE_QUOTA = Long.MAX_VALUE;
public static final long DEFAULT_STORAGE_SPACE_QUOTA = HdfsConstants.QUOTA_RESET;
private QuotaCounts quota;
private QuotaCounts usage;
public static class Builder {
private QuotaCounts quota;
private QuotaCounts usage;
public Builder() {
this.quota = new QuotaCounts.Builder().nameSpace(DEFAULT_NAMESPACE_QUOTA).
storageSpace(DEFAULT_STORAGE_SPACE_QUOTA).
typeSpaces(DEFAULT_STORAGE_SPACE_QUOTA).build();
this.usage = new QuotaCounts.Builder().nameSpace(1).build();
}
public Builder nameSpaceQuota(long nameSpaceQuota) {
this.quota.setNameSpace(nameSpaceQuota);
return this;
}
public Builder storageSpaceQuota(long spaceQuota) {
this.quota.setStorageSpace(spaceQuota);
return this;
}
public Builder typeQuotas(EnumCounters<StorageType> typeQuotas) {
this.quota.setTypeSpaces(typeQuotas);
return this;
}
public Builder typeQuota(StorageType type, long quota) {
this.quota.setTypeSpace(type, quota);
return this;
}
public DirectoryWithQuotaFeature build() {
return new DirectoryWithQuotaFeature(this);
}
}
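  /*
   * A minimal usage sketch of the builder, with made-up limits: a namespace
   * quota of 10,000 names, 10 GB of storage space, and 1 GB on SSD.
   *
   *   DirectoryWithQuotaFeature q = new DirectoryWithQuotaFeature.Builder()
   *       .nameSpaceQuota(10_000)
   *       .storageSpaceQuota(10L * 1024 * 1024 * 1024)
   *       .typeQuota(StorageType.SSD, 1024L * 1024 * 1024)
   *       .build();
   */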
private DirectoryWithQuotaFeature(Builder builder) {
this.quota = builder.quota;
this.usage = builder.usage;
}
/** @return the quota set or -1 if it is not set. */
QuotaCounts getQuota() {
return new QuotaCounts.Builder().quotaCount(this.quota).build();
}
/** Set this directory's quota
*
* @param nsQuota Namespace quota to be set
* @param ssQuota Storagespace quota to be set
* @param type Storage type of the storage space quota to be set.
* To set storagespace/namespace quota, type must be null.
*/
void setQuota(long nsQuota, long ssQuota, StorageType type) {
if (type != null) {
this.quota.setTypeSpace(type, ssQuota);
} else {
setQuota(nsQuota, ssQuota);
}
}
void setQuota(long nsQuota, long ssQuota) {
this.quota.setNameSpace(nsQuota);
this.quota.setStorageSpace(ssQuota);
}
void setQuota(long quota, StorageType type) {
this.quota.setTypeSpace(type, quota);
}
/** Set storage type quota in a batch. (Only used by FSImage load)
*
* @param tsQuotas type space counts for all storage types supporting quota
*/
void setQuota(EnumCounters<StorageType> tsQuotas) {
this.quota.setTypeSpaces(tsQuotas);
}
/**
* Add current quota usage to counts and return the updated counts
* @param counts counts to be added with current quota usage
   * @return counts that have been added with the current quota usage
*/
QuotaCounts AddCurrentSpaceUsage(QuotaCounts counts) {
counts.add(this.usage);
return counts;
}
ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
final ContentSummaryComputationContext summary) {
final long original = summary.getCounts().getStoragespace();
long oldYieldCount = summary.getYieldCount();
dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
// Check only when the content has not changed in the middle.
if (oldYieldCount == summary.getYieldCount()) {
checkStoragespace(dir, summary.getCounts().getStoragespace() - original);
}
return summary;
}
private void checkStoragespace(final INodeDirectory dir, final long computed) {
if (-1 != quota.getStorageSpace() && usage.getStorageSpace() != computed) {
NameNode.LOG.error("BUG: Inconsistent storagespace for directory "
+ dir.getFullPathName() + ". Cached = " + usage.getStorageSpace()
+ " != Computed = " + computed);
}
}
void addSpaceConsumed(final INodeDirectory dir, final QuotaCounts counts,
boolean verify) throws QuotaExceededException {
if (dir.isQuotaSet()) {
// The following steps are important:
// check quotas in this inode and all ancestors before changing counts
// so that no change is made if there is any quota violation.
// (1) verify quota in this inode
if (verify) {
verifyQuota(counts);
}
// (2) verify quota and then add count in ancestors
dir.addSpaceConsumed2Parent(counts, verify);
// (3) add count in this inode
addSpaceConsumed2Cache(counts);
} else {
dir.addSpaceConsumed2Parent(counts, verify);
}
}
/** Update the space/namespace/type usage of the tree
*
* @param delta the change of the namespace/space/type usage
*/
public void addSpaceConsumed2Cache(QuotaCounts delta) {
usage.add(delta);
}
/**
   * Sets the namespace and storagespace taken by the directory rooted
* at this INode. This should be used carefully. It does not check
* for quota violations.
*
* @param namespace size of the directory to be set
   * @param storagespace storage space taken by all the nodes under this directory
* @param typespaces counters of storage type usage
*/
void setSpaceConsumed(long namespace, long storagespace,
EnumCounters<StorageType> typespaces) {
usage.setNameSpace(namespace);
usage.setStorageSpace(storagespace);
usage.setTypeSpaces(typespaces);
}
void setSpaceConsumed(QuotaCounts c) {
usage.setNameSpace(c.getNameSpace());
usage.setStorageSpace(c.getStorageSpace());
usage.setTypeSpaces(c.getTypeSpaces());
}
/** @return the namespace and storagespace and typespace consumed. */
public QuotaCounts getSpaceConsumed() {
return new QuotaCounts.Builder().quotaCount(usage).build();
}
/** Verify if the namespace quota is violated after applying delta. */
private void verifyNamespaceQuota(long delta) throws NSQuotaExceededException {
if (Quota.isViolated(quota.getNameSpace(), usage.getNameSpace(), delta)) {
throw new NSQuotaExceededException(quota.getNameSpace(),
usage.getNameSpace() + delta);
}
}
/** Verify if the storagespace quota is violated after applying delta. */
private void verifyStoragespaceQuota(long delta) throws DSQuotaExceededException {
if (Quota.isViolated(quota.getStorageSpace(), usage.getStorageSpace(), delta)) {
throw new DSQuotaExceededException(quota.getStorageSpace(),
usage.getStorageSpace() + delta);
}
}
private void verifyQuotaByStorageType(EnumCounters<StorageType> typeDelta)
throws QuotaByStorageTypeExceededException {
if (!isQuotaByStorageTypeSet()) {
return;
}
for (StorageType t: StorageType.getTypesSupportingQuota()) {
if (!isQuotaByStorageTypeSet(t)) {
continue;
}
if (Quota.isViolated(quota.getTypeSpace(t), usage.getTypeSpace(t),
typeDelta.get(t))) {
throw new QuotaByStorageTypeExceededException(
quota.getTypeSpace(t), usage.getTypeSpace(t) + typeDelta.get(t), t);
}
}
}
/**
* @throws QuotaExceededException if namespace, storagespace or storage type
* space quota is violated after applying the deltas.
*/
void verifyQuota(QuotaCounts counts) throws QuotaExceededException {
verifyNamespaceQuota(counts.getNameSpace());
verifyStoragespaceQuota(counts.getStorageSpace());
verifyQuotaByStorageType(counts.getTypeSpaces());
}
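  /*
   * For reading the checks above: Quota.isViolated(quota, usage, delta) means,
   * conceptually, "the quota is set (quota >= 0) and applying a positive delta
   * would push usage past it (usage + delta > quota)". For example, with
   * quota = 100, usage = 90 and delta = 15 the check trips, while an unset
   * quota (-1) or a delta that shrinks usage never does. This paraphrase is a
   * reading aid, not the exact implementation.
   */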
boolean isQuotaSet() {
return quota.anyNsSsCountGreaterOrEqual(0) ||
quota.anyTypeSpaceCountGreaterOrEqual(0);
}
boolean isQuotaByStorageTypeSet() {
return quota.anyTypeSpaceCountGreaterOrEqual(0);
}
boolean isQuotaByStorageTypeSet(StorageType t) {
return quota.getTypeSpace(t) >= 0;
}
private String namespaceString() {
return "namespace: " + (quota.getNameSpace() < 0? "-":
usage.getNameSpace() + "/" + quota.getNameSpace());
}
private String storagespaceString() {
return "storagespace: " + (quota.getStorageSpace() < 0? "-":
usage.getStorageSpace() + "/" + quota.getStorageSpace());
}
private String typeSpaceString() {
StringBuilder sb = new StringBuilder();
for (StorageType t : StorageType.getTypesSupportingQuota()) {
sb.append("StorageType: " + t +
(quota.getTypeSpace(t) < 0? "-":
usage.getTypeSpace(t) + "/" + usage.getTypeSpace(t)));
}
return sb.toString();
}
@Override
public String toString() {
return "Quota[" + namespaceString() + ", " + storagespaceString() +
", " + typeSpaceString() + "]";
}
}
| 9,874 | 34.142349 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageStorageInspector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
/**
* Interface responsible for inspecting a set of storage directories and devising
* a plan to load the namespace from them.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
abstract class FSImageStorageInspector {
/**
* Inspect the contents of the given storage directory.
*/
abstract void inspectDirectory(StorageDirectory sd) throws IOException;
/**
* @return false if any of the storage directories have an unfinalized upgrade
*/
abstract boolean isUpgradeFinalized();
/**
* Get the image files which should be loaded into the filesystem.
   * @throws IOException if not enough files are available (e.g. no image found in any directory)
*/
abstract List<FSImageFile> getLatestImages() throws IOException;
/**
   * Get the maximum transaction id that has been seen in the inspected storage
   * directories; the loaded image and edits must cover at least up to this id.
*/
abstract long getMaxSeenTxId();
/**
* @return true if the directories are in such a state that the image should be re-saved
* following the load
*/
abstract boolean needToSave();
/**
* Record of an image that has been located and had its filename parsed.
*/
static class FSImageFile {
final StorageDirectory sd;
final long txId;
private final File file;
FSImageFile(StorageDirectory sd, File file, long txId) {
assert txId >= 0 || txId == HdfsServerConstants.INVALID_TXID
: "Invalid txid on " + file +": " + txId;
this.sd = sd;
this.txId = txId;
this.file = file;
}
File getFile() {
return file;
}
public long getCheckpointTxId() {
return txId;
}
@Override
public String toString() {
return String.format("FSImageFile(file=%s, cpktTxId=%019d)",
file.toString(), txId);
}
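    /*
     * Note: the %019d format zero-pads the transaction id to 19 digits, so
     * image file names such as fsimage_0000000000000000042 (txId = 42) sort
     * lexicographically in transaction-id order. The example name is
     * illustrative.
     */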
}
}
| 2,989 | 30.145833 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields.PermissionStatusFormat;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
/**
* The attributes of an inode.
*/
@InterfaceAudience.Private
public interface INodeAttributes {
public boolean isDirectory();
/**
* @return null if the local name is null;
* otherwise, return the local name byte array.
*/
public byte[] getLocalNameBytes();
/** @return the user name. */
public String getUserName();
/** @return the group name. */
public String getGroupName();
/** @return the permission. */
public FsPermission getFsPermission();
/** @return the permission as a short. */
public short getFsPermissionShort();
/** @return the permission information as a long. */
public long getPermissionLong();
/** @return the ACL feature. */
public AclFeature getAclFeature();
/** @return the XAttrs feature. */
public XAttrFeature getXAttrFeature();
/** @return the modification time. */
public long getModificationTime();
/** @return the access time. */
public long getAccessTime();
/** A read-only copy of the inode attributes. */
public static abstract class SnapshotCopy implements INodeAttributes {
private final byte[] name;
private final long permission;
private final AclFeature aclFeature;
private final long modificationTime;
private final long accessTime;
private XAttrFeature xAttrFeature;
SnapshotCopy(byte[] name, PermissionStatus permissions,
AclFeature aclFeature, long modificationTime, long accessTime,
XAttrFeature xAttrFeature) {
this.name = name;
this.permission = PermissionStatusFormat.toLong(permissions);
if (aclFeature != null) {
aclFeature = AclStorage.addAclFeature(aclFeature);
}
this.aclFeature = aclFeature;
this.modificationTime = modificationTime;
this.accessTime = accessTime;
this.xAttrFeature = xAttrFeature;
}
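    // Note (added for readability): AclStorage.addAclFeature(..) appears to
    // intern/reference-count AclFeature instances so that identical ACLs
    // share one object, and the matching AclStorage.removeAclFeature(..)
    // releases that reference when a copy is destroyed. This is an
    // interpretation of the calls above, not a guarantee from this file.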
SnapshotCopy(INode inode) {
this.name = inode.getLocalNameBytes();
this.permission = inode.getPermissionLong();
if (inode.getAclFeature() != null) {
aclFeature = AclStorage.addAclFeature(inode.getAclFeature());
} else {
aclFeature = null;
}
this.modificationTime = inode.getModificationTime();
this.accessTime = inode.getAccessTime();
this.xAttrFeature = inode.getXAttrFeature();
}
@Override
public final byte[] getLocalNameBytes() {
return name;
}
@Override
public final String getUserName() {
return PermissionStatusFormat.getUser(permission);
}
@Override
public final String getGroupName() {
return PermissionStatusFormat.getGroup(permission);
}
@Override
public final FsPermission getFsPermission() {
return new FsPermission(getFsPermissionShort());
}
@Override
public final short getFsPermissionShort() {
return PermissionStatusFormat.getMode(permission);
}
@Override
public long getPermissionLong() {
return permission;
}
@Override
public AclFeature getAclFeature() {
return aclFeature;
}
@Override
public final long getModificationTime() {
return modificationTime;
}
@Override
public final long getAccessTime() {
return accessTime;
}
@Override
public final XAttrFeature getXAttrFeature() {
return xAttrFeature;
}
}
}
| 4,521 | 28.363636 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import com.google.common.annotations.VisibleForTesting;
/**
* Used to inject certain faults for testing.
*/
public class EncryptionFaultInjector {
@VisibleForTesting
public static EncryptionFaultInjector instance =
new EncryptionFaultInjector();
@VisibleForTesting
public static EncryptionFaultInjector getInstance() {
return instance;
}
@VisibleForTesting
public void startFileAfterGenerateKey() throws IOException {}
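  /*
   * Hypothetical test-side sketch (the anonymous subclass and the exception
   * message are illustrative): a test can swap the static instance to make
   * file creation fail right after an encryption key is generated.
   *
   *   EncryptionFaultInjector.instance = new EncryptionFaultInjector() {
   *     @Override
   *     public void startFileAfterGenerateKey() throws IOException {
   *       throw new IOException("injected failure after EDEK generation");
   *     }
   *   };
   */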
}
| 1,335 | 32.4 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.FileNotFoundException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
/**
* Directory INode class.
*/
public class INodeDirectory extends INodeWithAdditionalFields
implements INodeDirectoryAttributes {
/** Cast INode to INodeDirectory. */
public static INodeDirectory valueOf(INode inode, Object path
) throws FileNotFoundException, PathIsNotDirectoryException {
if (inode == null) {
throw new FileNotFoundException("Directory does not exist: "
+ DFSUtil.path2String(path));
}
if (!inode.isDirectory()) {
throw new PathIsNotDirectoryException(DFSUtil.path2String(path));
}
return inode.asDirectory();
}
protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
final static byte[] ROOT_NAME = DFSUtil.string2Bytes("");
private List<INode> children = null;
/** constructor */
public INodeDirectory(long id, byte[] name, PermissionStatus permissions,
long mtime) {
super(id, name, permissions, mtime, 0L);
}
/**
* Copy constructor
* @param other The INodeDirectory to be copied
   * @param adopt whether or not to set the parent field of the child INodes
   *              to the new node
* @param featuresToCopy any number of features to copy to the new node.
* The method will do a reference copy, not a deep copy.
*/
public INodeDirectory(INodeDirectory other, boolean adopt,
Feature... featuresToCopy) {
super(other);
this.children = other.children;
if (adopt && this.children != null) {
for (INode child : children) {
child.setParent(this);
}
}
this.features = featuresToCopy;
AclFeature aclFeature = getFeature(AclFeature.class);
if (aclFeature != null) {
// for the de-duplication of AclFeature
removeFeature(aclFeature);
addFeature(AclStorage.addAclFeature(aclFeature));
}
}
/** @return true unconditionally. */
@Override
public final boolean isDirectory() {
return true;
}
/** @return this object. */
@Override
public final INodeDirectory asDirectory() {
return this;
}
@Override
public byte getLocalStoragePolicyID() {
XAttrFeature f = getXAttrFeature();
ImmutableList<XAttr> xattrs = f == null ? ImmutableList.<XAttr> of() : f
.getXAttrs();
for (XAttr xattr : xattrs) {
if (BlockStoragePolicySuite.isStoragePolicyXAttr(xattr)) {
return (xattr.getValue())[0];
}
}
return BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}
@Override
public byte getStoragePolicyID() {
byte id = getLocalStoragePolicyID();
if (id != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
return id;
}
// if it is unspecified, check its parent
return getParent() != null ? getParent().getStoragePolicyID() : BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
}
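  /*
   * Example of the parent walk above (paths are illustrative): if /a has an
   * explicit storage policy and /a/b/c leaves it unspecified, then calling
   * getStoragePolicyID() on c returns the id set on a, because each
   * unspecified level delegates to its parent until the root is reached.
   */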
void setQuota(BlockStoragePolicySuite bsps, long nsQuota, long ssQuota, StorageType type) {
DirectoryWithQuotaFeature quota = getDirectoryWithQuotaFeature();
if (quota != null) {
// already has quota; so set the quota to the new values
if (type != null) {
quota.setQuota(ssQuota, type);
} else {
quota.setQuota(nsQuota, ssQuota);
}
if (!isQuotaSet() && !isRoot()) {
removeFeature(quota);
}
} else {
final QuotaCounts c = computeQuotaUsage(bsps);
DirectoryWithQuotaFeature.Builder builder =
new DirectoryWithQuotaFeature.Builder().nameSpaceQuota(nsQuota);
if (type != null) {
builder.typeQuota(type, ssQuota);
} else {
builder.storageSpaceQuota(ssQuota);
}
addDirectoryWithQuotaFeature(builder.build()).setSpaceConsumed(c);
}
}
@Override
public QuotaCounts getQuotaCounts() {
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
return q != null? q.getQuota(): super.getQuotaCounts();
}
@Override
public void addSpaceConsumed(QuotaCounts counts, boolean verify)
throws QuotaExceededException {
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
if (q != null) {
q.addSpaceConsumed(this, counts, verify);
} else {
addSpaceConsumed2Parent(counts, verify);
}
}
/**
* If the directory contains a {@link DirectoryWithQuotaFeature}, return it;
* otherwise, return null.
*/
public final DirectoryWithQuotaFeature getDirectoryWithQuotaFeature() {
return getFeature(DirectoryWithQuotaFeature.class);
}
  /** Does this directory have any quota set? */
final boolean isWithQuota() {
return getDirectoryWithQuotaFeature() != null;
}
DirectoryWithQuotaFeature addDirectoryWithQuotaFeature(
DirectoryWithQuotaFeature q) {
Preconditions.checkState(!isWithQuota(), "Directory is already with quota");
addFeature(q);
return q;
}
int searchChildren(byte[] name) {
return children == null? -1: Collections.binarySearch(children, name);
}
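  /*
   * Reminder on the return convention (standard Collections.binarySearch
   * semantics): a non-negative value is the index of the existing child with
   * that name; a negative value encodes the insertion point as
   * -(insertionPoint) - 1. E.g. for children [a, c], searching "b" returns -2,
   * and addChild(node, -2) below inserts at index 1.
   */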
public DirectoryWithSnapshotFeature addSnapshotFeature(
DirectoryDiffList diffs) {
Preconditions.checkState(!isWithSnapshot(),
"Directory is already with snapshot");
DirectoryWithSnapshotFeature sf = new DirectoryWithSnapshotFeature(diffs);
addFeature(sf);
return sf;
}
/**
* If feature list contains a {@link DirectoryWithSnapshotFeature}, return it;
* otherwise, return null.
*/
public final DirectoryWithSnapshotFeature getDirectoryWithSnapshotFeature() {
return getFeature(DirectoryWithSnapshotFeature.class);
}
  /** Does this directory have the snapshot feature? */
public final boolean isWithSnapshot() {
return getDirectoryWithSnapshotFeature() != null;
}
public DirectoryDiffList getDiffs() {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
return sf != null ? sf.getDiffs() : null;
}
@Override
public INodeDirectoryAttributes getSnapshotINode(int snapshotId) {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
return sf == null ? this : sf.getDiffs().getSnapshotINode(snapshotId, this);
}
@Override
public String toDetailString() {
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
return super.toDetailString() + (sf == null ? "" : ", " + sf.getDiffs());
}
public DirectorySnapshottableFeature getDirectorySnapshottableFeature() {
return getFeature(DirectorySnapshottableFeature.class);
}
public boolean isSnapshottable() {
return getDirectorySnapshottableFeature() != null;
}
public Snapshot getSnapshot(byte[] snapshotName) {
return getDirectorySnapshottableFeature().getSnapshot(snapshotName);
}
public void setSnapshotQuota(int snapshotQuota) {
getDirectorySnapshottableFeature().setSnapshotQuota(snapshotQuota);
}
public Snapshot addSnapshot(int id, String name) throws SnapshotException,
QuotaExceededException {
return getDirectorySnapshottableFeature().addSnapshot(this, id, name);
}
public Snapshot removeSnapshot(
ReclaimContext reclaimContext, String snapshotName)
throws SnapshotException {
return getDirectorySnapshottableFeature().removeSnapshot(
reclaimContext, this, snapshotName);
}
public void renameSnapshot(String path, String oldName, String newName)
throws SnapshotException {
getDirectorySnapshottableFeature().renameSnapshot(path, oldName, newName);
}
/** add DirectorySnapshottableFeature */
public void addSnapshottableFeature() {
Preconditions.checkState(!isSnapshottable(),
"this is already snapshottable, this=%s", this);
DirectoryWithSnapshotFeature s = this.getDirectoryWithSnapshotFeature();
final DirectorySnapshottableFeature snapshottable =
new DirectorySnapshottableFeature(s);
if (s != null) {
this.removeFeature(s);
}
this.addFeature(snapshottable);
}
/** remove DirectorySnapshottableFeature */
public void removeSnapshottableFeature() {
DirectorySnapshottableFeature s = getDirectorySnapshottableFeature();
Preconditions.checkState(s != null,
"The dir does not have snapshottable feature: this=%s", this);
this.removeFeature(s);
if (s.getDiffs().asList().size() > 0) {
// add a DirectoryWithSnapshotFeature back
DirectoryWithSnapshotFeature sf = new DirectoryWithSnapshotFeature(
s.getDiffs());
addFeature(sf);
}
}
/**
* Replace the given child with a new child. Note that we no longer need to
   * replace a normal INodeDirectory or INodeFile with an
   * INodeDirectoryWithSnapshot or INodeFileUnderConstruction. The only case
   * where a child must be replaced is for reference nodes.
*/
public void replaceChild(INode oldChild, final INode newChild,
final INodeMap inodeMap) {
Preconditions.checkNotNull(children);
final int i = searchChildren(newChild.getLocalNameBytes());
Preconditions.checkState(i >= 0);
Preconditions.checkState(oldChild == children.get(i)
|| oldChild == children.get(i).asReference().getReferredINode()
.asReference().getReferredINode());
oldChild = children.get(i);
if (oldChild.isReference() && newChild.isReference()) {
// both are reference nodes, e.g., DstReference -> WithName
final INodeReference.WithCount withCount =
(WithCount) oldChild.asReference().getReferredINode();
withCount.removeReference(oldChild.asReference());
}
children.set(i, newChild);
// replace the instance in the created list of the diff list
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
if (sf != null) {
sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
}
// update the inodeMap
if (inodeMap != null) {
inodeMap.put(newChild);
}
}
INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
int latestSnapshotId) {
Preconditions.checkArgument(latestSnapshotId != Snapshot.CURRENT_STATE_ID);
if (oldChild instanceof INodeReference.WithName) {
return (INodeReference.WithName)oldChild;
}
final INodeReference.WithCount withCount;
if (oldChild.isReference()) {
Preconditions.checkState(oldChild instanceof INodeReference.DstReference);
withCount = (INodeReference.WithCount) oldChild.asReference()
.getReferredINode();
} else {
withCount = new INodeReference.WithCount(null, oldChild);
}
final INodeReference.WithName ref = new INodeReference.WithName(this,
withCount, oldChild.getLocalNameBytes(), latestSnapshotId);
replaceChild(oldChild, ref, null);
return ref;
}
@Override
public void recordModification(int latestSnapshotId) {
if (isInLatestSnapshot(latestSnapshotId)
&& !shouldRecordInSrcSnapshot(latestSnapshotId)) {
// add snapshot feature if necessary
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf == null) {
sf = addSnapshotFeature(null);
}
// record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
}
}
/**
* Save the child to the latest snapshot.
*
* @return the child inode, which may be replaced.
*/
public INode saveChild2Snapshot(final INode child, final int latestSnapshotId,
final INode snapshotCopy) {
if (latestSnapshotId == Snapshot.CURRENT_STATE_ID) {
return child;
}
// add snapshot feature if necessary
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf == null) {
sf = this.addSnapshotFeature(null);
}
return sf.saveChild2Snapshot(this, child, latestSnapshotId, snapshotCopy);
}
/**
* @param name the name of the child
* @param snapshotId
* if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* from the corresponding snapshot; otherwise, get the result from
* the current directory.
* @return the child inode.
*/
public INode getChild(byte[] name, int snapshotId) {
DirectoryWithSnapshotFeature sf;
if (snapshotId == Snapshot.CURRENT_STATE_ID ||
(sf = getDirectoryWithSnapshotFeature()) == null) {
ReadOnlyList<INode> c = getCurrentChildrenList();
final int i = ReadOnlyList.Util.binarySearch(c, name);
return i < 0 ? null : c.get(i);
}
return sf.getChild(this, name, snapshotId);
}
/**
* Search for the given INode in the children list and the deleted lists of
* snapshots.
* @return {@link Snapshot#CURRENT_STATE_ID} if the inode is in the children
* list; {@link Snapshot#NO_SNAPSHOT_ID} if the inode is neither in the
* children list nor in any snapshot; otherwise the snapshot id of the
* corresponding snapshot diff list.
*/
public int searchChild(INode inode) {
INode child = getChild(inode.getLocalNameBytes(), Snapshot.CURRENT_STATE_ID);
if (child != inode) {
// inode is not in parent's children list, thus inode must be in
// snapshot. identify the snapshot id and later add it into the path
DirectoryDiffList diffs = getDiffs();
if (diffs == null) {
return Snapshot.NO_SNAPSHOT_ID;
}
return diffs.findSnapshotDeleted(inode);
} else {
return Snapshot.CURRENT_STATE_ID;
}
}
/**
* @param snapshotId
* if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result
* from the corresponding snapshot; otherwise, get the result from
* the current directory.
* @return the current children list if the specified snapshot is null;
* otherwise, return the children list corresponding to the snapshot.
* Note that the returned list is never null.
*/
public ReadOnlyList<INode> getChildrenList(final int snapshotId) {
DirectoryWithSnapshotFeature sf;
if (snapshotId == Snapshot.CURRENT_STATE_ID
|| (sf = this.getDirectoryWithSnapshotFeature()) == null) {
return getCurrentChildrenList();
}
return sf.getChildrenList(this, snapshotId);
}
private ReadOnlyList<INode> getCurrentChildrenList() {
return children == null ? ReadOnlyList.Util.<INode> emptyList()
: ReadOnlyList.Util.asReadOnlyList(children);
}
/**
* Given a child's name, return the index of the next child
*
* @param name a child's name
* @return the index of the next child
*/
static int nextChild(ReadOnlyList<INode> children, byte[] name) {
if (name.length == 0) { // empty name
return 0;
}
int nextPos = ReadOnlyList.Util.binarySearch(children, name) + 1;
if (nextPos >= 0) {
return nextPos;
}
return -nextPos;
}
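  /*
   * Worked example for nextChild(..): with children [a, b, d],
   * nextChild(children, "b") -> binarySearch = 1 -> returns 2 (index of d);
   * nextChild(children, "c") (e.g. "c" was removed while the lock was
   * yielded) -> binarySearch = -3 -> -3 + 1 = -2 -> returns 2, i.e. the
   * traversal resumes at the first child greater than "c". Names are
   * illustrative.
   */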
/**
* Remove the specified child from this directory.
*/
public boolean removeChild(INode child, int latestSnapshotId) {
if (isInLatestSnapshot(latestSnapshotId)) {
// create snapshot feature if necessary
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
if (sf == null) {
sf = this.addSnapshotFeature(null);
}
return sf.removeChild(this, child, latestSnapshotId);
}
return removeChild(child);
}
/**
* Remove the specified child from this directory.
* The basic remove method which actually calls children.remove(..).
*
* @param child the child inode to be removed
*
* @return true if the child is removed; false if the child is not found.
*/
public boolean removeChild(final INode child) {
final int i = searchChildren(child.getLocalNameBytes());
if (i < 0) {
return false;
}
final INode removed = children.remove(i);
Preconditions.checkState(removed == child);
return true;
}
/**
* Add a child inode to the directory.
*
* @param node INode to insert
   * @param setModTime set modification time for the parent node;
   *                   not needed when replaying the addition, since
   *                   the parent already has the proper mod time
* @return false if the child with this name already exists;
* otherwise, return true;
*/
public boolean addChild(INode node, final boolean setModTime,
final int latestSnapshotId) throws QuotaExceededException {
final int low = searchChildren(node.getLocalNameBytes());
if (low >= 0) {
return false;
}
if (isInLatestSnapshot(latestSnapshotId)) {
// create snapshot feature if necessary
DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
if (sf == null) {
sf = this.addSnapshotFeature(null);
}
return sf.addChild(this, node, setModTime, latestSnapshotId);
}
addChild(node, low);
if (setModTime) {
// update modification time of the parent directory
updateModificationTime(node.getModificationTime(), latestSnapshotId);
}
return true;
}
public boolean addChild(INode node) {
final int low = searchChildren(node.getLocalNameBytes());
if (low >= 0) {
return false;
}
addChild(node, low);
return true;
}
/**
* Add the node to the children list at the given insertion point.
* The basic add method which actually calls children.add(..).
*/
private void addChild(final INode node, final int insertionPoint) {
if (children == null) {
children = new ArrayList<>(DEFAULT_FILES_PER_DIRECTORY);
}
node.setParent(this);
children.add(-insertionPoint - 1, node);
if (node.getGroupName() == null) {
node.setGroup(getGroupName());
}
}
@Override
public QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
byte blockStoragePolicyId, boolean useCache, int lastSnapshotId) {
final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
QuotaCounts counts = new QuotaCounts.Builder().build();
// we are computing the quota usage for a specific snapshot here, i.e., the
// computation only includes files/directories that exist at the time of the
// given snapshot
if (sf != null && lastSnapshotId != Snapshot.CURRENT_STATE_ID
&& !(useCache && isQuotaSet())) {
ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshotId);
for (INode child : childrenList) {
final byte childPolicyId = child.getStoragePolicyIDForQuota(
blockStoragePolicyId);
counts.add(child.computeQuotaUsage(bsps, childPolicyId, useCache,
lastSnapshotId));
}
counts.addNameSpace(1);
return counts;
}
// compute the quota usage in the scope of the current directory tree
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
if (useCache && q != null && q.isQuotaSet()) { // use the cached quota
return q.AddCurrentSpaceUsage(counts);
} else {
useCache = q != null && !q.isQuotaSet() ? false : useCache;
return computeDirectoryQuotaUsage(bsps, blockStoragePolicyId, counts,
useCache, lastSnapshotId);
}
}
private QuotaCounts computeDirectoryQuotaUsage(BlockStoragePolicySuite bsps,
byte blockStoragePolicyId, QuotaCounts counts, boolean useCache,
int lastSnapshotId) {
if (children != null) {
for (INode child : children) {
final byte childPolicyId = child.getStoragePolicyIDForQuota(
blockStoragePolicyId);
counts.add(child.computeQuotaUsage(bsps, childPolicyId, useCache,
lastSnapshotId));
}
}
return computeQuotaUsage4CurrentDirectory(bsps, blockStoragePolicyId,
counts);
}
/** Add quota usage for this inode excluding children. */
public QuotaCounts computeQuotaUsage4CurrentDirectory(
BlockStoragePolicySuite bsps, byte storagePolicyId, QuotaCounts counts) {
counts.addNameSpace(1);
// include the diff list
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf != null) {
counts.add(sf.computeQuotaUsage4CurrentDirectory(bsps, storagePolicyId));
}
return counts;
}
@Override
public ContentSummaryComputationContext computeContentSummary(
ContentSummaryComputationContext summary) {
final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf != null) {
sf.computeContentSummary4Snapshot(summary.getBlockStoragePolicySuite(),
summary.getCounts());
}
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
if (q != null) {
return q.computeContentSummary(this, summary);
} else {
return computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
}
}
protected ContentSummaryComputationContext computeDirectoryContentSummary(
ContentSummaryComputationContext summary, int snapshotId) {
ReadOnlyList<INode> childrenList = getChildrenList(snapshotId);
// Explicit traversing is done to enable repositioning after relinquishing
// and reacquiring locks.
for (int i = 0; i < childrenList.size(); i++) {
INode child = childrenList.get(i);
byte[] childName = child.getLocalNameBytes();
long lastYieldCount = summary.getYieldCount();
child.computeContentSummary(summary);
// Check whether the computation was paused in the subtree.
// The counts may be off, but traversing the rest of children
// should be made safe.
if (lastYieldCount == summary.getYieldCount()) {
continue;
}
// The locks were released and reacquired. Check parent first.
if (getParent() == null) {
// Stop further counting and return whatever we have so far.
break;
}
// Obtain the children list again since it may have been modified.
childrenList = getChildrenList(snapshotId);
// Reposition in case the children list is changed. Decrement by 1
      // since it will be incremented when the loop continues.
i = nextChild(childrenList, childName) - 1;
}
// Increment the directory count for this directory.
summary.getCounts().addContent(Content.DIRECTORY, 1);
// Relinquish and reacquire locks if necessary.
summary.yield();
return summary;
}
/**
* This method is usually called by the undo section of rename.
*
* Before calling this function, in the rename operation, we replace the
* original src node (of the rename operation) with a reference node (WithName
* instance) in both the children list and a created list, delete the
* reference node from the children list, and add it to the corresponding
* deleted list.
*
* To undo the above operations, we have the following steps in particular:
*
* <pre>
* 1) remove the WithName node from the deleted list (if it exists)
* 2) replace the WithName node in the created list with srcChild
   * 3) add srcChild back as a child of srcParent. Note that since we already
   * added the node to the created list of a snapshot diff in step 2, we do
   * not need to add srcChild to the created list of the latest snapshot.
* </pre>
*
   * We do not need to update quota usage because the old child was already
   * in the deleted list.
*
* @param oldChild
* The reference node to be removed/replaced
* @param newChild
* The node to be added back
* @throws QuotaExceededException should not throw this exception
*/
public void undoRename4ScrParent(final INodeReference oldChild,
final INode newChild) throws QuotaExceededException {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
assert sf != null : "Directory does not have snapshot feature";
sf.getDiffs().removeChild(ListType.DELETED, oldChild);
sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
addChild(newChild, true, Snapshot.CURRENT_STATE_ID);
}
/**
* Undo the rename operation for the dst tree, i.e., if the rename operation
* (with OVERWRITE option) removes a file/dir from the dst tree, add it back
* and delete possible record in the deleted list.
*/
public void undoRename4DstParent(final BlockStoragePolicySuite bsps,
final INode deletedChild,
int latestSnapshotId) throws QuotaExceededException {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
assert sf != null : "Directory does not have snapshot feature";
boolean removeDeletedChild = sf.getDiffs().removeChild(ListType.DELETED,
deletedChild);
int sid = removeDeletedChild ? Snapshot.CURRENT_STATE_ID : latestSnapshotId;
final boolean added = addChild(deletedChild, true, sid);
    // update quota usage if the add succeeded and the old child had not
    // been stored in the deleted list before
if (added && !removeDeletedChild) {
final QuotaCounts counts = deletedChild.computeQuotaUsage(bsps);
addSpaceConsumed(counts, false);
}
}
/** Set the children list to null. */
public void clearChildren() {
this.children = null;
}
@Override
public void clear() {
super.clear();
clearChildren();
}
/** Call cleanSubtree(..) recursively down the subtree. */
public void cleanSubtreeRecursively(
ReclaimContext reclaimContext, final int snapshot, int prior,
final Map<INode, INode> excludedNodes) {
// in case of deletion snapshot, since this call happens after we modify
// the diff list, the snapshot to be deleted has been combined or renamed
// to its latest previous snapshot. (besides, we also need to consider nodes
// created after prior but before snapshot. this will be done in
// DirectoryWithSnapshotFeature)
int s = snapshot != Snapshot.CURRENT_STATE_ID
&& prior != Snapshot.NO_SNAPSHOT_ID ? prior : snapshot;
for (INode child : getChildrenList(s)) {
if (snapshot == Snapshot.CURRENT_STATE_ID || excludedNodes == null ||
!excludedNodes.containsKey(child)) {
child.cleanSubtree(reclaimContext, snapshot, prior);
}
}
}
@Override
public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
reclaimContext.quotaDelta().add(
new QuotaCounts.Builder().nameSpace(1).build());
final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
if (sf != null) {
sf.clear(reclaimContext, this);
}
for (INode child : getChildrenList(Snapshot.CURRENT_STATE_ID)) {
child.destroyAndCollectBlocks(reclaimContext);
}
if (getAclFeature() != null) {
AclStorage.removeAclFeature(getAclFeature());
}
clear();
reclaimContext.removedINodes.add(this);
}
@Override
public void cleanSubtree(ReclaimContext reclaimContext, final int snapshotId,
int priorSnapshotId) {
DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
// there is snapshot data
if (sf != null) {
sf.cleanDirectory(reclaimContext, this, snapshotId, priorSnapshotId);
} else {
// there is no snapshot data
if (priorSnapshotId == Snapshot.NO_SNAPSHOT_ID &&
snapshotId == Snapshot.CURRENT_STATE_ID) {
// destroy the whole subtree and collect blocks that should be deleted
destroyAndCollectBlocks(reclaimContext);
} else {
        // make a copy of the quota delta
QuotaCounts old = reclaimContext.quotaDelta().getCountsCopy();
// process recursively down the subtree
cleanSubtreeRecursively(reclaimContext, snapshotId, priorSnapshotId,
null);
QuotaCounts current = reclaimContext.quotaDelta().getCountsCopy();
current.subtract(old);
if (isQuotaSet()) {
reclaimContext.quotaDelta().addQuotaDirUpdate(this, current);
}
}
}
}
/**
* Compare the metadata with another INodeDirectory
*/
@Override
public boolean metadataEquals(INodeDirectoryAttributes other) {
return other != null
&& getQuotaCounts().equals(other.getQuotaCounts())
&& getPermissionLong() == other.getPermissionLong()
&& getAclFeature() == other.getAclFeature()
&& getXAttrFeature() == other.getXAttrFeature();
}
/*
* The following code is to dump the tree recursively for testing.
*
* \- foo (INodeDirectory@33dd2717)
* \- sub1 (INodeDirectory@442172)
* +- file1 (INodeFile@78392d4)
* +- file2 (INodeFile@78392d5)
* +- sub11 (INodeDirectory@8400cff)
* \- file3 (INodeFile@78392d6)
* \- z_file4 (INodeFile@45848712)
*/
static final String DUMPTREE_EXCEPT_LAST_ITEM = "+-";
static final String DUMPTREE_LAST_ITEM = "\\-";
@VisibleForTesting
@Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
final int snapshot) {
super.dumpTreeRecursively(out, prefix, snapshot);
out.print(", childrenSize=" + getChildrenList(snapshot).size());
final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
if (q != null) {
out.print(", " + q);
}
if (this instanceof Snapshot.Root) {
out.print(", snapshotId=" + snapshot);
}
out.println();
if (prefix.length() >= 2) {
prefix.setLength(prefix.length() - 2);
prefix.append(" ");
}
dumpTreeRecursively(out, prefix, new Iterable<SnapshotAndINode>() {
final Iterator<INode> i = getChildrenList(snapshot).iterator();
@Override
public Iterator<SnapshotAndINode> iterator() {
return new Iterator<SnapshotAndINode>() {
@Override
public boolean hasNext() {
return i.hasNext();
}
@Override
public SnapshotAndINode next() {
return new SnapshotAndINode(snapshot, i.next());
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
};
}
});
final DirectorySnapshottableFeature s = getDirectorySnapshottableFeature();
if (s != null) {
s.dumpTreeRecursively(this, out, prefix, snapshot);
}
}
/**
* Dump the given subtrees.
* @param prefix The prefix string that each line should print.
* @param subs The subtrees.
*/
@VisibleForTesting
public static void dumpTreeRecursively(PrintWriter out,
StringBuilder prefix, Iterable<SnapshotAndINode> subs) {
if (subs != null) {
for(final Iterator<SnapshotAndINode> i = subs.iterator(); i.hasNext();) {
final SnapshotAndINode pair = i.next();
prefix.append(i.hasNext()? DUMPTREE_EXCEPT_LAST_ITEM: DUMPTREE_LAST_ITEM);
pair.inode.dumpTreeRecursively(out, prefix, pair.snapshotId);
prefix.setLength(prefix.length() - 2);
}
}
}
/** A pair of Snapshot and INode objects. */
public static class SnapshotAndINode {
public final int snapshotId;
public final INode inode;
public SnapshotAndINode(int snapshot, INode inode) {
this.snapshotId = snapshot;
this.inode = inode;
}
}
public final int getChildrenNum(final int snapshotId) {
return getChildrenList(snapshotId).size();
}
}
| 33,269 | 34.889968 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import java.io.IOException;
import java.util.AbstractMap;
import java.util.List;
import java.util.Map;
import static org.apache.hadoop.util.Time.now;
class FSDirMkdirOp {
static HdfsFileStatus mkdirs(FSNamesystem fsn, String src,
PermissionStatus permissions, boolean createParent) throws IOException {
FSDirectory fsd = fsn.getFSDirectory();
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
}
if (!DFSUtil.isValidName(src)) {
throw new InvalidPathException(src);
}
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
fsd.writeLock();
try {
src = fsd.resolvePath(pc, src, pathComponents);
INodesInPath iip = fsd.getINodesInPath4Write(src);
if (fsd.isPermissionEnabled()) {
fsd.checkTraverse(pc, iip);
}
final INode lastINode = iip.getLastINode();
if (lastINode != null && lastINode.isFile()) {
throw new FileAlreadyExistsException("Path is not a directory: " + src);
}
INodesInPath existing = lastINode != null ? iip : iip.getExistingINodes();
if (lastINode == null) {
if (fsd.isPermissionEnabled()) {
fsd.checkAncestorAccess(pc, iip, FsAction.WRITE);
}
if (!createParent) {
fsd.verifyParentDir(iip, src);
}
// validate that we have enough inodes. This is, at best, a
// heuristic because the mkdirs() operation might need to
// create multiple inodes.
fsn.checkFsObjectLimit();
List<String> nonExisting = iip.getPath(existing.length(),
iip.length() - existing.length());
int length = nonExisting.size();
if (length > 1) {
List<String> ancestors = nonExisting.subList(0, length - 1);
          // Ensure that the user can traverse the path by adding implicit
// u+wx permission to all ancestor directories
existing = createChildrenDirectories(fsd, existing, ancestors,
addImplicitUwx(permissions, permissions));
if (existing == null) {
throw new IOException("Failed to create directory: " + src);
}
}
if ((existing = createChildrenDirectories(fsd, existing,
nonExisting.subList(length - 1, length), permissions)) == null) {
throw new IOException("Failed to create directory: " + src);
}
}
return fsd.getAuditFileInfo(existing);
} finally {
fsd.writeUnlock();
}
}
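  // Worked example of the permission handling above: for mkdirs("/a/b/c")
  // with requested permission 640 and createParent=true, where only "/a"
  // exists, the intermediate directory "/a/b" is created with 740 (the
  // requested permission widened by an implicit u+wx, see addImplicitUwx
  // below), while the final directory "/a/b/c" gets the requested 640.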
/**
* For a given absolute path, create all ancestors as directories along the
* path. All ancestors inherit their parent's permission plus an implicit
* u+wx permission. This is used by create() and addSymlink() for
* implicitly creating all directories along the path.
*
   * For example, for path="/foo/bar/spam", if "/foo" is an existing directory
   * and "/foo/bar" does not exist yet, the function will create directory
   * "bar".
   *
   * @return a tuple which contains both the new INodesInPath (with all the
   * existing and newly created directories) and the last component in the
   * relative path, or null if there are errors.
*/
static Map.Entry<INodesInPath, String> createAncestorDirectories(
FSDirectory fsd, INodesInPath iip, PermissionStatus permission)
throws IOException {
final String last = new String(iip.getLastLocalName(), Charsets.UTF_8);
INodesInPath existing = iip.getExistingINodes();
List<String> children = iip.getPath(existing.length(),
iip.length() - existing.length());
int size = children.size();
if (size > 1) { // otherwise all ancestors have been created
List<String> directories = children.subList(0, size - 1);
INode parentINode = existing.getLastINode();
      // Ensure that the user can traverse the path by adding implicit
// u+wx permission to all ancestor directories
existing = createChildrenDirectories(fsd, existing, directories,
addImplicitUwx(parentINode.getPermissionStatus(), permission));
if (existing == null) {
return null;
}
}
return new AbstractMap.SimpleImmutableEntry<>(existing, last);
}
/**
* Create the directory {@code parent} / {@code children} and all ancestors
* along the path.
*
* @param fsd FSDirectory
* @param existing The INodesInPath instance containing all the existing
* ancestral INodes
* @param children The relative path from the parent towards children,
* starting with "/"
   * @param perm the permission of the directory. Note that all ancestors
   * created along the path have implicit {@code u+wx} permissions.
   *
   * @return {@link INodesInPath} which contains all inodes to the
   * target directory. After the execution, parentPath points to the path of
   * the returned INodesInPath. The function returns null if the operation
   * has failed.
*/
private static INodesInPath createChildrenDirectories(FSDirectory fsd,
INodesInPath existing, List<String> children, PermissionStatus perm)
throws IOException {
assert fsd.hasWriteLock();
for (String component : children) {
existing = createSingleDirectory(fsd, existing, component, perm);
if (existing == null) {
return null;
}
}
return existing;
}
static void mkdirForEditLog(FSDirectory fsd, long inodeId, String src,
PermissionStatus permissions, List<AclEntry> aclEntries, long timestamp)
throws QuotaExceededException, UnresolvedLinkException, AclException,
FileAlreadyExistsException {
assert fsd.hasWriteLock();
INodesInPath iip = fsd.getINodesInPath(src, false);
final byte[] localName = iip.getLastLocalName();
final INodesInPath existing = iip.getParentINodesInPath();
Preconditions.checkState(existing.getLastINode() != null);
unprotectedMkdir(fsd, inodeId, existing, localName, permissions, aclEntries,
timestamp);
}
private static INodesInPath createSingleDirectory(FSDirectory fsd,
INodesInPath existing, String localName, PermissionStatus perm)
throws IOException {
assert fsd.hasWriteLock();
existing = unprotectedMkdir(fsd, fsd.allocateNewInodeId(), existing,
localName.getBytes(Charsets.UTF_8), perm, null, now());
if (existing == null) {
return null;
}
final INode newNode = existing.getLastINode();
    // Directory creation also counts towards FilesCreated
// to match count of FilesDeleted metric.
NameNode.getNameNodeMetrics().incrFilesCreated();
String cur = existing.getPath();
fsd.getEditLog().logMkDir(cur, newNode);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("mkdirs: created directory " + cur);
}
return existing;
}
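  // For example, a parent permission of 644 (rw-r--r--) yields 744
  // (rwxr--r--) for the implicitly created ancestors: the user action is
  // widened with WRITE_EXECUTE while group and other actions are unchanged.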
private static PermissionStatus addImplicitUwx(PermissionStatus parentPerm,
PermissionStatus perm) {
FsPermission p = parentPerm.getPermission();
FsPermission ancestorPerm = new FsPermission(
p.getUserAction().or(FsAction.WRITE_EXECUTE),
p.getGroupAction(),
p.getOtherAction());
return new PermissionStatus(perm.getUserName(), perm.getGroupName(),
ancestorPerm);
}
/**
* create a directory at path specified by parent
*/
private static INodesInPath unprotectedMkdir(FSDirectory fsd, long inodeId,
INodesInPath parent, byte[] name, PermissionStatus permission,
List<AclEntry> aclEntries, long timestamp)
throws QuotaExceededException, AclException, FileAlreadyExistsException {
assert fsd.hasWriteLock();
assert parent.getLastINode() != null;
if (!parent.getLastINode().isDirectory()) {
throw new FileAlreadyExistsException("Parent path is not a directory: " +
parent.getPath() + " " + DFSUtil.bytes2String(name));
}
final INodeDirectory dir = new INodeDirectory(inodeId, name, permission,
timestamp);
INodesInPath iip = fsd.addLastINode(parent, dir, true);
if (iip != null && aclEntries != null) {
AclStorage.updateINodeAcl(dir, aclEntries, Snapshot.CURRENT_STATE_ID);
}
return iip;
}
}
| 9,850 | 39.208163 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.util.Time.monotonicNow;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.PriorityQueue;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.util.Daemon;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* LeaseManager does the lease housekeeping for writing on files.
* This class also provides useful static methods for lease recovery.
*
* Lease Recovery Algorithm
* 1) Namenode retrieves lease information
* 2) For each file f in the lease, consider the last block b of f
* 2.1) Get the datanodes which contains b
* 2.2) Assign one of the datanodes as the primary datanode p
* 2.3) p obtains a new generation stamp from the namenode
* 2.4) p gets the block info from each datanode
* 2.5) p computes the minimum block length
* 2.6) p updates the datanodes, which have a valid generation stamp,
* with the new generation stamp and the minimum block length
 * 2.7) p reports the update results back to the namenode
* 2.8) Namenode updates the BlockInfo
* 2.9) Namenode removes f from the lease
* and removes the lease once all files have been removed
 * 2.10) Namenode commits the changes to the edit log
*/
@InterfaceAudience.Private
public class LeaseManager {
public static final Log LOG = LogFactory.getLog(LeaseManager.class);
private final FSNamesystem fsnamesystem;
private long softLimit = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD;
private long hardLimit = HdfsServerConstants.LEASE_HARDLIMIT_PERIOD;
//
// Used for handling lock-leases
// Mapping: leaseHolder -> Lease
//
private final SortedMap<String, Lease> leases = new TreeMap<>();
// Set of: Lease
private final PriorityQueue<Lease> sortedLeases = new PriorityQueue<>(512,
new Comparator<Lease>() {
@Override
public int compare(Lease o1, Lease o2) {
return Long.signum(o1.getLastUpdate() - o2.getLastUpdate());
}
});
// INodeID -> Lease
private final HashMap<Long, Lease> leasesById = new HashMap<>();
private Daemon lmthread;
private volatile boolean shouldRunMonitor;
LeaseManager(FSNamesystem fsnamesystem) {this.fsnamesystem = fsnamesystem;}
Lease getLease(String holder) {
return leases.get(holder);
}
/**
* This method iterates through all the leases and counts the number of blocks
* which are not COMPLETE. The FSNamesystem read lock MUST be held before
* calling this method.
*/
synchronized long getNumUnderConstructionBlocks() {
    assert this.fsnamesystem.hasReadLock() : "The FSNamesystem read lock wasn't"
        + " acquired before counting under construction blocks";
long numUCBlocks = 0;
for (Long id : getINodeIdWithLeases()) {
final INodeFile cons = fsnamesystem.getFSDirectory().getInode(id).asFile();
Preconditions.checkState(cons.isUnderConstruction());
BlockInfo[] blocks = cons.getBlocks();
if(blocks == null) {
continue;
}
for(BlockInfo b : blocks) {
if(!b.isComplete())
numUCBlocks++;
}
}
LOG.info("Number of blocks under construction: " + numUCBlocks);
return numUCBlocks;
}
Collection<Long> getINodeIdWithLeases() {return leasesById.keySet();}
/** @return the lease containing src */
public synchronized Lease getLease(INodeFile src) {return leasesById.get(src.getId());}
/** @return the number of leases currently in the system */
@VisibleForTesting
public synchronized int countLease() {
return sortedLeases.size();
}
/** @return the number of paths contained in all leases */
synchronized long countPath() {
return leasesById.size();
}
/**
* Adds (or re-adds) the lease for the specified file.
*/
synchronized Lease addLease(String holder, long inodeId) {
Lease lease = getLease(holder);
if (lease == null) {
lease = new Lease(holder);
leases.put(holder, lease);
sortedLeases.add(lease);
} else {
renewLease(lease);
}
leasesById.put(inodeId, lease);
lease.files.add(inodeId);
return lease;
}
/**
* Remove the specified lease and src.
*/
private synchronized void removeLease(Lease lease, long inodeId) {
leasesById.remove(inodeId);
if (!lease.removeFile(inodeId)) {
if (LOG.isDebugEnabled()) {
LOG.debug("inode " + inodeId + " not found in lease.files (=" + lease
+ ")");
}
}
if (!lease.hasFiles()) {
leases.remove(lease.holder);
if (!sortedLeases.remove(lease)) {
LOG.error(lease + " not found in sortedLeases");
}
}
}
/**
* Remove the lease for the specified holder and src
*/
synchronized void removeLease(String holder, INodeFile src) {
Lease lease = getLease(holder);
if (lease != null) {
removeLease(lease, src.getId());
} else {
LOG.warn("Removing non-existent lease! holder=" + holder +
" src=" + src.getFullPathName());
}
}
synchronized void removeAllLeases() {
sortedLeases.clear();
leasesById.clear();
leases.clear();
}
/**
* Reassign lease for file src to the new holder.
*/
synchronized Lease reassignLease(Lease lease, INodeFile src,
String newHolder) {
assert newHolder != null : "new lease holder is null";
if (lease != null) {
removeLease(lease, src.getId());
}
return addLease(newHolder, src.getId());
}
/**
* Renew the lease(s) held by the given client
*/
synchronized void renewLease(String holder) {
renewLease(getLease(holder));
}
synchronized void renewLease(Lease lease) {
if (lease != null) {
sortedLeases.remove(lease);
lease.renew();
sortedLeases.add(lease);
}
}
/**
* Renew all of the currently open leases.
*/
synchronized void renewAllLeases() {
for (Lease l : leases.values()) {
renewLease(l);
}
}
/************************************************************
* A Lease governs all the locks held by a single client.
* For each client there's a corresponding lease, whose
* timestamp is updated when the client periodically
* checks in. If the client dies and allows its lease to
* expire, all the corresponding locks can be released.
*************************************************************/
class Lease {
private final String holder;
private long lastUpdate;
private final HashSet<Long> files = new HashSet<>();
/** Only LeaseManager object can create a lease */
private Lease(String holder) {
this.holder = holder;
renew();
}
/** Only LeaseManager object can renew a lease */
private void renew() {
this.lastUpdate = monotonicNow();
}
/** @return true if the Hard Limit Timer has expired */
public boolean expiredHardLimit() {
return monotonicNow() - lastUpdate > hardLimit;
}
/** @return true if the Soft Limit Timer has expired */
public boolean expiredSoftLimit() {
return monotonicNow() - lastUpdate > softLimit;
}
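    // The two limits serve different purposes: once the soft limit has
    // expired, another client is generally allowed to take over the lease,
    // whereas the hard limit is what the Monitor thread checks in
    // checkLeases() to trigger recovery automatically.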
/** Does this lease contain any path? */
boolean hasFiles() {return !files.isEmpty();}
boolean removeFile(long inodeId) {
return files.remove(inodeId);
}
@Override
public String toString() {
return "[Lease. Holder: " + holder
+ ", pending creates: " + files.size() + "]";
}
@Override
public int hashCode() {
return holder.hashCode();
}
private Collection<Long> getFiles() {
return Collections.unmodifiableCollection(files);
}
String getHolder() {
return holder;
}
@VisibleForTesting
long getLastUpdate() {
return lastUpdate;
}
}
@VisibleForTesting
synchronized void removeLeases(Collection<Long> inodes) {
for (long inode : inodes) {
Lease lease = leasesById.get(inode);
if (lease != null) {
removeLease(lease, inode);
}
}
}
public void setLeasePeriod(long softLimit, long hardLimit) {
this.softLimit = softLimit;
this.hardLimit = hardLimit;
}
/******************************************************
* Monitor checks for leases that have expired,
* and disposes of them.
******************************************************/
class Monitor implements Runnable {
final String name = getClass().getSimpleName();
/** Check leases periodically. */
@Override
public void run() {
for(; shouldRunMonitor && fsnamesystem.isRunning(); ) {
boolean needSync = false;
try {
fsnamesystem.writeLockInterruptibly();
try {
if (!fsnamesystem.isInSafeMode()) {
needSync = checkLeases();
}
} finally {
fsnamesystem.writeUnlock();
          // lease reassignments should be sync'ed.
if (needSync) {
fsnamesystem.getEditLog().logSync();
}
}
Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL);
} catch(InterruptedException ie) {
if (LOG.isDebugEnabled()) {
LOG.debug(name + " is interrupted", ie);
}
} catch(Throwable e) {
LOG.warn("Unexpected throwable: ", e);
}
}
}
}
/** Check the leases beginning from the oldest.
   * @return true if sync is needed.
*/
@VisibleForTesting
synchronized boolean checkLeases() {
boolean needSync = false;
assert fsnamesystem.hasWriteLock();
while(!sortedLeases.isEmpty() && sortedLeases.peek().expiredHardLimit()) {
Lease leaseToCheck = sortedLeases.poll();
LOG.info(leaseToCheck + " has expired hard limit");
final List<Long> removing = new ArrayList<>();
      // need to create a copy of the oldest lease's files, because
      // internalReleaseLease() removes files corresponding to empty files,
      // i.e. it modifies the collection being iterated over, which would
      // otherwise cause a ConcurrentModificationException
Collection<Long> files = leaseToCheck.getFiles();
Long[] leaseINodeIds = files.toArray(new Long[files.size()]);
FSDirectory fsd = fsnamesystem.getFSDirectory();
String p = null;
for(Long id : leaseINodeIds) {
try {
INodesInPath iip = INodesInPath.fromINode(fsd.getInode(id));
p = iip.getPath();
// Sanity check to make sure the path is correct
if (!p.startsWith("/")) {
throw new IOException("Invalid path in the lease " + p);
}
boolean completed = fsnamesystem.internalReleaseLease(
leaseToCheck, p, iip,
HdfsServerConstants.NAMENODE_LEASE_HOLDER);
if (LOG.isDebugEnabled()) {
if (completed) {
LOG.debug("Lease recovery for inode " + id + " is complete. " +
"File closed.");
} else {
LOG.debug("Started block recovery " + p + " lease " + leaseToCheck);
}
}
// If a lease recovery happened, we need to sync later.
if (!needSync && !completed) {
needSync = true;
}
} catch (IOException e) {
LOG.error("Cannot release the path " + p + " in the lease "
+ leaseToCheck, e);
removing.add(id);
}
}
for(Long id : removing) {
removeLease(leaseToCheck, id);
}
}
return needSync;
}
@Override
public synchronized String toString() {
return getClass().getSimpleName() + "= {"
+ "\n leases=" + leases
+ "\n sortedLeases=" + sortedLeases
+ "\n leasesById=" + leasesById
+ "\n}";
}
void startMonitor() {
Preconditions.checkState(lmthread == null,
"Lease Monitor already running");
shouldRunMonitor = true;
lmthread = new Daemon(new Monitor());
lmthread.start();
}
void stopMonitor() {
if (lmthread != null) {
shouldRunMonitor = false;
try {
lmthread.interrupt();
lmthread.join(3000);
} catch (InterruptedException ie) {
LOG.warn("Encountered exception ", ie);
}
lmthread = null;
}
}
/**
* Trigger the currently-running Lease monitor to re-check
* its leases immediately. This is for use by unit tests.
*/
@VisibleForTesting
public void triggerMonitorCheckNow() {
Preconditions.checkState(lmthread != null,
"Lease monitor is not running");
lmthread.interrupt();
}
@VisibleForTesting
public void runLeaseChecks() {
checkLeases();
}
}
| 13,964 | 29.692308 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.util.Time.monotonicNow;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.util.Daemon;
import com.google.common.collect.Lists;
/**
* The Checkpointer is responsible for supporting periodic checkpoints
* of the HDFS metadata.
*
 * The Checkpointer is a daemon that periodically wakes up
 * (at an interval determined by the schedule specified in the configuration),
 * triggers a periodic checkpoint and then goes back to sleep.
*
* The start of a checkpoint is triggered by one of the two factors:
* (1) time or (2) the size of the edits file.
*/
class Checkpointer extends Daemon {
public static final Log LOG =
LogFactory.getLog(Checkpointer.class.getName());
private final BackupNode backupNode;
volatile boolean shouldRun;
private String infoBindAddress;
private CheckpointConf checkpointConf;
private final Configuration conf;
private BackupImage getFSImage() {
return (BackupImage)backupNode.getFSImage();
}
private NamenodeProtocol getRemoteNamenodeProxy(){
return backupNode.namenode;
}
/**
* Create a connection to the primary namenode.
*/
Checkpointer(Configuration conf, BackupNode bnNode) throws IOException {
this.conf = conf;
this.backupNode = bnNode;
try {
initialize(conf);
} catch(IOException e) {
LOG.warn("Checkpointer got exception", e);
shutdown();
throw e;
}
}
/**
* Initialize checkpoint.
*/
private void initialize(Configuration conf) throws IOException {
// Create connection to the namenode.
shouldRun = true;
// Initialize other scheduling parameters from the configuration
checkpointConf = new CheckpointConf(conf);
// Pull out exact http address for posting url to avoid ip aliasing issues
String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT);
infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.indexOf(":"));
LOG.info("Checkpoint Period : " +
checkpointConf.getPeriod() + " secs " +
"(" + checkpointConf.getPeriod()/60 + " min)");
LOG.info("Transactions count is : " +
checkpointConf.getTxnCount() +
", to trigger checkpoint");
}
/**
* Shut down the checkpointer.
*/
void shutdown() {
shouldRun = false;
backupNode.stop();
}
//
// The main work loop
//
@Override
public void run() {
// Check the size of the edit log once every 5 minutes.
    long periodMSec = 5 * 60; // 5 minutes, in seconds until converted below
if(checkpointConf.getPeriod() < periodMSec) {
periodMSec = checkpointConf.getPeriod();
}
periodMSec *= 1000;
long lastCheckpointTime = 0;
if (!backupNode.shouldCheckpointAtStartup()) {
lastCheckpointTime = monotonicNow();
}
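    // A checkpoint is triggered by whichever comes first: the configured
    // checkpoint period elapsing, or the number of uncheckpointed
    // transactions reaching the configured threshold.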
while(shouldRun) {
try {
long now = monotonicNow();
boolean shouldCheckpoint = false;
if(now >= lastCheckpointTime + periodMSec) {
shouldCheckpoint = true;
} else {
long txns = countUncheckpointedTxns();
if(txns >= checkpointConf.getTxnCount())
shouldCheckpoint = true;
}
if(shouldCheckpoint) {
doCheckpoint();
lastCheckpointTime = now;
}
} catch(IOException e) {
LOG.error("Exception in doCheckpoint: ", e);
} catch(Throwable e) {
LOG.error("Throwable Exception in doCheckpoint: ", e);
shutdown();
break;
}
try {
Thread.sleep(periodMSec);
} catch(InterruptedException ie) {
// do nothing
}
}
}
private long countUncheckpointedTxns() throws IOException {
long curTxId = getRemoteNamenodeProxy().getTransactionID();
long uncheckpointedTxns = curTxId -
getFSImage().getStorage().getMostRecentCheckpointTxId();
assert uncheckpointedTxns >= 0;
return uncheckpointedTxns;
}
/**
* Create a new checkpoint
*/
void doCheckpoint() throws IOException {
BackupImage bnImage = getFSImage();
NNStorage bnStorage = bnImage.getStorage();
long startTime = monotonicNow();
bnImage.freezeNamespaceAtNextRoll();
NamenodeCommand cmd =
getRemoteNamenodeProxy().startCheckpoint(backupNode.getRegistration());
CheckpointCommand cpCmd = null;
switch(cmd.getAction()) {
case NamenodeProtocol.ACT_SHUTDOWN:
shutdown();
throw new IOException("Name-node " + backupNode.nnRpcAddress
+ " requested shutdown.");
case NamenodeProtocol.ACT_CHECKPOINT:
cpCmd = (CheckpointCommand)cmd;
break;
default:
throw new IOException("Unsupported NamenodeCommand: "+cmd.getAction());
}
bnImage.waitUntilNamespaceFrozen();
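    // While frozen, the backup node stops applying incoming edits, so the
    // image saved below corresponds to a stable transaction id.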
CheckpointSignature sig = cpCmd.getSignature();
// Make sure we're talking to the same NN!
sig.validateStorageInfo(bnImage);
long lastApplied = bnImage.getLastAppliedTxId();
LOG.debug("Doing checkpoint. Last applied: " + lastApplied);
RemoteEditLogManifest manifest =
getRemoteNamenodeProxy().getEditLogManifest(bnImage.getLastAppliedTxId() + 1);
boolean needReloadImage = false;
if (!manifest.getLogs().isEmpty()) {
RemoteEditLog firstRemoteLog = manifest.getLogs().get(0);
// we don't have enough logs to roll forward using only logs. Need
// to download and load the image.
if (firstRemoteLog.getStartTxId() > lastApplied + 1) {
LOG.info("Unable to roll forward using only logs. Downloading " +
"image with txid " + sig.mostRecentCheckpointTxId);
MD5Hash downloadedHash = TransferFsImage.downloadImageToStorage(
backupNode.nnHttpAddress, sig.mostRecentCheckpointTxId, bnStorage,
true);
bnImage.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE,
sig.mostRecentCheckpointTxId, downloadedHash);
lastApplied = sig.mostRecentCheckpointTxId;
needReloadImage = true;
}
if (firstRemoteLog.getStartTxId() > lastApplied + 1) {
throw new IOException("No logs to roll forward from " + lastApplied);
}
// get edits files
for (RemoteEditLog log : manifest.getLogs()) {
TransferFsImage.downloadEditsToStorage(
backupNode.nnHttpAddress, log, bnStorage);
}
if(needReloadImage) {
LOG.info("Loading image with txid " + sig.mostRecentCheckpointTxId);
File file = bnStorage.findImageFile(NameNodeFile.IMAGE,
sig.mostRecentCheckpointTxId);
bnImage.reloadFromImageFile(file, backupNode.getNamesystem());
}
rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem());
}
long txid = bnImage.getLastAppliedTxId();
backupNode.namesystem.writeLock();
try {
backupNode.namesystem.setImageLoaded();
if(backupNode.namesystem.getBlocksTotal() > 0) {
long completeBlocksTotal =
backupNode.namesystem.getCompleteBlocksTotal();
backupNode.namesystem.setBlockTotal(completeBlocksTotal);
}
bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid);
if (!backupNode.namesystem.isRollingUpgrade()) {
bnStorage.writeAll();
}
} finally {
backupNode.namesystem.writeUnlock();
}
if(cpCmd.needToReturnImage()) {
TransferFsImage.uploadImageFromStorage(backupNode.nnHttpAddress, conf,
bnStorage, NameNodeFile.IMAGE, txid);
}
getRemoteNamenodeProxy().endCheckpoint(backupNode.getRegistration(), sig);
if (backupNode.getRole() == NamenodeRole.BACKUP) {
bnImage.convergeJournalSpool();
}
backupNode.setRegistration(); // keep registration up to date
long imageSize = bnImage.getStorage().getFsImageName(txid).length();
LOG.info("Checkpoint completed in "
+ (monotonicNow() - startTime)/1000 + " seconds."
+ " New Image Size: " + imageSize);
}
private URL getImageListenAddress() {
InetSocketAddress httpSocAddr = backupNode.getHttpAddress();
int httpPort = httpSocAddr.getPort();
try {
return new URL(DFSUtil.getHttpClientScheme(conf) + "://" + infoBindAddress + ":" + httpPort);
} catch (MalformedURLException e) {
// Unreachable
throw new RuntimeException(e);
}
}
static void rollForwardByApplyingLogs(
RemoteEditLogManifest manifest,
FSImage dstImage,
FSNamesystem dstNamesystem) throws IOException {
NNStorage dstStorage = dstImage.getStorage();
List<EditLogInputStream> editsStreams = Lists.newArrayList();
for (RemoteEditLog log : manifest.getLogs()) {
if (log.getEndTxId() > dstImage.getLastAppliedTxId()) {
File f = dstStorage.findFinalizedEditsFile(
log.getStartTxId(), log.getEndTxId());
editsStreams.add(new EditLogFileInputStream(f, log.getStartTxId(),
log.getEndTxId(), true));
}
}
LOG.info("Checkpointer about to load edits from " +
editsStreams.size() + " stream(s).");
dstImage.loadEdits(editsStreams, dstNamesystem);
}
}
| 11,070 | 33.814465 | 99 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import static org.apache.hadoop.util.Time.now;
class FSDirStatAndListingOp {
static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
byte[] startAfter, boolean needLocation) throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory
.getPathComponentsForReservedPath(srcArg);
final String startAfterString = new String(startAfter, Charsets.UTF_8);
final String src = fsd.resolvePath(pc, srcArg, pathComponents);
final INodesInPath iip = fsd.getINodesInPath(src, true);
// Get file name when startAfter is an INodePath
if (FSDirectory.isReservedName(startAfterString)) {
byte[][] startAfterComponents = FSDirectory
.getPathComponentsForReservedPath(startAfterString);
try {
String tmp = FSDirectory.resolvePath(src, startAfterComponents, fsd);
byte[][] regularPath = INode.getPathComponents(tmp);
startAfter = regularPath[regularPath.length - 1];
} catch (IOException e) {
// Possibly the inode is deleted
throw new DirectoryListingStartAfterNotFoundException(
"Can't find startAfter " + startAfterString);
}
}
boolean isSuperUser = true;
if (fsd.isPermissionEnabled()) {
if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
} else {
fsd.checkTraverse(pc, iip);
}
isSuperUser = pc.isSuperUser();
}
return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
}
/**
* Get the file info for a specific file.
*
* @param srcArg The string representation of the path to the file
* @param resolveLink whether to throw UnresolvedLinkException
* if src refers to a symlink
*
* @return object containing information regarding the file
* or null if file not found
*/
static HdfsFileStatus getFileInfo(
FSDirectory fsd, String srcArg, boolean resolveLink)
throws IOException {
String src = srcArg;
if (!DFSUtil.isValidName(src)) {
throw new InvalidPathException("Invalid file name: " + src);
}
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
boolean isSuperUser = true;
if (fsd.isPermissionEnabled()) {
fsd.checkPermission(pc, iip, false, null, null, null, null, false);
isSuperUser = pc.isSuperUser();
}
return getFileInfo(fsd, src, resolveLink,
FSDirectory.isReservedRawName(srcArg), isSuperUser);
}
/**
* Returns true if the file is closed
*/
static boolean isFileClosed(FSDirectory fsd, String src) throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
final INodesInPath iip = fsd.getINodesInPath(src, true);
if (fsd.isPermissionEnabled()) {
fsd.checkTraverse(pc, iip);
}
return !INodeFile.valueOf(iip.getLastINode(), src).isUnderConstruction();
}
static ContentSummary getContentSummary(
FSDirectory fsd, String src) throws IOException {
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
FSPermissionChecker pc = fsd.getPermissionChecker();
src = fsd.resolvePath(pc, src, pathComponents);
final INodesInPath iip = fsd.getINodesInPath(src, false);
if (fsd.isPermissionEnabled()) {
fsd.checkPermission(pc, iip, false, null, null, null,
FsAction.READ_EXECUTE);
}
return getContentSummaryInt(fsd, iip);
}
/**
* Get block locations within the specified range.
* @see ClientProtocol#getBlockLocations(String, long, long)
* @throws IOException
*/
static GetBlockLocationsResult getBlockLocations(
FSDirectory fsd, FSPermissionChecker pc, String src, long offset,
long length, boolean needBlockToken) throws IOException {
Preconditions.checkArgument(offset >= 0,
"Negative offset is not supported. File: " + src);
Preconditions.checkArgument(length >= 0,
"Negative length is not supported. File: " + src);
CacheManager cm = fsd.getFSNamesystem().getCacheManager();
BlockManager bm = fsd.getBlockManager();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
boolean isReservedName = FSDirectory.isReservedRawName(src);
fsd.readLock();
try {
src = fsd.resolvePath(pc, src, pathComponents);
final INodesInPath iip = fsd.getINodesInPath(src, true);
final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.READ);
fsd.checkUnreadableBySuperuser(pc, inode, iip.getPathSnapshotId());
}
final long fileSize = iip.isSnapshot()
? inode.computeFileSize(iip.getPathSnapshotId())
: inode.computeFileSizeNotIncludingLastUcBlock();
boolean isUc = inode.isUnderConstruction();
if (iip.isSnapshot()) {
// if src indicates a snapshot file, we need to make sure the returned
// blocks do not exceed the size of the snapshot file.
length = Math.min(length, fileSize - offset);
isUc = false;
}
final FileEncryptionInfo feInfo = isReservedName ? null
: fsd.getFileEncryptionInfo(inode, iip.getPathSnapshotId(), iip);
final LocatedBlocks blocks = bm.createLocatedBlocks(
inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset,
length, needBlockToken, iip.isSnapshot(), feInfo);
// Set caching information for the located blocks.
for (LocatedBlock lb : blocks.getLocatedBlocks()) {
cm.setCachedLocations(lb);
}
final long now = now();
boolean updateAccessTime = fsd.isAccessTimeSupported()
&& !iip.isSnapshot()
&& now > inode.getAccessTime() + fsd.getAccessTimePrecision();
return new GetBlockLocationsResult(updateAccessTime, blocks);
} finally {
fsd.readUnlock();
}
}
private static byte getStoragePolicyID(byte inodePolicy, byte parentPolicy) {
return inodePolicy != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED
? inodePolicy : parentPolicy;
}
/**
* Get a partial listing of the indicated directory
*
* We will stop when any of the following conditions is met:
* 1) this.lsLimit files have been added
* 2) needLocation is true AND enough files have been added such
* that at least this.lsLimit block locations are in the response
*
* @param fsd FSDirectory
* @param iip the INodesInPath instance containing all the INodes along the
* path
* @param src the directory name
* @param startAfter the name to start listing after
* @param needLocation if block locations are returned
* @return a partial listing starting after startAfter
*/
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
throws IOException {
String srcs = FSDirectory.normalizePath(src);
final boolean isRawPath = FSDirectory.isReservedRawName(src);
fsd.readLock();
try {
if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
return getSnapshotsListing(fsd, srcs, startAfter);
}
final int snapshot = iip.getPathSnapshotId();
final INode targetNode = iip.getLastINode();
if (targetNode == null)
return null;
byte parentStoragePolicy = isSuperUser ?
targetNode.getStoragePolicyID() : HdfsConstants
.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
if (!targetNode.isDirectory()) {
INodeAttributes nodeAttrs = getINodeAttributes(
fsd, src, HdfsFileStatus.EMPTY_NAME, targetNode,
snapshot);
return new DirectoryListing(
new HdfsFileStatus[]{ createFileStatus(
fsd, HdfsFileStatus.EMPTY_NAME, targetNode, nodeAttrs,
needLocation, parentStoragePolicy, snapshot, isRawPath, iip)
}, 0);
}
final INodeDirectory dirInode = targetNode.asDirectory();
final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
int startChild = INodeDirectory.nextChild(contents, startAfter);
int totalNumChildren = contents.size();
int numOfListing = Math.min(totalNumChildren - startChild,
fsd.getLsLimit());
int locationBudget = fsd.getLsLimit();
int listingCnt = 0;
HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
for (int i=0; i<numOfListing && locationBudget>0; i++) {
INode cur = contents.get(startChild+i);
byte curPolicy = isSuperUser && !cur.isSymlink()?
cur.getLocalStoragePolicyID():
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
INodeAttributes nodeAttrs = getINodeAttributes(
fsd, src, cur.getLocalNameBytes(), cur,
snapshot);
listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(),
cur, nodeAttrs, needLocation, getStoragePolicyID(curPolicy,
parentStoragePolicy), snapshot, isRawPath, iip);
listingCnt++;
if (needLocation) {
// Once we hit lsLimit locations, stop.
// This helps to prevent excessively large response payloads.
// Approximate #locations with locatedBlockCount() * repl_factor
LocatedBlocks blks =
((HdfsLocatedFileStatus)listing[i]).getBlockLocations();
locationBudget -= (blks == null) ? 0 :
blks.locatedBlockCount() * listing[i].getReplication();
}
}
// truncate return array if necessary
if (listingCnt < numOfListing) {
listing = Arrays.copyOf(listing, listingCnt);
}
return new DirectoryListing(
listing, totalNumChildren-startChild-listingCnt);
} finally {
fsd.readUnlock();
}
}
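  // Callers page through large directories by passing the name of the last
  // returned entry as 'startAfter' on the next call; the second argument of
  // the DirectoryListing above is the number of entries still remaining.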
/**
* Get a listing of all the snapshots of a snapshottable directory
*/
private static DirectoryListing getSnapshotsListing(
FSDirectory fsd, String src, byte[] startAfter)
throws IOException {
Preconditions.checkState(fsd.hasReadLock());
Preconditions.checkArgument(
src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
"%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
final String dirPath = FSDirectory.normalizePath(src.substring(0,
src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));
final INode node = fsd.getINode(dirPath);
final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
if (sf == null) {
throw new SnapshotException(
"Directory is not a snapshottable directory: " + dirPath);
}
final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
for (int i = 0; i < numOfListing; i++) {
Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
INodeAttributes nodeAttrs = getINodeAttributes(
fsd, src, sRoot.getLocalNameBytes(),
node, Snapshot.CURRENT_STATE_ID);
listing[i] = createFileStatus(
fsd, sRoot.getLocalNameBytes(),
sRoot, nodeAttrs,
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
Snapshot.CURRENT_STATE_ID, false,
INodesInPath.fromINode(sRoot));
}
return new DirectoryListing(
listing, snapshots.size() - skipSize - numOfListing);
}
/** Get the file info for a specific file.
* @param fsd FSDirectory
   * @param path The string representation of the path to the file
   * @param src the INodesInPath containing the resolved INodes for the path
* @param isRawPath true if a /.reserved/raw pathname was passed by the user
* @param includeStoragePolicy whether to include storage policy
* @return object containing information regarding the file
* or null if file not found
*/
static HdfsFileStatus getFileInfo(
FSDirectory fsd, String path, INodesInPath src, boolean isRawPath,
boolean includeStoragePolicy)
throws IOException {
fsd.readLock();
try {
final INode i = src.getLastINode();
if (i == null) {
return null;
}
byte policyId = includeStoragePolicy && !i.isSymlink() ?
i.getStoragePolicyID() :
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
INodeAttributes nodeAttrs = getINodeAttributes(fsd, path,
HdfsFileStatus.EMPTY_NAME,
i, src.getPathSnapshotId());
return createFileStatus(fsd, HdfsFileStatus.EMPTY_NAME, i, nodeAttrs,
policyId, src.getPathSnapshotId(), isRawPath, src);
} finally {
fsd.readUnlock();
}
}
static HdfsFileStatus getFileInfo(
FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath,
boolean includeStoragePolicy)
throws IOException {
String srcs = FSDirectory.normalizePath(src);
if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
if (fsd.getINode4DotSnapshot(srcs) != null) {
return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
}
return null;
}
fsd.readLock();
try {
final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
return getFileInfo(fsd, src, iip, isRawPath, includeStoragePolicy);
} finally {
fsd.readUnlock();
}
}
/**
* create an hdfs file status from an inode
*
* @param fsd FSDirectory
* @param path the local name
* @param node inode
* @param needLocation if block locations need to be included or not
* @param isRawPath true if this is being called on behalf of a path in
* /.reserved/raw
* @return a file status
* @throws java.io.IOException if any error occurs
*/
private static HdfsFileStatus createFileStatus(
FSDirectory fsd, byte[] path, INode node, INodeAttributes nodeAttrs,
boolean needLocation, byte storagePolicy, int snapshot, boolean isRawPath,
INodesInPath iip)
throws IOException {
if (needLocation) {
return createLocatedFileStatus(fsd, path, node, nodeAttrs, storagePolicy,
snapshot, isRawPath, iip);
} else {
return createFileStatus(fsd, path, node, nodeAttrs, storagePolicy,
snapshot, isRawPath, iip);
}
}
/**
* Create FileStatus by file INode
*/
static HdfsFileStatus createFileStatusForEditLog(
FSDirectory fsd, String fullPath, byte[] path, INode node,
byte storagePolicy, int snapshot, boolean isRawPath,
INodesInPath iip) throws IOException {
INodeAttributes nodeAttrs = getINodeAttributes(
fsd, fullPath, path, node, snapshot);
return createFileStatus(fsd, path, node, nodeAttrs,
storagePolicy, snapshot, isRawPath, iip);
}
/**
* Create FileStatus by file INode
*/
static HdfsFileStatus createFileStatus(
FSDirectory fsd, byte[] path, INode node,
INodeAttributes nodeAttrs, byte storagePolicy, int snapshot,
boolean isRawPath, INodesInPath iip) throws IOException {
long size = 0; // length is zero for directories
short replication = 0;
long blocksize = 0;
final boolean isEncrypted;
final FileEncryptionInfo feInfo = isRawPath ? null :
fsd.getFileEncryptionInfo(node, snapshot, iip);
if (node.isFile()) {
final INodeFile fileNode = node.asFile();
size = fileNode.computeFileSize(snapshot);
replication = fileNode.getFileReplication(snapshot);
blocksize = fileNode.getPreferredBlockSize();
isEncrypted = (feInfo != null) ||
(isRawPath && fsd.isInAnEZ(INodesInPath.fromINode(node)));
} else {
isEncrypted = fsd.isInAnEZ(INodesInPath.fromINode(node));
}
int childrenNum = node.isDirectory() ?
node.asDirectory().getChildrenNum(snapshot) : 0;
return new HdfsFileStatus(
size,
node.isDirectory(),
replication,
blocksize,
node.getModificationTime(snapshot),
node.getAccessTime(snapshot),
getPermissionForFileStatus(nodeAttrs, isEncrypted),
nodeAttrs.getUserName(),
nodeAttrs.getGroupName(),
node.isSymlink() ? node.asSymlink().getSymlink() : null,
path,
node.getId(),
childrenNum,
feInfo,
storagePolicy);
}
private static INodeAttributes getINodeAttributes(
FSDirectory fsd, String fullPath, byte[] path, INode node, int snapshot) {
return fsd.getAttributes(fullPath, path, node, snapshot);
}
/**
* Create FileStatus with location info by file INode
*/
private static HdfsLocatedFileStatus createLocatedFileStatus(
FSDirectory fsd, byte[] path, INode node, INodeAttributes nodeAttrs,
byte storagePolicy, int snapshot,
boolean isRawPath, INodesInPath iip) throws IOException {
assert fsd.hasReadLock();
long size = 0; // length is zero for directories
short replication = 0;
long blocksize = 0;
LocatedBlocks loc = null;
final boolean isEncrypted;
final FileEncryptionInfo feInfo = isRawPath ? null :
fsd.getFileEncryptionInfo(node, snapshot, iip);
if (node.isFile()) {
final INodeFile fileNode = node.asFile();
size = fileNode.computeFileSize(snapshot);
replication = fileNode.getFileReplication(snapshot);
blocksize = fileNode.getPreferredBlockSize();
final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
final long fileSize = !inSnapshot && isUc ?
fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
loc = fsd.getBlockManager().createLocatedBlocks(
fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false,
inSnapshot, feInfo);
if (loc == null) {
loc = new LocatedBlocks();
}
isEncrypted = (feInfo != null) ||
(isRawPath && fsd.isInAnEZ(INodesInPath.fromINode(node)));
} else {
isEncrypted = fsd.isInAnEZ(INodesInPath.fromINode(node));
}
int childrenNum = node.isDirectory() ?
node.asDirectory().getChildrenNum(snapshot) : 0;
HdfsLocatedFileStatus status =
new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
blocksize, node.getModificationTime(snapshot),
node.getAccessTime(snapshot),
getPermissionForFileStatus(nodeAttrs, isEncrypted),
nodeAttrs.getUserName(), nodeAttrs.getGroupName(),
node.isSymlink() ? node.asSymlink().getSymlink() : null, path,
node.getId(), loc, childrenNum, feInfo, storagePolicy);
// Set caching information for the located blocks.
if (loc != null) {
CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager();
for (LocatedBlock lb: loc.getLocatedBlocks()) {
cacheManager.setCachedLocations(lb);
}
}
return status;
}
/**
* Returns an inode's FsPermission for use in an outbound FileStatus. If the
* inode has an ACL or is for an encrypted file/dir, then this method will
* return an FsPermissionExtension.
*
* @param node INode to check
* @param isEncrypted boolean true if the file/dir is encrypted
* @return FsPermission from inode, with ACL bit on if the inode has an ACL
* and encrypted bit on if it represents an encrypted file/dir.
*/
private static FsPermission getPermissionForFileStatus(
INodeAttributes node, boolean isEncrypted) {
FsPermission perm = node.getFsPermission();
boolean hasAcl = node.getAclFeature() != null;
if (hasAcl || isEncrypted) {
perm = new FsPermissionExtension(perm, hasAcl, isEncrypted);
}
return perm;
}
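  // For example, a file with mode 640 that also has an ACL is reported with
  // an FsPermissionExtension whose ACL bit is set; clients (such as the
  // shell's ls, which appends '+') can surface that bit without the
  // underlying mode bits changing.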
private static ContentSummary getContentSummaryInt(FSDirectory fsd,
INodesInPath iip) throws IOException {
fsd.readLock();
try {
INode targetNode = iip.getLastINode();
if (targetNode == null) {
throw new FileNotFoundException("File does not exist: " + iip.getPath());
}
else {
        // Make it relinquish locks every time contentCountLimit entries are
// processed. 0 means disabled. I.e. blocking for the entire duration.
ContentSummaryComputationContext cscc =
new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
ContentSummary cs = targetNode.computeAndConvertContentSummary(cscc);
fsd.addYieldCount(cscc.getYieldCount());
return cs;
}
} finally {
fsd.readUnlock();
}
}
static class GetBlockLocationsResult {
final boolean updateAccessTime;
final LocatedBlocks blocks;
boolean updateAccessTime() {
return updateAccessTime;
}
private GetBlockLocationsResult(
boolean updateAccessTime, LocatedBlocks blocks) {
this.updateAccessTime = updateAccessTime;
this.blocks = blocks;
}
}
}
| 24,095 | 39.227045 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature;
@InterfaceAudience.Private
public class NameNodeLayoutVersion {
/** Build layout version and corresponding feature matrix */
public final static Map<Integer, SortedSet<LayoutFeature>> FEATURES
= new HashMap<Integer, SortedSet<LayoutFeature>>();
public static final int CURRENT_LAYOUT_VERSION
= LayoutVersion.getCurrentLayoutVersion(Feature.values());
public static final int MINIMUM_COMPATIBLE_LAYOUT_VERSION
= LayoutVersion.getMinimumCompatibleLayoutVersion(Feature.values());
static {
LayoutVersion.updateMap(FEATURES, LayoutVersion.Feature.values());
LayoutVersion.updateMap(FEATURES, NameNodeLayoutVersion.Feature.values());
}
public static SortedSet<LayoutFeature> getFeatures(int lv) {
return FEATURES.get(lv);
}
public static boolean supports(final LayoutFeature f, final int lv) {
return LayoutVersion.supports(FEATURES, f, lv);
}
/**
* Enums for features that change the layout version.
* <br><br>
* To add a new layout version:
* <ul>
* <li>Define a new enum constant with a short enum name, the new layout version
* and description of the added feature.</li>
 * <li>When adding a layout version with an ancestor that is not the same as
* its immediate predecessor, use the constructor where a specific ancestor
* can be passed.
* </li>
* <li>Specify a minimum compatible layout version. The minimum compatible
* layout version is the earliest prior version to which a downgrade is
* possible after initiating rolling upgrade. If the feature cannot satisfy
* compatibility with any prior version, then set its minimum compatible
 * layout version to itself to indicate that downgrade is impossible.
* Satisfying compatibility might require adding logic to the new feature to
* reject operations or handle them differently while rolling upgrade is in
 * progress. In general, it's possible to satisfy compatibility for downgrade
* if the new feature just involves adding new edit log ops. Deeper
* structural changes, such as changing the way we place files in the metadata
* directories, might be incompatible. Feature implementations should strive
* for compatibility, because it's in the best interest of our users to
* support downgrade.
* </ul>
*/
public static enum Feature implements LayoutFeature {
ROLLING_UPGRADE(-55, -53, -55, "Support rolling upgrade", false),
EDITLOG_LENGTH(-56, -56, "Add length field to every edit log op"),
XATTRS(-57, -57, "Extended attributes"),
CREATE_OVERWRITE(-58, -58, "Use single editlog record for " +
"creating file with overwrite"),
XATTRS_NAMESPACE_EXT(-59, -59, "Increase number of xattr namespaces"),
BLOCK_STORAGE_POLICY(-60, -60, "Block Storage policy"),
TRUNCATE(-61, -61, "Truncate"),
APPEND_NEW_BLOCK(-62, -61, "Support appending to new block"),
QUOTA_BY_STORAGE_TYPE(-63, -61, "Support quota for specific storage types");
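    // A hypothetical new entry would follow the same pattern, e.g.
    // NEW_FEATURE(-64, -61, "Description of the new feature") if downgrade to
    // -61 remains possible after the change, or NEW_FEATURE(-64, -64, ...) if
    // it does not.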
private final FeatureInfo info;
/**
     * Feature that is added at layout version {@code lv}, whose ancestor is
     * the immediately preceding layout version ({@code lv} + 1).
* @param lv new layout version with the addition of this feature
     * @param minCompatLV minimum compatible layout version
* @param description description of the feature
*/
Feature(final int lv, int minCompatLV, final String description) {
this(lv, lv + 1, minCompatLV, description, false);
}
/**
     * NameNode feature that is added at layout version {@code lv}, derived
     * from ancestor layout version {@code ancestorLV}.
     * @param lv new layout version with the addition of this feature
     * @param ancestorLV layout version from which the new lv is derived
* @param minCompatLV minimum compatible layout version
* @param description description of the feature
* @param reserved true when this is a layout version reserved for previous
* versions
* @param features set of features that are to be enabled for this version
*/
Feature(final int lv, final int ancestorLV, int minCompatLV,
final String description, boolean reserved, Feature... features) {
info = new FeatureInfo(lv, ancestorLV, minCompatLV, description, reserved,
features);
}
@Override
public FeatureInfo getInfo() {
return info;
}
}
}
| 5,453 | 42.285714 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
/**
* Cancel delegation tokens over http for use in hftp.
*/
@SuppressWarnings("serial")
public class CancelDelegationTokenServlet extends DfsServlet {
private static final Log LOG = LogFactory.getLog(CancelDelegationTokenServlet.class);
public static final String PATH_SPEC = "/cancelDelegationToken";
public static final String TOKEN = "token";
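  // Request shape, for illustration (hypothetical host and port; the token
  // string is assumed to have been fetched from the corresponding
  // get-delegation-token servlet and URL-encoded):
  //   http://<namenode-host>:<http-port>/cancelDelegationToken?token=<encoded-token>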
@Override
protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
throws ServletException, IOException {
final UserGroupInformation ugi;
final ServletContext context = getServletContext();
final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
try {
ugi = getUGI(req, conf);
} catch(IOException ioe) {
LOG.info("Request for token received with no authentication from "
+ req.getRemoteAddr(), ioe);
resp.sendError(HttpServletResponse.SC_FORBIDDEN,
"Unable to identify or authenticate user");
return;
}
final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
context);
String tokenString = req.getParameter(TOKEN);
    if (tokenString == null) {
      resp.sendError(HttpServletResponse.SC_BAD_REQUEST,
                     "Token to cancel not specified");
      return;
    }
final Token<DelegationTokenIdentifier> token =
new Token<DelegationTokenIdentifier>();
token.decodeFromUrlString(tokenString);
try {
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
nn.getRpcServer().cancelDelegationToken(token);
return null;
}
});
} catch(Exception e) {
LOG.info("Exception while cancelling token. Re-throwing. ", e);
resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
e.getMessage());
}
}
}
| 3,275 | 38 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_DEPTH;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.MAX_PATH_LENGTH;
import static org.apache.hadoop.util.Time.now;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import com.google.common.collect.Lists;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.EventBatchList;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerFaultInjector;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.RetryCache;
import org.apache.hadoop.ipc.RetryCache.CacheEntry;
import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.WritableRpcEngine;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService;
import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolPB;
import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolServerSideTranslatorPB;
import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolPB;
import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolServerSideTranslatorPB;
import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolPB;
import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolServerSideTranslatorPB;
import org.apache.hadoop.ipc.proto.RefreshCallQueueProtocolProtos.RefreshCallQueueProtocolService;
import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolServerSideTranslatorPB;
import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshProtocolService;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB;
import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTranslatorPB;
import org.apache.hadoop.tracing.SpanReceiverInfo;
import org.apache.hadoop.tracing.TraceAdminPB.TraceAdminService;
import org.apache.hadoop.tracing.TraceAdminProtocolPB;
import org.apache.hadoop.tracing.TraceAdminProtocolServerSideTranslatorPB;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.util.VersionUtil;
import org.slf4j.Logger;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
/**
* This class is responsible for handling all of the RPC calls to the NameNode.
* It is created, started, and stopped by {@link NameNode}.
*/
class NameNodeRpcServer implements NamenodeProtocols {
private static final Logger LOG = NameNode.LOG;
private static final Logger stateChangeLog = NameNode.stateChangeLog;
private static final Logger blockStateChangeLog = NameNode
.blockStateChangeLog;
// Dependencies from other parts of NN.
protected final FSNamesystem namesystem;
protected final NameNode nn;
private final NameNodeMetrics metrics;
private final RetryCache retryCache;
private final boolean serviceAuthEnabled;
/** The RPC server that listens to requests from DataNodes */
private final RPC.Server serviceRpcServer;
private final InetSocketAddress serviceRPCAddress;
/** The RPC server that listens to requests from clients */
protected final RPC.Server clientRpcServer;
protected final InetSocketAddress clientRpcAddress;
private final String minimumDataNodeVersion;
public NameNodeRpcServer(Configuration conf, NameNode nn)
throws IOException {
this.nn = nn;
this.namesystem = nn.getNamesystem();
this.retryCache = namesystem.getRetryCache();
this.metrics = NameNode.getNameNodeMetrics();
int handlerCount =
conf.getInt(DFS_NAMENODE_HANDLER_COUNT_KEY,
DFS_NAMENODE_HANDLER_COUNT_DEFAULT);
RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
ProtobufRpcEngine.class);
ClientNamenodeProtocolServerSideTranslatorPB
clientProtocolServerTranslator =
new ClientNamenodeProtocolServerSideTranslatorPB(this);
BlockingService clientNNPbService = ClientNamenodeProtocol.
newReflectiveBlockingService(clientProtocolServerTranslator);
DatanodeProtocolServerSideTranslatorPB dnProtoPbTranslator =
new DatanodeProtocolServerSideTranslatorPB(this);
BlockingService dnProtoPbService = DatanodeProtocolService
.newReflectiveBlockingService(dnProtoPbTranslator);
NamenodeProtocolServerSideTranslatorPB namenodeProtocolXlator =
new NamenodeProtocolServerSideTranslatorPB(this);
BlockingService NNPbService = NamenodeProtocolService
.newReflectiveBlockingService(namenodeProtocolXlator);
RefreshAuthorizationPolicyProtocolServerSideTranslatorPB refreshAuthPolicyXlator =
new RefreshAuthorizationPolicyProtocolServerSideTranslatorPB(this);
BlockingService refreshAuthService = RefreshAuthorizationPolicyProtocolService
.newReflectiveBlockingService(refreshAuthPolicyXlator);
RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator =
new RefreshUserMappingsProtocolServerSideTranslatorPB(this);
BlockingService refreshUserMappingService = RefreshUserMappingsProtocolService
.newReflectiveBlockingService(refreshUserMappingXlator);
RefreshCallQueueProtocolServerSideTranslatorPB refreshCallQueueXlator =
new RefreshCallQueueProtocolServerSideTranslatorPB(this);
BlockingService refreshCallQueueService = RefreshCallQueueProtocolService
.newReflectiveBlockingService(refreshCallQueueXlator);
GenericRefreshProtocolServerSideTranslatorPB genericRefreshXlator =
new GenericRefreshProtocolServerSideTranslatorPB(this);
BlockingService genericRefreshService = GenericRefreshProtocolService
.newReflectiveBlockingService(genericRefreshXlator);
GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator =
new GetUserMappingsProtocolServerSideTranslatorPB(this);
BlockingService getUserMappingService = GetUserMappingsProtocolService
.newReflectiveBlockingService(getUserMappingXlator);
HAServiceProtocolServerSideTranslatorPB haServiceProtocolXlator =
new HAServiceProtocolServerSideTranslatorPB(this);
BlockingService haPbService = HAServiceProtocolService
.newReflectiveBlockingService(haServiceProtocolXlator);
TraceAdminProtocolServerSideTranslatorPB traceAdminXlator =
new TraceAdminProtocolServerSideTranslatorPB(this);
BlockingService traceAdminService = TraceAdminService
.newReflectiveBlockingService(traceAdminXlator);
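    // Descriptive note on the pattern above: each protocol gets a server-side
    // translator that adapts protobuf requests onto this class's methods; the
    // translator is wrapped in a BlockingService, which is later registered on
    // the client and service RPC servers via DFSUtil.addPBProtocol.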
WritableRpcEngine.ensureInitialized();
InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
if (serviceRpcAddr != null) {
String bindHost = nn.getServiceRpcServerBindHost(conf);
if (bindHost == null) {
bindHost = serviceRpcAddr.getHostName();
}
LOG.info("Service RPC server is binding to " + bindHost + ":" +
serviceRpcAddr.getPort());
int serviceHandlerCount =
conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
this.serviceRpcServer = new RPC.Builder(conf)
.setProtocol(
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
.setInstance(clientNNPbService)
.setBindAddress(bindHost)
.setPort(serviceRpcAddr.getPort()).setNumHandlers(serviceHandlerCount)
.setVerbose(false)
.setSecretManager(namesystem.getDelegationTokenSecretManager())
.build();
// Add all the RPC protocols that the namenode implements
DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService,
serviceRpcServer);
DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService,
serviceRpcServer);
DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService,
serviceRpcServer);
DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class,
refreshAuthService, serviceRpcServer);
DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class,
refreshUserMappingService, serviceRpcServer);
      // We support refreshing the call queue here in case the client RPC queue is full
DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class,
refreshCallQueueService, serviceRpcServer);
DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class,
genericRefreshService, serviceRpcServer);
DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class,
getUserMappingService, serviceRpcServer);
DFSUtil.addPBProtocol(conf, TraceAdminProtocolPB.class,
traceAdminService, serviceRpcServer);
// Update the address with the correct port
InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress();
serviceRPCAddress = new InetSocketAddress(
serviceRpcAddr.getHostName(), listenAddr.getPort());
nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
} else {
serviceRpcServer = null;
serviceRPCAddress = null;
}
InetSocketAddress rpcAddr = nn.getRpcServerAddress(conf);
String bindHost = nn.getRpcServerBindHost(conf);
if (bindHost == null) {
bindHost = rpcAddr.getHostName();
}
LOG.info("RPC server is binding to " + bindHost + ":" + rpcAddr.getPort());
this.clientRpcServer = new RPC.Builder(conf)
.setProtocol(
org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class)
.setInstance(clientNNPbService).setBindAddress(bindHost)
.setPort(rpcAddr.getPort()).setNumHandlers(handlerCount)
.setVerbose(false)
.setSecretManager(namesystem.getDelegationTokenSecretManager()).build();
// Add all the RPC protocols that the namenode implements
DFSUtil.addPBProtocol(conf, HAServiceProtocolPB.class, haPbService,
clientRpcServer);
DFSUtil.addPBProtocol(conf, NamenodeProtocolPB.class, NNPbService,
clientRpcServer);
DFSUtil.addPBProtocol(conf, DatanodeProtocolPB.class, dnProtoPbService,
clientRpcServer);
DFSUtil.addPBProtocol(conf, RefreshAuthorizationPolicyProtocolPB.class,
refreshAuthService, clientRpcServer);
DFSUtil.addPBProtocol(conf, RefreshUserMappingsProtocolPB.class,
refreshUserMappingService, clientRpcServer);
DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class,
refreshCallQueueService, clientRpcServer);
DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class,
genericRefreshService, clientRpcServer);
DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class,
getUserMappingService, clientRpcServer);
DFSUtil.addPBProtocol(conf, TraceAdminProtocolPB.class,
traceAdminService, clientRpcServer);
// set service-level authorization security policy
if (serviceAuthEnabled =
conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
if (serviceRpcServer != null) {
serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
}
}
// The rpc-server port can be ephemeral... ensure we have the correct info
InetSocketAddress listenAddr = clientRpcServer.getListenerAddress();
clientRpcAddress = new InetSocketAddress(
rpcAddr.getHostName(), listenAddr.getPort());
nn.setRpcServerAddress(conf, clientRpcAddress);
minimumDataNodeVersion = conf.get(
DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY,
DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT);
// Set terse exception whose stack trace won't be logged
this.clientRpcServer.addTerseExceptions(SafeModeException.class,
FileNotFoundException.class,
HadoopIllegalArgumentException.class,
FileAlreadyExistsException.class,
InvalidPathException.class,
ParentNotDirectoryException.class,
UnresolvedLinkException.class,
AlreadyBeingCreatedException.class,
QuotaExceededException.class,
RecoveryInProgressException.class,
AccessControlException.class,
InvalidToken.class,
LeaseExpiredException.class,
NSQuotaExceededException.class,
DSQuotaExceededException.class,
QuotaByStorageTypeExceededException.class,
AclException.class,
FSLimitException.PathComponentTooLongException.class,
FSLimitException.MaxDirectoryItemsExceededException.class,
UnresolvedPathException.class);
}
/** Allow access to the client RPC server for testing */
@VisibleForTesting
RPC.Server getClientRpcServer() {
return clientRpcServer;
}
/** Allow access to the service RPC server for testing */
@VisibleForTesting
RPC.Server getServiceRpcServer() {
return serviceRpcServer;
}
/**
* Start client and service RPC servers.
*/
void start() {
clientRpcServer.start();
if (serviceRpcServer != null) {
serviceRpcServer.start();
}
}
/**
* Wait until the RPC servers have shutdown.
*/
void join() throws InterruptedException {
clientRpcServer.join();
if (serviceRpcServer != null) {
serviceRpcServer.join();
}
}
/**
* Stop client and service RPC servers.
*/
void stop() {
if (clientRpcServer != null) {
clientRpcServer.stop();
}
if (serviceRpcServer != null) {
serviceRpcServer.stop();
}
}
InetSocketAddress getServiceRpcAddress() {
return serviceRPCAddress;
}
InetSocketAddress getRpcAddress() {
return clientRpcAddress;
}
private static UserGroupInformation getRemoteUser() throws IOException {
return NameNode.getRemoteUser();
}
/////////////////////////////////////////////////////
// NamenodeProtocol
/////////////////////////////////////////////////////
@Override // NamenodeProtocol
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException {
    if (size <= 0) {
      throw new IllegalArgumentException(
          "Unexpected non-positive size: " + size);
}
checkNNStartup();
namesystem.checkSuperuserPrivilege();
return namesystem.getBlockManager().getBlocks(datanode, size);
}
@Override // NamenodeProtocol
public ExportedBlockKeys getBlockKeys() throws IOException {
checkNNStartup();
namesystem.checkSuperuserPrivilege();
return namesystem.getBlockManager().getBlockKeys();
}
@Override // NamenodeProtocol
public void errorReport(NamenodeRegistration registration,
int errorCode,
String msg) throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.UNCHECKED);
namesystem.checkSuperuserPrivilege();
verifyRequest(registration);
LOG.info("Error report from " + registration + ": " + msg);
if (errorCode == FATAL) {
namesystem.releaseBackupNode(registration);
}
}
@Override // NamenodeProtocol
public NamenodeRegistration registerSubordinateNamenode(
NamenodeRegistration registration) throws IOException {
checkNNStartup();
namesystem.checkSuperuserPrivilege();
verifyLayoutVersion(registration.getVersion());
NamenodeRegistration myRegistration = nn.setRegistration();
namesystem.registerBackupNode(registration, myRegistration);
return myRegistration;
}
@Override // NamenodeProtocol
public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
throws IOException {
checkNNStartup();
namesystem.checkSuperuserPrivilege();
verifyRequest(registration);
if(!nn.isRole(NamenodeRole.NAMENODE))
throw new IOException("Only an ACTIVE node can invoke startCheckpoint.");
CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
null);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return (NamenodeCommand) cacheEntry.getPayload();
}
NamenodeCommand ret = null;
try {
ret = namesystem.startCheckpoint(registration, nn.setRegistration());
} finally {
RetryCache.setState(cacheEntry, ret != null, ret);
}
return ret;
}
@Override // NamenodeProtocol
public void endCheckpoint(NamenodeRegistration registration,
CheckpointSignature sig) throws IOException {
checkNNStartup();
namesystem.checkSuperuserPrivilege();
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
boolean success = false;
try {
namesystem.endCheckpoint(registration, sig);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // ClientProtocol
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
throws IOException {
checkNNStartup();
return namesystem.getDelegationToken(renewer);
}
@Override // ClientProtocol
public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
throws InvalidToken, IOException {
checkNNStartup();
return namesystem.renewDelegationToken(token);
}
@Override // ClientProtocol
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException {
checkNNStartup();
namesystem.cancelDelegationToken(token);
}
@Override // ClientProtocol
public LocatedBlocks getBlockLocations(String src,
long offset,
long length)
throws IOException {
checkNNStartup();
metrics.incrGetBlockLocations();
return namesystem.getBlockLocations(getClientMachine(),
src, offset, length);
}
@Override // ClientProtocol
public FsServerDefaults getServerDefaults() throws IOException {
checkNNStartup();
return namesystem.getServerDefaults();
}
@Override // ClientProtocol
public HdfsFileStatus create(String src, FsPermission masked,
String clientName, EnumSetWritable<CreateFlag> flag,
boolean createParent, short replication, long blockSize,
CryptoProtocolVersion[] supportedVersions)
throws IOException {
checkNNStartup();
String clientMachine = getClientMachine();
if (stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* NameNode.create: file "
+src+" for "+clientName+" at "+clientMachine);
}
if (!checkPathLength(src)) {
throw new IOException("create: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return (HdfsFileStatus) cacheEntry.getPayload();
}
HdfsFileStatus status = null;
try {
PermissionStatus perm = new PermissionStatus(getRemoteUser()
.getShortUserName(), null, masked);
status = namesystem.startFile(src, perm, clientName, clientMachine,
flag.get(), createParent, replication, blockSize, supportedVersions,
cacheEntry != null);
} finally {
RetryCache.setState(cacheEntry, status != null, status);
}
metrics.incrFilesCreated();
metrics.incrCreateFileOps();
return status;
}
@Override // ClientProtocol
public LastBlockWithStatus append(String src, String clientName,
EnumSetWritable<CreateFlag> flag) throws IOException {
checkNNStartup();
String clientMachine = getClientMachine();
if (stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* NameNode.append: file "
+src+" for "+clientName+" at "+clientMachine);
}
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
null);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return (LastBlockWithStatus) cacheEntry.getPayload();
}
LastBlockWithStatus info = null;
boolean success = false;
try {
info = namesystem.appendFile(src, clientName, clientMachine, flag.get(),
cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success, info);
}
metrics.incrFilesAppended();
return info;
}
@Override // ClientProtocol
public boolean recoverLease(String src, String clientName) throws IOException {
checkNNStartup();
String clientMachine = getClientMachine();
return namesystem.recoverLease(src, clientName, clientMachine);
}
@Override // ClientProtocol
public boolean setReplication(String src, short replication)
throws IOException {
checkNNStartup();
return namesystem.setReplication(src, replication);
}
@Override
public void setStoragePolicy(String src, String policyName)
throws IOException {
checkNNStartup();
namesystem.setStoragePolicy(src, policyName);
}
@Override
public BlockStoragePolicy getStoragePolicy(String path) throws IOException {
checkNNStartup();
return namesystem.getStoragePolicy(path);
}
@Override
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
checkNNStartup();
return namesystem.getStoragePolicies();
}
@Override // ClientProtocol
public void setPermission(String src, FsPermission permissions)
throws IOException {
checkNNStartup();
namesystem.setPermission(src, permissions);
}
@Override // ClientProtocol
public void setOwner(String src, String username, String groupname)
throws IOException {
checkNNStartup();
namesystem.setOwner(src, username, groupname);
}
@Override
public LocatedBlock addBlock(String src, String clientName,
ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId,
String[] favoredNodes)
throws IOException {
checkNNStartup();
LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
clientName, previous, excludedNodes, favoredNodes);
if (locatedBlock != null) {
metrics.incrAddBlockOps();
}
return locatedBlock;
}
@Override // ClientProtocol
public LocatedBlock getAdditionalDatanode(final String src,
final long fileId, final ExtendedBlock blk,
final DatanodeInfo[] existings, final String[] existingStorageIDs,
final DatanodeInfo[] excludes,
final int numAdditionalNodes, final String clientName
) throws IOException {
checkNNStartup();
if (LOG.isDebugEnabled()) {
LOG.debug("getAdditionalDatanode: src=" + src
+ ", fileId=" + fileId
+ ", blk=" + blk
+ ", existings=" + Arrays.asList(existings)
+ ", excludes=" + Arrays.asList(excludes)
+ ", numAdditionalNodes=" + numAdditionalNodes
+ ", clientName=" + clientName);
}
metrics.incrGetAdditionalDatanodeOps();
Set<Node> excludeSet = null;
if (excludes != null) {
excludeSet = new HashSet<Node>(excludes.length);
for (Node node : excludes) {
excludeSet.add(node);
}
}
return namesystem.getAdditionalDatanode(src, fileId, blk, existings,
existingStorageIDs, excludeSet, numAdditionalNodes, clientName);
}
/**
* The client needs to give up on the block.
*/
@Override // ClientProtocol
public void abandonBlock(ExtendedBlock b, long fileId, String src,
String holder) throws IOException {
checkNNStartup();
namesystem.abandonBlock(b, fileId, src, holder);
}
@Override // ClientProtocol
public boolean complete(String src, String clientName,
ExtendedBlock last, long fileId)
throws IOException {
checkNNStartup();
return namesystem.completeFile(src, clientName, last, fileId);
}
/**
* The client has detected an error on the specified located blocks
* and is reporting them to the server. For now, the namenode will
* mark the block as corrupt. In the future we might
* check the blocks are actually corrupt.
*/
@Override // ClientProtocol, DatanodeProtocol
public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
checkNNStartup();
namesystem.reportBadBlocks(blocks);
}
@Override // ClientProtocol
public LocatedBlock updateBlockForPipeline(ExtendedBlock block, String clientName)
throws IOException {
checkNNStartup();
return namesystem.updateBlockForPipeline(block, clientName);
}
@Override // ClientProtocol
public void updatePipeline(String clientName, ExtendedBlock oldBlock,
ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs)
throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
boolean success = false;
try {
namesystem.updatePipeline(clientName, oldBlock, newBlock, newNodes,
newStorageIDs, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // DatanodeProtocol
public void commitBlockSynchronization(ExtendedBlock block,
long newgenerationstamp, long newlength,
boolean closeFile, boolean deleteblock, DatanodeID[] newtargets,
String[] newtargetstorages)
throws IOException {
checkNNStartup();
namesystem.commitBlockSynchronization(block, newgenerationstamp,
newlength, closeFile, deleteblock, newtargets, newtargetstorages);
}
@Override // ClientProtocol
public long getPreferredBlockSize(String filename)
throws IOException {
checkNNStartup();
return namesystem.getPreferredBlockSize(filename);
}
@Deprecated
@Override // ClientProtocol
public boolean rename(String src, String dst) throws IOException {
checkNNStartup();
if(stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
}
if (!checkPathLength(dst)) {
throw new IOException("rename: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return true; // Return previous response
}
boolean ret = false;
try {
ret = namesystem.renameTo(src, dst, cacheEntry != null);
} finally {
RetryCache.setState(cacheEntry, ret);
}
if (ret) {
metrics.incrFilesRenamed();
}
return ret;
}
@Override // ClientProtocol
public void concat(String trg, String[] src) throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
boolean success = false;
try {
namesystem.concat(trg, src, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // ClientProtocol
public void rename2(String src, String dst, Options.Rename... options)
throws IOException {
checkNNStartup();
if(stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
}
if (!checkPathLength(dst)) {
throw new IOException("rename: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
boolean success = false;
try {
namesystem.renameTo(src, dst, cacheEntry != null, options);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
metrics.incrFilesRenamed();
}
@Override // ClientProtocol
public boolean truncate(String src, long newLength, String clientName)
throws IOException {
if(stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* NameNode.truncate: " + src + " to " +
newLength);
}
String clientMachine = getClientMachine();
try {
return namesystem.truncate(
src, newLength, clientName, clientMachine, now());
} finally {
metrics.incrFilesTruncated();
}
}
@Override // ClientProtocol
public boolean delete(String src, boolean recursive) throws IOException {
checkNNStartup();
if (stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* Namenode.delete: src=" + src
+ ", recursive=" + recursive);
}
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return true; // Return previous response
}
boolean ret = false;
try {
ret = namesystem.delete(src, recursive, cacheEntry != null);
} finally {
RetryCache.setState(cacheEntry, ret);
}
if (ret)
metrics.incrDeleteFileOps();
return ret;
}
/**
* Check path length does not exceed maximum. Returns true if
* length and depth are okay. Returns false if length is too long
* or depth is too great.
*/
private boolean checkPathLength(String src) {
Path srcPath = new Path(src);
return (src.length() <= MAX_PATH_LENGTH &&
srcPath.depth() <= MAX_PATH_DEPTH);
}
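  // For illustration (based on Path semantics as understood here):
  // new Path("/a/b/c").depth() is 3 and new Path("/").depth() is 0, so a path
  // passes this check only if its string length is within MAX_PATH_LENGTH and
  // it has no more than MAX_PATH_DEPTH components.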
@Override // ClientProtocol
public boolean mkdirs(String src, FsPermission masked, boolean createParent)
throws IOException {
checkNNStartup();
if(stateChangeLog.isDebugEnabled()) {
stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src);
}
if (!checkPathLength(src)) {
throw new IOException("mkdirs: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
return namesystem.mkdirs(src,
new PermissionStatus(getRemoteUser().getShortUserName(),
null, masked), createParent);
}
@Override // ClientProtocol
public void renewLease(String clientName) throws IOException {
checkNNStartup();
namesystem.renewLease(clientName);
}
@Override // ClientProtocol
public DirectoryListing getListing(String src, byte[] startAfter,
boolean needLocation) throws IOException {
checkNNStartup();
DirectoryListing files = namesystem.getListing(
src, startAfter, needLocation);
if (files != null) {
metrics.incrGetListingOps();
metrics.incrFilesInGetListingOps(files.getPartialListing().length);
}
return files;
}
@Override // ClientProtocol
public HdfsFileStatus getFileInfo(String src) throws IOException {
checkNNStartup();
metrics.incrFileInfoOps();
return namesystem.getFileInfo(src, true);
}
@Override // ClientProtocol
public boolean isFileClosed(String src) throws IOException{
checkNNStartup();
return namesystem.isFileClosed(src);
}
@Override // ClientProtocol
public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
checkNNStartup();
metrics.incrFileInfoOps();
return namesystem.getFileInfo(src, false);
}
@Override // ClientProtocol
public long[] getStats() throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.READ);
return namesystem.getStats();
}
@Override // ClientProtocol
public DatanodeInfo[] getDatanodeReport(DatanodeReportType type)
throws IOException {
checkNNStartup();
DatanodeInfo results[] = namesystem.datanodeReport(type);
return results;
}
@Override // ClientProtocol
public DatanodeStorageReport[] getDatanodeStorageReport(
DatanodeReportType type) throws IOException {
checkNNStartup();
final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type);
return reports;
}
@Override // ClientProtocol
public boolean setSafeMode(SafeModeAction action, boolean isChecked)
throws IOException {
checkNNStartup();
OperationCategory opCategory = OperationCategory.UNCHECKED;
if (isChecked) {
if (action == SafeModeAction.SAFEMODE_GET) {
opCategory = OperationCategory.READ;
} else {
opCategory = OperationCategory.WRITE;
}
}
namesystem.checkOperation(opCategory);
return namesystem.setSafeMode(action);
}
@Override // ClientProtocol
public boolean restoreFailedStorage(String arg) throws IOException {
checkNNStartup();
return namesystem.restoreFailedStorage(arg);
}
@Override // ClientProtocol
public void saveNamespace() throws IOException {
checkNNStartup();
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
boolean success = false;
try {
namesystem.saveNamespace();
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // ClientProtocol
public long rollEdits() throws AccessControlException, IOException {
checkNNStartup();
CheckpointSignature sig = namesystem.rollEditLog();
return sig.getCurSegmentTxId();
}
@Override // ClientProtocol
public void refreshNodes() throws IOException {
checkNNStartup();
namesystem.refreshNodes();
}
@Override // NamenodeProtocol
public long getTransactionID() throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.UNCHECKED);
namesystem.checkSuperuserPrivilege();
return namesystem.getFSImage().getLastAppliedOrWrittenTxId();
}
@Override // NamenodeProtocol
public long getMostRecentCheckpointTxId() throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.UNCHECKED);
namesystem.checkSuperuserPrivilege();
return namesystem.getFSImage().getMostRecentCheckpointTxId();
}
@Override // NamenodeProtocol
public CheckpointSignature rollEditLog() throws IOException {
checkNNStartup();
namesystem.checkSuperuserPrivilege();
return namesystem.rollEditLog();
}
@Override // NamenodeProtocol
public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.READ);
namesystem.checkSuperuserPrivilege();
return namesystem.getEditLog().getEditLogManifest(sinceTxId);
}
@Override // NamenodeProtocol
public boolean isUpgradeFinalized() throws IOException {
checkNNStartup();
namesystem.checkSuperuserPrivilege();
return namesystem.isUpgradeFinalized();
}
@Override // ClientProtocol
public void finalizeUpgrade() throws IOException {
checkNNStartup();
namesystem.finalizeUpgrade();
}
@Override // ClientProtocol
public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
checkNNStartup();
LOG.info("rollingUpgrade " + action);
switch(action) {
case QUERY:
return namesystem.queryRollingUpgrade();
case PREPARE:
return namesystem.startRollingUpgrade();
case FINALIZE:
return namesystem.finalizeRollingUpgrade();
default:
throw new UnsupportedActionException(action + " is not yet supported.");
}
}
@Override // ClientProtocol
public void metaSave(String filename) throws IOException {
checkNNStartup();
namesystem.metaSave(filename);
}
@Override // ClientProtocol
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
throws IOException {
checkNNStartup();
String[] cookieTab = new String[] { cookie };
Collection<FSNamesystem.CorruptFileBlockInfo> fbs =
namesystem.listCorruptFileBlocks(path, cookieTab);
String[] files = new String[fbs.size()];
int i = 0;
for(FSNamesystem.CorruptFileBlockInfo fb: fbs) {
files[i++] = fb.path;
}
return new CorruptFileBlocks(files, cookieTab[0]);
}
/**
* Tell all datanodes to use a new, non-persistent bandwidth value for
* dfs.datanode.balance.bandwidthPerSec.
* @param bandwidth Balancer bandwidth in bytes per second for all datanodes.
* @throws IOException
*/
@Override // ClientProtocol
public void setBalancerBandwidth(long bandwidth) throws IOException {
checkNNStartup();
namesystem.setBalancerBandwidth(bandwidth);
}
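  // Typical invocation path (a sketch): an administrator runs
  //   hdfs dfsadmin -setBalancerBandwidth <bytes per second>
  // which reaches this RPC; the new value overrides the datanodes' configured
  // dfs.datanode.balance.bandwidthPerSec until they restart.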
@Override // ClientProtocol
public ContentSummary getContentSummary(String path) throws IOException {
checkNNStartup();
return namesystem.getContentSummary(path);
}
@Override // ClientProtocol
public void setQuota(String path, long namespaceQuota, long storagespaceQuota,
StorageType type)
throws IOException {
checkNNStartup();
namesystem.setQuota(path, namespaceQuota, storagespaceQuota, type);
}
@Override // ClientProtocol
public void fsync(String src, long fileId, String clientName,
long lastBlockLength)
throws IOException {
checkNNStartup();
namesystem.fsync(src, fileId, clientName, lastBlockLength);
}
@Override // ClientProtocol
public void setTimes(String src, long mtime, long atime)
throws IOException {
checkNNStartup();
namesystem.setTimes(src, mtime, atime);
}
@Override // ClientProtocol
public void createSymlink(String target, String link, FsPermission dirPerms,
boolean createParent) throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
/* We enforce the MAX_PATH_LENGTH limit even though a symlink target
* URI may refer to a non-HDFS file system.
*/
if (!checkPathLength(link)) {
throw new IOException("Symlink path exceeds " + MAX_PATH_LENGTH +
" character limit");
}
final UserGroupInformation ugi = getRemoteUser();
boolean success = false;
try {
PermissionStatus perm = new PermissionStatus(ugi.getShortUserName(),
null, dirPerms);
namesystem.createSymlink(target, link, perm, createParent,
cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // ClientProtocol
public String getLinkTarget(String path) throws IOException {
checkNNStartup();
metrics.incrGetLinkTargetOps();
HdfsFileStatus stat = null;
try {
stat = namesystem.getFileInfo(path, false);
} catch (UnresolvedPathException e) {
return e.getResolvedPath().toString();
} catch (UnresolvedLinkException e) {
// The NameNode should only throw an UnresolvedPathException
throw new AssertionError("UnresolvedLinkException thrown");
}
if (stat == null) {
throw new FileNotFoundException("File does not exist: " + path);
} else if (!stat.isSymlink()) {
throw new IOException("Path " + path + " is not a symbolic link");
}
return stat.getSymlink();
}
@Override // DatanodeProtocol
public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg)
throws IOException {
checkNNStartup();
verifySoftwareVersion(nodeReg);
namesystem.registerDatanode(nodeReg);
return nodeReg;
}
@Override // DatanodeProtocol
public HeartbeatResponse sendHeartbeat(DatanodeRegistration nodeReg,
StorageReport[] report, long dnCacheCapacity, long dnCacheUsed,
int xmitsInProgress, int xceiverCount,
int failedVolumes, VolumeFailureSummary volumeFailureSummary,
boolean requestFullBlockReportLease) throws IOException {
checkNNStartup();
verifyRequest(nodeReg);
return namesystem.handleHeartbeat(nodeReg, report,
dnCacheCapacity, dnCacheUsed, xceiverCount, xmitsInProgress,
failedVolumes, volumeFailureSummary, requestFullBlockReportLease);
}
@Override // DatanodeProtocol
public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
String poolId, StorageBlockReport[] reports,
BlockReportContext context) throws IOException {
checkNNStartup();
verifyRequest(nodeReg);
if(blockStateChangeLog.isDebugEnabled()) {
blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
+ "from " + nodeReg + ", reports.length=" + reports.length);
}
final BlockManager bm = namesystem.getBlockManager();
boolean noStaleStorages = false;
for (int r = 0; r < reports.length; r++) {
final BlockListAsLongs blocks = reports[r].getBlocks();
//
// BlockManager.processReport accumulates information of prior calls
// for the same node and storage, so the value returned by the last
      // call of this loop is the final updated value for noStaleStorages.
//
noStaleStorages = bm.processReport(nodeReg, reports[r].getStorage(),
blocks, context, (r == reports.length - 1));
metrics.incrStorageBlockReportOps();
}
BlockManagerFaultInjector.getInstance().
incomingBlockReportRpc(nodeReg, context);
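    // Descriptive note: only when the upgrade is finalized, no rolling upgrade
    // is in progress, this NameNode is active, and no stale storages remain do
    // we ask the DataNode to finalize its local copy of the block pool.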
if (nn.getFSImage().isUpgradeFinalized() &&
!namesystem.isRollingUpgrade() &&
!nn.isStandbyState() &&
noStaleStorages) {
return new FinalizeCommand(poolId);
}
return null;
}
@Override
public DatanodeCommand cacheReport(DatanodeRegistration nodeReg,
String poolId, List<Long> blockIds) throws IOException {
checkNNStartup();
verifyRequest(nodeReg);
if (blockStateChangeLog.isDebugEnabled()) {
blockStateChangeLog.debug("*BLOCK* NameNode.cacheReport: "
+ "from " + nodeReg + " " + blockIds.size() + " blocks");
}
namesystem.getCacheManager().processCacheReport(nodeReg, blockIds);
return null;
}
@Override // DatanodeProtocol
public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks) throws IOException {
checkNNStartup();
verifyRequest(nodeReg);
metrics.incrBlockReceivedAndDeletedOps();
if(blockStateChangeLog.isDebugEnabled()) {
blockStateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
+"from "+nodeReg+" "+receivedAndDeletedBlocks.length
+" blocks.");
}
for(StorageReceivedDeletedBlocks r : receivedAndDeletedBlocks) {
namesystem.processIncrementalBlockReport(nodeReg, r);
}
}
@Override // DatanodeProtocol
public void errorReport(DatanodeRegistration nodeReg,
int errorCode, String msg) throws IOException {
checkNNStartup();
String dnName =
(nodeReg == null) ? "Unknown DataNode" : nodeReg.toString();
if (errorCode == DatanodeProtocol.NOTIFY) {
LOG.info("Error report from " + dnName + ": " + msg);
return;
}
verifyRequest(nodeReg);
if (errorCode == DatanodeProtocol.DISK_ERROR) {
LOG.warn("Disk error on " + dnName + ": " + msg);
} else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
LOG.warn("Fatal disk error on " + dnName + ": " + msg);
namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg);
} else {
LOG.info("Error report from " + dnName + ": " + msg);
}
}
@Override // DatanodeProtocol, NamenodeProtocol
public NamespaceInfo versionRequest() throws IOException {
checkNNStartup();
namesystem.checkSuperuserPrivilege();
return namesystem.getNamespaceInfo();
}
/**
* Verifies the given registration.
*
* @param nodeReg node registration
* @throws UnregisteredNodeException if the registration is invalid
*/
private void verifyRequest(NodeRegistration nodeReg) throws IOException {
// verify registration ID
final String id = nodeReg.getRegistrationID();
final String expectedID = namesystem.getRegistrationID();
if (!expectedID.equals(id)) {
LOG.warn("Registration IDs mismatched: the "
+ nodeReg.getClass().getSimpleName() + " ID is " + id
+ " but the expected ID is " + expectedID);
throw new UnregisteredNodeException(nodeReg);
}
}
@Override // RefreshAuthorizationPolicyProtocol
public void refreshServiceAcl() throws IOException {
checkNNStartup();
if (!serviceAuthEnabled) {
throw new AuthorizationException("Service Level Authorization not enabled!");
}
this.clientRpcServer.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
if (this.serviceRpcServer != null) {
this.serviceRpcServer.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
}
}
@Override // RefreshAuthorizationPolicyProtocol
public void refreshUserToGroupsMappings() throws IOException {
LOG.info("Refreshing all user-to-groups mappings. Requested by user: " +
getRemoteUser().getShortUserName());
Groups.getUserToGroupsMappingService().refresh();
}
@Override // RefreshAuthorizationPolicyProtocol
public void refreshSuperUserGroupsConfiguration() {
LOG.info("Refreshing SuperUser proxy group mapping list ");
ProxyUsers.refreshSuperUserGroupsConfiguration();
}
@Override // RefreshCallQueueProtocol
public void refreshCallQueue() {
LOG.info("Refreshing call queue.");
Configuration conf = new Configuration();
clientRpcServer.refreshCallQueue(conf);
if (this.serviceRpcServer != null) {
serviceRpcServer.refreshCallQueue(conf);
}
}
@Override // GenericRefreshProtocol
public Collection<RefreshResponse> refresh(String identifier, String[] args) {
// Let the registry handle as needed
return RefreshRegistry.defaultRegistry().dispatch(identifier, args);
}
@Override // GetUserMappingsProtocol
public String[] getGroupsForUser(String user) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Getting groups for user " + user);
}
return UserGroupInformation.createRemoteUser(user).getGroupNames();
}
@Override // HAServiceProtocol
public synchronized void monitorHealth() throws HealthCheckFailedException,
AccessControlException, IOException {
checkNNStartup();
nn.monitorHealth();
}
@Override // HAServiceProtocol
public synchronized void transitionToActive(StateChangeRequestInfo req)
throws ServiceFailedException, AccessControlException, IOException {
checkNNStartup();
nn.checkHaStateChange(req);
nn.transitionToActive();
}
@Override // HAServiceProtocol
public synchronized void transitionToStandby(StateChangeRequestInfo req)
throws ServiceFailedException, AccessControlException, IOException {
checkNNStartup();
nn.checkHaStateChange(req);
nn.transitionToStandby();
}
@Override // HAServiceProtocol
public synchronized HAServiceStatus getServiceStatus()
throws AccessControlException, ServiceFailedException, IOException {
checkNNStartup();
return nn.getServiceStatus();
}
/**
* Verify version.
* @param version layout version
* @throws IOException on layout version mismatch
*/
void verifyLayoutVersion(int version) throws IOException {
if (version != HdfsServerConstants.NAMENODE_LAYOUT_VERSION)
throw new IncorrectVersionException(
HdfsServerConstants.NAMENODE_LAYOUT_VERSION, version, "data node");
}
private void verifySoftwareVersion(DatanodeRegistration dnReg)
throws IncorrectVersionException {
String dnVersion = dnReg.getSoftwareVersion();
if (VersionUtil.compareVersions(dnVersion, minimumDataNodeVersion) < 0) {
IncorrectVersionException ive = new IncorrectVersionException(
minimumDataNodeVersion, dnVersion, "DataNode", "NameNode");
LOG.warn(ive.getMessage() + " DN: " + dnReg);
throw ive;
}
String nnVersion = VersionInfo.getVersion();
if (!dnVersion.equals(nnVersion)) {
String messagePrefix = "Reported DataNode version '" + dnVersion +
"' of DN " + dnReg + " does not match NameNode version '" +
nnVersion + "'";
long nnCTime = nn.getFSImage().getStorage().getCTime();
long dnCTime = dnReg.getStorageInfo().getCTime();
if (nnCTime != dnCTime) {
IncorrectVersionException ive = new IncorrectVersionException(
messagePrefix + " and CTime of DN ('" + dnCTime +
"') does not match CTime of NN ('" + nnCTime + "')");
LOG.warn(ive.toString(), ive);
throw ive;
} else {
LOG.info(messagePrefix +
". Note: This is normal during a rolling upgrade.");
}
}
}
private static String getClientMachine() {
String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
    if (clientMachine == null) { // not a web client
      clientMachine = Server.getRemoteAddress();
    }
    if (clientMachine == null) { // not an RPC client
clientMachine = "";
}
return clientMachine;
}
@Override
public DataEncryptionKey getDataEncryptionKey() throws IOException {
checkNNStartup();
return namesystem.getBlockManager().generateDataEncryptionKey();
}
@Override
public String createSnapshot(String snapshotRoot, String snapshotName)
throws IOException {
checkNNStartup();
if (!checkPathLength(snapshotRoot)) {
throw new IOException("createSnapshot: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache,
null);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return (String) cacheEntry.getPayload();
}
metrics.incrCreateSnapshotOps();
String ret = null;
try {
ret = namesystem.createSnapshot(snapshotRoot, snapshotName,
cacheEntry != null);
} finally {
RetryCache.setState(cacheEntry, ret != null, ret);
}
return ret;
}
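// Note on the idiom above (editor's commentary, not original source): mutating RPCs
// consult the RetryCache so a client retry of an already-applied call replays the
// cached result instead of re-executing it. A hedged sketch of the pattern, using
// only names visible in this class; doTheOperation() is a hypothetical placeholder:
//
//   CacheEntryWithPayload entry = RetryCache.waitForCompletion(retryCache, null);
//   if (entry != null && entry.isSuccess()) {
//     return (String) entry.getPayload();   // replay the earlier answer
//   }
//   String result = null;
//   try {
//     result = doTheOperation();            // hypothetical helper
//   } finally {
//     RetryCache.setState(entry, result != null, result);
//   }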
@Override
public void deleteSnapshot(String snapshotRoot, String snapshotName)
throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
metrics.incrDeleteSnapshotOps();
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
boolean success = false;
try {
namesystem.deleteSnapshot(snapshotRoot, snapshotName, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override
// ClientProtocol
public void allowSnapshot(String snapshotRoot) throws IOException {
checkNNStartup();
metrics.incrAllowSnapshotOps();
namesystem.allowSnapshot(snapshotRoot);
}
@Override
// ClientProtocol
public void disallowSnapshot(String snapshot) throws IOException {
checkNNStartup();
metrics.incrDisAllowSnapshotOps();
namesystem.disallowSnapshot(snapshot);
}
@Override
// ClientProtocol
public void renameSnapshot(String snapshotRoot, String snapshotOldName,
String snapshotNewName) throws IOException {
checkNNStartup();
if (snapshotNewName == null || snapshotNewName.isEmpty()) {
throw new IOException("The new snapshot name is null or empty.");
}
namesystem.checkOperation(OperationCategory.WRITE);
metrics.incrRenameSnapshotOps();
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
boolean success = false;
try {
namesystem.renameSnapshot(snapshotRoot, snapshotOldName,
snapshotNewName, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // ClientProtocol
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException {
checkNNStartup();
SnapshottableDirectoryStatus[] status = namesystem
.getSnapshottableDirListing();
metrics.incrListSnapshottableDirOps();
return status;
}
@Override // ClientProtocol
public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
String earlierSnapshotName, String laterSnapshotName) throws IOException {
checkNNStartup();
SnapshotDiffReport report = namesystem.getSnapshotDiffReport(snapshotRoot,
earlierSnapshotName, laterSnapshotName);
metrics.incrSnapshotDiffReportOps();
return report;
}
@Override // ClientProtocol
public long addCacheDirective(
CacheDirectiveInfo path, EnumSet<CacheFlag> flags) throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion
(retryCache, null);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return (Long) cacheEntry.getPayload();
}
boolean success = false;
long ret = 0;
try {
ret = namesystem.addCacheDirective(path, flags, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success, ret);
}
return ret;
}
@Override // ClientProtocol
public void modifyCacheDirective(
CacheDirectiveInfo directive, EnumSet<CacheFlag> flags) throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return;
}
boolean success = false;
try {
namesystem.modifyCacheDirective(directive, flags, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // ClientProtocol
public void removeCacheDirective(long id) throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return;
}
boolean success = false;
try {
namesystem.removeCacheDirective(id, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // ClientProtocol
public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(long prevId,
CacheDirectiveInfo filter) throws IOException {
checkNNStartup();
if (filter == null) {
filter = new CacheDirectiveInfo.Builder().build();
}
return namesystem.listCacheDirectives(prevId, filter);
}
@Override //ClientProtocol
public void addCachePool(CachePoolInfo info) throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
boolean success = false;
try {
namesystem.addCachePool(info, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // ClientProtocol
public void modifyCachePool(CachePoolInfo info) throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
boolean success = false;
try {
namesystem.modifyCachePool(info, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // ClientProtocol
public void removeCachePool(String cachePoolName) throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return;
}
boolean success = false;
try {
namesystem.removeCachePool(cachePoolName, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // ClientProtocol
public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
throws IOException {
checkNNStartup();
return namesystem.listCachePools(prevKey != null ? prevKey : "");
}
@Override // ClientProtocol
public void modifyAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
checkNNStartup();
namesystem.modifyAclEntries(src, aclSpec);
}
@Override // ClientProtocol
public void removeAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
checkNNStartup();
namesystem.removeAclEntries(src, aclSpec);
}
@Override // ClientProtocol
public void removeDefaultAcl(String src) throws IOException {
checkNNStartup();
namesystem.removeDefaultAcl(src);
}
@Override // ClientProtocol
public void removeAcl(String src) throws IOException {
checkNNStartup();
namesystem.removeAcl(src);
}
@Override // ClientProtocol
public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
checkNNStartup();
namesystem.setAcl(src, aclSpec);
}
@Override // ClientProtocol
public AclStatus getAclStatus(String src) throws IOException {
checkNNStartup();
return namesystem.getAclStatus(src);
}
@Override // ClientProtocol
public void createEncryptionZone(String src, String keyName)
throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return;
}
boolean success = false;
try {
namesystem.createEncryptionZone(src, keyName, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // ClientProtocol
public EncryptionZone getEZForPath(String src)
throws IOException {
checkNNStartup();
return namesystem.getEZForPath(src);
}
@Override // ClientProtocol
public BatchedEntries<EncryptionZone> listEncryptionZones(
long prevId) throws IOException {
checkNNStartup();
return namesystem.listEncryptionZones(prevId);
}
@Override // ClientProtocol
public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
boolean success = false;
try {
namesystem.setXAttr(src, xAttr, flag, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
@Override // ClientProtocol
public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
throws IOException {
checkNNStartup();
return namesystem.getXAttrs(src, xAttrs);
}
@Override // ClientProtocol
public List<XAttr> listXAttrs(String src) throws IOException {
checkNNStartup();
return namesystem.listXAttrs(src);
}
@Override // ClientProtocol
public void removeXAttr(String src, XAttr xAttr) throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.WRITE);
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
boolean success = false;
try {
namesystem.removeXAttr(src, xAttr, cacheEntry != null);
success = true;
} finally {
RetryCache.setState(cacheEntry, success);
}
}
private void checkNNStartup() throws IOException {
if (!this.nn.isStarted()) {
throw new RetriableException(this.nn.getRole() + " still not started");
}
}
@Override // ClientProtocol
public void checkAccess(String path, FsAction mode) throws IOException {
checkNNStartup();
namesystem.checkAccess(path, mode);
}
@Override // ClientProtocol
public long getCurrentEditLogTxid() throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.READ); // only active
namesystem.checkSuperuserPrivilege();
// if it's not yet open for write, we may be in the process of transitioning
// from standby to active and may not yet know what the latest committed
// txid is
return namesystem.getEditLog().isOpenForWrite() ?
namesystem.getEditLog().getLastWrittenTxId() : -1;
}
private static FSEditLogOp readOp(EditLogInputStream elis)
throws IOException {
try {
return elis.readOp();
// we can get the below two exceptions if a segment is deleted
// (because we have accumulated too many edits) or (for the local journal/
// no-QJM case only) if an in-progress segment is finalized under us ...
// no need to throw an exception back to the client in this case
} catch (FileNotFoundException e) {
LOG.debug("Tried to read from deleted or moved edit log segment", e);
return null;
} catch (TransferFsImage.HttpGetFailedException e) {
LOG.debug("Tried to read from deleted edit log segment", e);
return null;
}
}
@Override // ClientProtocol
public EventBatchList getEditsFromTxid(long txid) throws IOException {
checkNNStartup();
namesystem.checkOperation(OperationCategory.READ); // only active
namesystem.checkSuperuserPrivilege();
int maxEventsPerRPC = nn.conf.getInt(
DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_KEY,
DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_DEFAULT);
FSEditLog log = namesystem.getFSImage().getEditLog();
long syncTxid = log.getSyncTxId();
// If we haven't synced anything yet, we can only read finalized
// segments since we can't reliably determine which txns in in-progress
// segments have actually been committed (e.g. written to a quorum of JNs).
// If we have synced txns, we can definitely read up to syncTxid since
// syncTxid is only updated after a transaction is committed to all
// journals. (In-progress segments written by old writers are already
// discarded for us, so if we read any in-progress segments they are
// guaranteed to have been written by this NameNode.)
boolean readInProgress = syncTxid > 0;
List<EventBatch> batches = Lists.newArrayList();
int totalEvents = 0;
long maxSeenTxid = -1;
long firstSeenTxid = -1;
if (syncTxid > 0 && txid > syncTxid) {
// we can't read past syncTxid, so there's no point in going any further
return new EventBatchList(batches, firstSeenTxid, maxSeenTxid, syncTxid);
}
Collection<EditLogInputStream> streams = null;
try {
streams = log.selectInputStreams(txid, 0, null, readInProgress);
} catch (IllegalStateException e) { // can happen if we have
// transitioned out of active and haven't yet transitioned to standby
// and are using QJM -- the edit log will be closed and this exception
// will result
LOG.info("NN is transitioning from active to standby and FSEditLog " +
"is closed -- could not read edits");
return new EventBatchList(batches, firstSeenTxid, maxSeenTxid, syncTxid);
}
boolean breakOuter = false;
for (EditLogInputStream elis : streams) {
// our assumption in this code is the EditLogInputStreams are ordered by
// starting txid
try {
FSEditLogOp op = null;
while ((op = readOp(elis)) != null) {
// break out of here in the unlikely event that syncTxid is so
// out of date that its segment has already been deleted, so the first
// txid we get is greater than syncTxid
if (syncTxid > 0 && op.getTransactionId() > syncTxid) {
breakOuter = true;
break;
}
EventBatch eventBatch = InotifyFSEditLogOpTranslator.translate(op);
if (eventBatch != null) {
batches.add(eventBatch);
totalEvents += eventBatch.getEvents().length;
}
if (op.getTransactionId() > maxSeenTxid) {
maxSeenTxid = op.getTransactionId();
}
if (firstSeenTxid == -1) {
firstSeenTxid = op.getTransactionId();
}
if (totalEvents >= maxEventsPerRPC || (syncTxid > 0 &&
op.getTransactionId() == syncTxid)) {
// we're done
breakOuter = true;
break;
}
}
} finally {
elis.close();
}
if (breakOuter) {
break;
}
}
return new EventBatchList(batches, firstSeenTxid, maxSeenTxid, syncTxid);
}
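// Usage sketch (editor's addition, hedged): applications normally reach this RPC
// through the HDFS inotify client API rather than calling it directly. Assuming the
// client-side classes behave as in contemporaneous Hadoop releases:
//
//   HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
//   DFSInotifyEventInputStream events = admin.getInotifyEventStream();
//   EventBatch batch = events.take();   // blocks until an edit-log batch arrives
//
// `conf` is a Configuration supplied by the caller; the class and method names are
// assumptions about the client API, not definitions made by this file.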
@Override // TraceAdminProtocol
public SpanReceiverInfo[] listSpanReceivers() throws IOException {
checkNNStartup();
namesystem.checkSuperuserPrivilege();
return nn.spanReceiverHost.listSpanReceivers();
}
@Override // TraceAdminProtocol
public long addSpanReceiver(SpanReceiverInfo info) throws IOException {
checkNNStartup();
namesystem.checkSuperuserPrivilege();
return nn.spanReceiverHost.addSpanReceiver(info);
}
@Override // TraceAdminProtocol
public void removeSpanReceiver(long id) throws IOException {
checkNNStartup();
namesystem.checkSuperuserPrivilege();
nn.spanReceiverHost.removeSpanReceiver(id);
}
}
| 75,220 | 35.891123 | 123 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
/**
* Feature for under-construction file.
*/
@InterfaceAudience.Private
public class FileUnderConstructionFeature implements INode.Feature {
private String clientName; // lease holder
private final String clientMachine;
public FileUnderConstructionFeature(final String clientName, final String clientMachine) {
this.clientName = clientName;
this.clientMachine = clientMachine;
}
public String getClientName() {
return clientName;
}
void setClientName(String clientName) {
this.clientName = clientName;
}
public String getClientMachine() {
return clientMachine;
}
/**
* Update the length for the last block
*
* @param lastBlockLength
* The length of the last block reported from client
* @throws IOException
*/
void updateLengthOfLastBlock(INodeFile f, long lastBlockLength)
throws IOException {
BlockInfo lastBlock = f.getLastBlock();
assert (lastBlock != null) : "The last block for path "
+ f.getFullPathName() + " is null when updating its length";
assert (lastBlock instanceof BlockInfoContiguousUnderConstruction)
: "The last block for path " + f.getFullPathName()
+ " is not a BlockInfoUnderConstruction when updating its length";
lastBlock.setNumBytes(lastBlockLength);
}
/**
* When deleting a file in the current fs directory, and the file is contained
* in a snapshot, we should delete the last block if it's under construction
* and its size is 0.
*/
void cleanZeroSizeBlock(final INodeFile f,
final BlocksMapUpdateInfo collectedBlocks) {
final BlockInfo[] blocks = f.getBlocks();
if (blocks != null && blocks.length > 0
&& blocks[blocks.length - 1] instanceof BlockInfoContiguousUnderConstruction) {
BlockInfoContiguousUnderConstruction lastUC =
(BlockInfoContiguousUnderConstruction) blocks[blocks.length - 1];
if (lastUC.getNumBytes() == 0) {
// this is a 0-sized block. do not need check its UC state here
collectedBlocks.addDeleteBlock(lastUC);
f.removeLastBlock(lastUC);
}
}
}
}
| 3,296 | 35.633333 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointFaultInjector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.IOException;
/**
* Utility class to facilitate some fault injection tests for the checkpointing
* process.
*/
class CheckpointFaultInjector {
static CheckpointFaultInjector instance = new CheckpointFaultInjector();
static CheckpointFaultInjector getInstance() {
return instance;
}
public void beforeGetImageSetsHeaders() throws IOException {}
public void afterSecondaryCallsRollEditLog() throws IOException {}
public void duringMerge() throws IOException {}
public void afterSecondaryUploadsNewImage() throws IOException {}
public void aboutToSendFile(File localfile) throws IOException {}
public boolean shouldSendShortFile(File localfile) {
return false;
}
public boolean shouldCorruptAByte(File localfile) {
return false;
}
public void afterMD5Rename() throws IOException {}
public void beforeEditsRename() throws IOException {}
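// Illustrative sketch (not part of the original class): tests can inject faults by
// replacing the static instance with an anonymous subclass that overrides a hook,
// e.g.
//
//   CheckpointFaultInjector.instance = new CheckpointFaultInjector() {
//     @Override
//     public void duringMerge() throws IOException {
//       throw new IOException("injected failure during checkpoint merge");
//     }
//   };
//
// The field and hook names come from this class; the override body is hypothetical.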
}
| 1,779 | 34.6 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.net.URI;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageErrorReporter;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.util.PersistentLongFile;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
/**
* NNStorage is responsible for management of the StorageDirectories used by
* the NameNode.
*/
@InterfaceAudience.Private
public class NNStorage extends Storage implements Closeable,
StorageErrorReporter {
static final String DEPRECATED_MESSAGE_DIGEST_PROPERTY = "imageMD5Digest";
static final String LOCAL_URI_SCHEME = "file";
//
// The filenames used for storing the images
//
public enum NameNodeFile {
IMAGE ("fsimage"),
TIME ("fstime"), // from "old" pre-HDFS-1073 format
SEEN_TXID ("seen_txid"),
EDITS ("edits"),
IMAGE_NEW ("fsimage.ckpt"),
IMAGE_ROLLBACK("fsimage_rollback"),
EDITS_NEW ("edits.new"), // from "old" pre-HDFS-1073 format
EDITS_INPROGRESS ("edits_inprogress"),
EDITS_TMP ("edits_tmp"),
IMAGE_LEGACY_OIV ("fsimage_legacy_oiv"); // For pre-PB format
private String fileName = null;
private NameNodeFile(String name) { this.fileName = name; }
@VisibleForTesting
public String getName() { return fileName; }
}
/**
* Implementation of StorageDirType specific to namenode storage
* A Storage directory could be of type IMAGE which stores only fsimage,
* or of type EDITS which stores edits or of type IMAGE_AND_EDITS which
* stores both fsimage and edits.
*/
@VisibleForTesting
public static enum NameNodeDirType implements StorageDirType {
UNDEFINED,
IMAGE,
EDITS,
IMAGE_AND_EDITS;
@Override
public StorageDirType getStorageDirType() {
return this;
}
@Override
public boolean isOfType(StorageDirType type) {
if ((this == IMAGE_AND_EDITS) && (type == IMAGE || type == EDITS))
return true;
return this == type;
}
}
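// Worked examples of isOfType (editor's commentary, derived from the code above):
//   NameNodeDirType.IMAGE_AND_EDITS.isOfType(NameNodeDirType.IMAGE)  -> true
//   NameNodeDirType.IMAGE_AND_EDITS.isOfType(NameNodeDirType.EDITS)  -> true
//   NameNodeDirType.IMAGE.isOfType(NameNodeDirType.EDITS)            -> false
//   NameNodeDirType.EDITS.isOfType(NameNodeDirType.EDITS)            -> true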
protected String blockpoolID = ""; // id of the block pool
/**
* flag that controls if we try to restore failed storages
*/
private boolean restoreFailedStorage = false;
private final Object restorationLock = new Object();
private boolean disablePreUpgradableLayoutCheck = false;
/**
* TxId of the last transaction that was included in the most
* recent fsimage file. This does not include any transactions
* that have since been written to the edit log.
*/
protected volatile long mostRecentCheckpointTxId = HdfsServerConstants.INVALID_TXID;
/**
* Time of the last checkpoint, in milliseconds since the epoch.
*/
private long mostRecentCheckpointTime = 0;
/**
* list of failed (and thus removed) storages
*/
final protected List<StorageDirectory> removedStorageDirs
= new CopyOnWriteArrayList<StorageDirectory>();
/**
* Properties from old layout versions that may be needed
* during upgrade only.
*/
private HashMap<String, String> deprecatedProperties;
/**
* Construct the NNStorage.
* @param conf Namenode configuration.
* @param imageDirs Directories the image can be stored in.
* @param editsDirs Directories the editlog can be stored in.
* @throws IOException if any directories are inaccessible.
*/
public NNStorage(Configuration conf,
Collection<URI> imageDirs, Collection<URI> editsDirs)
throws IOException {
super(NodeType.NAME_NODE);
storageDirs = new CopyOnWriteArrayList<StorageDirectory>();
// this may modify the editsDirs, so copy before passing in
setStorageDirectories(imageDirs,
Lists.newArrayList(editsDirs),
FSNamesystem.getSharedEditsDirs(conf));
}
@Override // Storage
public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
if (disablePreUpgradableLayoutCheck) {
return false;
}
File oldImageDir = new File(sd.getRoot(), "image");
if (!oldImageDir.exists()) {
return false;
}
// check the layout version inside the image file
File oldF = new File(oldImageDir, "fsimage");
RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
try {
oldFile.seek(0);
int oldVersion = oldFile.readInt();
oldFile.close();
oldFile = null;
if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
return false;
} finally {
IOUtils.cleanup(LOG, oldFile);
}
return true;
}
@Override // Closeable
public void close() throws IOException {
unlockAll();
storageDirs.clear();
}
/**
* Set the flag that controls whether an attempt should be made to restore failed
* storage directories at the next available opportunity.
*
* @param val Whether restoration attempt should be made.
*/
void setRestoreFailedStorage(boolean val) {
LOG.warn("set restore failed storage to " + val);
restoreFailedStorage=val;
}
/**
* @return Whether failed storage directories are to be restored.
*/
boolean getRestoreFailedStorage() {
return restoreFailedStorage;
}
/**
* See if any of removed storages is "writable" again, and can be returned
* into service.
*/
void attemptRestoreRemovedStorage() {
// if directory is "alive" - copy the images there...
if(!restoreFailedStorage || removedStorageDirs.size() == 0)
return; //nothing to restore
/* We don't want more than one thread trying to restore at a time */
synchronized (this.restorationLock) {
LOG.info("NNStorage.attemptRestoreRemovedStorage: check removed(failed) "+
"storarge. removedStorages size = " + removedStorageDirs.size());
for(Iterator<StorageDirectory> it
= this.removedStorageDirs.iterator(); it.hasNext();) {
StorageDirectory sd = it.next();
File root = sd.getRoot();
LOG.info("currently disabled dir " + root.getAbsolutePath() +
"; type="+sd.getStorageDirType()
+ ";canwrite="+FileUtil.canWrite(root));
if(root.exists() && FileUtil.canWrite(root)) {
LOG.info("restoring dir " + sd.getRoot().getAbsolutePath());
this.addStorageDir(sd); // restore
this.removedStorageDirs.remove(sd);
}
}
}
}
/**
* @return A list of storage directories which are in the errored state.
*/
List<StorageDirectory> getRemovedStorageDirs() {
return this.removedStorageDirs;
}
/**
* See {@link NNStorage#setStorageDirectories(Collection, Collection, Collection)}
*/
@VisibleForTesting
synchronized void setStorageDirectories(Collection<URI> fsNameDirs,
Collection<URI> fsEditsDirs)
throws IOException {
setStorageDirectories(fsNameDirs, fsEditsDirs, new ArrayList<URI>());
}
/**
* Set the storage directories which will be used. This should only ever be
* called from inside NNStorage. However, it needs to remain package private
* for testing, as StorageDirectories need to be reinitialised after using
* Mockito.spy() on this class, as Mockito doesn't work well with inner
* classes, such as StorageDirectory in this case.
*
* Synchronized due to initialization of storageDirs and removedStorageDirs.
*
* @param fsNameDirs Locations to store images.
* @param fsEditsDirs Locations to store edit logs.
* @throws IOException
*/
@VisibleForTesting
synchronized void setStorageDirectories(Collection<URI> fsNameDirs,
Collection<URI> fsEditsDirs,
Collection<URI> sharedEditsDirs)
throws IOException {
this.storageDirs.clear();
this.removedStorageDirs.clear();
// Add all name dirs with appropriate NameNodeDirType
for (URI dirName : fsNameDirs) {
checkSchemeConsistency(dirName);
boolean isAlsoEdits = false;
for (URI editsDirName : fsEditsDirs) {
if (editsDirName.compareTo(dirName) == 0) {
isAlsoEdits = true;
fsEditsDirs.remove(editsDirName);
break;
}
}
NameNodeDirType dirType = (isAlsoEdits) ?
NameNodeDirType.IMAGE_AND_EDITS :
NameNodeDirType.IMAGE;
// Add to the list of storage directories, only if the
// URI is of type file://
if(dirName.getScheme().compareTo("file") == 0) {
this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
dirType,
sharedEditsDirs.contains(dirName))); // Don't lock the dir if it's shared.
}
}
// Add edits dirs if they are different from name dirs
for (URI dirName : fsEditsDirs) {
checkSchemeConsistency(dirName);
// Add to the list of storage directories, only if the
// URI is of type file://
if(dirName.getScheme().compareTo("file") == 0)
this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
NameNodeDirType.EDITS, sharedEditsDirs.contains(dirName)));
}
}
/**
* Return the storage directory corresponding to the passed URI
* @param uri URI of a storage directory
* @return The matching storage directory or null if none found
*/
StorageDirectory getStorageDirectory(URI uri) {
try {
uri = Util.fileAsURI(new File(uri));
Iterator<StorageDirectory> it = dirIterator();
for (; it.hasNext(); ) {
StorageDirectory sd = it.next();
if (Util.fileAsURI(sd.getRoot()).equals(uri)) {
return sd;
}
}
} catch (IOException ioe) {
LOG.warn("Error converting file to URI", ioe);
}
return null;
}
/**
* Checks the consistency of a URI, in particular if the scheme
* is specified
* @param u URI whose consistency is being checked.
*/
private static void checkSchemeConsistency(URI u) throws IOException {
String scheme = u.getScheme();
// the URI should have a proper scheme
if(scheme == null) {
throw new IOException("Undefined scheme for " + u);
}
}
/**
* Retrieve current directories of type IMAGE
* @return Collection of URI representing image directories
* @throws IOException in case of URI processing error
*/
Collection<URI> getImageDirectories() throws IOException {
return getDirectories(NameNodeDirType.IMAGE);
}
/**
* Retrieve current directories of type EDITS
* @return Collection of URI representing edits directories
* @throws IOException in case of URI processing error
*/
Collection<URI> getEditsDirectories() throws IOException {
return getDirectories(NameNodeDirType.EDITS);
}
/**
* Return number of storage directories of the given type.
* @param dirType directory type
* @return number of storage directories of type dirType
*/
int getNumStorageDirs(NameNodeDirType dirType) {
if(dirType == null)
return getNumStorageDirs();
Iterator<StorageDirectory> it = dirIterator(dirType);
int numDirs = 0;
for(; it.hasNext(); it.next())
numDirs++;
return numDirs;
}
/**
* Return the list of locations being used for a specific purpose.
* i.e. Image or edit log storage.
*
* @param dirType Purpose of locations requested.
* @throws IOException
*/
Collection<URI> getDirectories(NameNodeDirType dirType)
throws IOException {
ArrayList<URI> list = new ArrayList<URI>();
Iterator<StorageDirectory> it = (dirType == null) ? dirIterator() :
dirIterator(dirType);
for ( ;it.hasNext(); ) {
StorageDirectory sd = it.next();
try {
list.add(Util.fileAsURI(sd.getRoot()));
} catch (IOException e) {
throw new IOException("Exception while processing " +
"StorageDirectory " + sd.getRoot(), e);
}
}
return list;
}
/**
* Determine the last transaction ID noted in this storage directory.
* This txid is stored in a special seen_txid file since it might not
* correspond to the latest image or edit log. For example, an image-only
* directory will have this txid incremented when edit logs roll, even
* though the edit logs are in a different directory.
*
* @param sd StorageDirectory to check
* @return If file exists and can be read, last recorded txid. If not, 0L.
* @throws IOException On errors processing file pointed to by sd
*/
static long readTransactionIdFile(StorageDirectory sd) throws IOException {
File txidFile = getStorageFile(sd, NameNodeFile.SEEN_TXID);
return PersistentLongFile.readFile(txidFile, 0);
}
/**
* Write the last seen transaction ID into the seen_txid file.
* @param sd storage directory
* @param txid transaction ID to record
* @throws IOException
*/
void writeTransactionIdFile(StorageDirectory sd, long txid) throws IOException {
Preconditions.checkArgument(txid >= 0, "bad txid: " + txid);
File txIdFile = getStorageFile(sd, NameNodeFile.SEEN_TXID);
PersistentLongFile.writeFile(txIdFile, txid);
}
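// Minimal round-trip sketch for the seen_txid file (editor's addition; `sd` is
// assumed to be a formatted StorageDirectory and `storage` an NNStorage instance):
//
//   storage.writeTransactionIdFile(sd, 42L);
//   long seen = NNStorage.readTransactionIdFile(sd);   // 42L, read back from seen_txid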
/**
* Set the transaction ID and time of the last checkpoint
*
* @param txid transaction id of the last checkpoint
* @param time time of the last checkpoint, in millis since the epoch
*/
void setMostRecentCheckpointInfo(long txid, long time) {
this.mostRecentCheckpointTxId = txid;
this.mostRecentCheckpointTime = time;
}
/**
* @return the transaction ID of the last checkpoint.
*/
public long getMostRecentCheckpointTxId() {
return mostRecentCheckpointTxId;
}
/**
* @return the time of the most recent checkpoint in millis since the epoch.
*/
long getMostRecentCheckpointTime() {
return mostRecentCheckpointTime;
}
/**
* Write a small file in all available storage directories that
* indicates that the namespace has reached some given transaction ID.
*
* This is used when the image is loaded to avoid accidental rollbacks
* in the case where an edit log is fully deleted but there is no
* checkpoint. See TestNameEditsConfigs.testNameEditsConfigsFailure()
* @param txid the txid that has been reached
*/
public void writeTransactionIdFileToStorage(long txid) {
// Write txid marker in all storage directories
for (StorageDirectory sd : storageDirs) {
try {
writeTransactionIdFile(sd, txid);
} catch(IOException e) {
// Close any edits stream associated with this dir and remove directory
LOG.warn("writeTransactionIdToStorage failed on " + sd,
e);
reportErrorsOnDirectory(sd);
}
}
}
/**
* Return the checkpoint image files (fsimage.ckpt_<txid>) that a periodic
* checkpoint is uploaded to, one per IMAGE storage directory.
*
* @return Array of files to save the checkpoint image to.
*/
public File[] getFsImageNameCheckpoint(long txid) {
ArrayList<File> list = new ArrayList<File>();
for (Iterator<StorageDirectory> it =
dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
list.add(getStorageFile(it.next(), NameNodeFile.IMAGE_NEW, txid));
}
return list.toArray(new File[list.size()]);
}
/**
* @return The first image file with the given txid and image type.
*/
public File getFsImageName(long txid, NameNodeFile nnf) {
for (Iterator<StorageDirectory> it = dirIterator(NameNodeDirType.IMAGE);
it.hasNext();) {
StorageDirectory sd = it.next();
File fsImage = getStorageFile(sd, nnf, txid);
if (FileUtil.canRead(sd.getRoot()) && fsImage.exists()) {
return fsImage;
}
}
return null;
}
/**
* @return The first image file whose txid is the same with the given txid and
* image type is one of the given types.
*/
public File getFsImage(long txid, EnumSet<NameNodeFile> nnfs) {
for (Iterator<StorageDirectory> it = dirIterator(NameNodeDirType.IMAGE);
it.hasNext();) {
StorageDirectory sd = it.next();
for (NameNodeFile nnf : nnfs) {
File fsImage = getStorageFile(sd, nnf, txid);
if (FileUtil.canRead(sd.getRoot()) && fsImage.exists()) {
return fsImage;
}
}
}
return null;
}
public File getFsImageName(long txid) {
return getFsImageName(txid, NameNodeFile.IMAGE);
}
public File getHighestFsImageName() {
return getFsImageName(getMostRecentCheckpointTxId());
}
/** Create new dfs name directory. Caution: this destroys all files
* in this filesystem. */
private void format(StorageDirectory sd) throws IOException {
sd.clearDirectory(); // create current dir
writeProperties(sd);
writeTransactionIdFile(sd, 0);
LOG.info("Storage directory " + sd.getRoot()
+ " has been successfully formatted.");
}
/**
* Format all available storage directories.
*/
public void format(NamespaceInfo nsInfo) throws IOException {
Preconditions.checkArgument(nsInfo.getLayoutVersion() == 0 ||
nsInfo.getLayoutVersion() == HdfsServerConstants.NAMENODE_LAYOUT_VERSION,
"Bad layout version: %s", nsInfo.getLayoutVersion());
this.setStorageInfo(nsInfo);
this.blockpoolID = nsInfo.getBlockPoolID();
for (Iterator<StorageDirectory> it =
dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
format(sd);
}
}
public static NamespaceInfo newNamespaceInfo()
throws UnknownHostException {
return new NamespaceInfo(newNamespaceID(), newClusterID(),
newBlockPoolID(), 0L);
}
public void format() throws IOException {
this.layoutVersion = HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
for (Iterator<StorageDirectory> it =
dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
format(sd);
}
}
/**
* Generate new namespaceID.
*
* namespaceID is a persistent attribute of the namespace.
* It is generated when the namenode is formatted and remains the same
* during the life cycle of the namenode.
* When datanodes register, they receive it as the registrationID,
* which is checked every time the datanode is communicating with the
* namenode. Datanodes that do not 'know' the namespaceID are rejected.
*
* @return new namespaceID
*/
private static int newNamespaceID() {
int newID = 0;
while(newID == 0)
newID = ThreadLocalRandom.current().nextInt(0x7FFFFFFF); // use 31 bits
return newID;
}
@Override // Storage
protected void setFieldsFromProperties(
Properties props, StorageDirectory sd) throws IOException {
super.setFieldsFromProperties(props, sd);
if (layoutVersion == 0) {
throw new IOException("NameNode directory "
+ sd.getRoot() + " is not formatted.");
}
// Set Block pool ID in version with federation support
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, getLayoutVersion())) {
String sbpid = props.getProperty("blockpoolID");
setBlockPoolID(sd.getRoot(), sbpid);
}
setDeprecatedPropertiesForUpgrade(props);
}
void readProperties(StorageDirectory sd, StartupOption startupOption)
throws IOException {
Properties props = readPropertiesFile(sd.getVersionFile());
if (HdfsServerConstants.RollingUpgradeStartupOption.ROLLBACK.matches
(startupOption)) {
int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
if (lv > getServiceLayoutVersion()) {
// we should not use a newer version for rollingUpgrade rollback
throw new IncorrectVersionException(getServiceLayoutVersion(), lv,
"storage directory " + sd.getRoot().getAbsolutePath());
}
props.setProperty("layoutVersion",
Integer.toString(HdfsServerConstants.NAMENODE_LAYOUT_VERSION));
}
setFieldsFromProperties(props, sd);
}
/**
* Pull any properties out of the VERSION file that are from older
* versions of HDFS and only necessary during upgrade.
*/
private void setDeprecatedPropertiesForUpgrade(Properties props) {
deprecatedProperties = new HashMap<String, String>();
String md5 = props.getProperty(DEPRECATED_MESSAGE_DIGEST_PROPERTY);
if (md5 != null) {
deprecatedProperties.put(DEPRECATED_MESSAGE_DIGEST_PROPERTY, md5);
}
}
/**
* Return a property that was stored in an earlier version of HDFS.
*
* This should only be used during upgrades.
*/
String getDeprecatedProperty(String prop) {
assert getLayoutVersion() > HdfsServerConstants.NAMENODE_LAYOUT_VERSION :
"getDeprecatedProperty should only be done when loading " +
"storage from past versions during upgrade.";
return deprecatedProperties.get(prop);
}
/**
* Write version file into the storage directory.
*
* The version file should always be written last.
* Missing or corrupted version file indicates that
* the checkpoint is not valid.
*
* @param sd storage directory
* @throws IOException
*/
@Override // Storage
protected void setPropertiesFromFields(Properties props,
StorageDirectory sd
) throws IOException {
super.setPropertiesFromFields(props, sd);
// Set blockpoolID in version with federation support
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, getLayoutVersion())) {
props.setProperty("blockpoolID", blockpoolID);
}
}
static File getStorageFile(StorageDirectory sd, NameNodeFile type, long imageTxId) {
return new File(sd.getCurrentDir(),
String.format("%s_%019d", type.getName(), imageTxId));
}
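// Example of the resulting file name (follows directly from the %s_%019d format):
//   getStorageFile(sd, NameNodeFile.IMAGE, 42) points at
//   <sd>/current/fsimage_0000000000000000042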
/**
* Get a storage file for one of the files that doesn't need a txid associated
* (e.g. VERSION, seen_txid)
*/
static File getStorageFile(StorageDirectory sd, NameNodeFile type) {
return new File(sd.getCurrentDir(), type.getName());
}
@VisibleForTesting
public static String getCheckpointImageFileName(long txid) {
return getNameNodeFileName(NameNodeFile.IMAGE_NEW, txid);
}
@VisibleForTesting
public static String getImageFileName(long txid) {
return getNameNodeFileName(NameNodeFile.IMAGE, txid);
}
@VisibleForTesting
public static String getRollbackImageFileName(long txid) {
return getNameNodeFileName(NameNodeFile.IMAGE_ROLLBACK, txid);
}
public static String getLegacyOIVImageFileName(long txid) {
return getNameNodeFileName(NameNodeFile.IMAGE_LEGACY_OIV, txid);
}
private static String getNameNodeFileName(NameNodeFile nnf, long txid) {
return String.format("%s_%019d", nnf.getName(), txid);
}
@VisibleForTesting
public static String getInProgressEditsFileName(long startTxId) {
return getNameNodeFileName(NameNodeFile.EDITS_INPROGRESS, startTxId);
}
static File getInProgressEditsFile(StorageDirectory sd, long startTxId) {
return new File(sd.getCurrentDir(), getInProgressEditsFileName(startTxId));
}
static File getFinalizedEditsFile(StorageDirectory sd,
long startTxId, long endTxId) {
return new File(sd.getCurrentDir(),
getFinalizedEditsFileName(startTxId, endTxId));
}
static File getTemporaryEditsFile(StorageDirectory sd,
long startTxId, long endTxId, long timestamp) {
return new File(sd.getCurrentDir(),
getTemporaryEditsFileName(startTxId, endTxId, timestamp));
}
static File getImageFile(StorageDirectory sd, NameNodeFile nnf, long txid) {
return new File(sd.getCurrentDir(), getNameNodeFileName(nnf, txid));
}
@VisibleForTesting
public static String getFinalizedEditsFileName(long startTxId, long endTxId) {
return String.format("%s_%019d-%019d", NameNodeFile.EDITS.getName(),
startTxId, endTxId);
}
public static String getTemporaryEditsFileName(long startTxId, long endTxId,
long timestamp) {
return String.format("%s_%019d-%019d_%019d", NameNodeFile.EDITS_TMP.getName(),
startTxId, endTxId, timestamp);
}
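// Example file names produced by the formats above (editor's note):
//   getFinalizedEditsFileName(1, 42)
//     -> "edits_0000000000000000001-0000000000000000042"
//   getInProgressEditsFileName(43)
//     -> "edits_inprogress_0000000000000000043"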
/**
* Return the first readable finalized edits file for the given txid.
*/
File findFinalizedEditsFile(long startTxId, long endTxId)
throws IOException {
File ret = findFile(NameNodeDirType.EDITS,
getFinalizedEditsFileName(startTxId, endTxId));
if (ret == null) {
throw new IOException(
"No edits file for txid " + startTxId + "-" + endTxId + " exists!");
}
return ret;
}
/**
* Return the first readable image file for the given txid and image type, or
* null if no such image can be found
*/
File findImageFile(NameNodeFile nnf, long txid) {
return findFile(NameNodeDirType.IMAGE,
getNameNodeFileName(nnf, txid));
}
/**
* Return the first readable storage file of the given name
* across any of the 'current' directories in SDs of the
* given type, or null if no such file exists.
*/
private File findFile(NameNodeDirType dirType, String name) {
for (StorageDirectory sd : dirIterable(dirType)) {
File candidate = new File(sd.getCurrentDir(), name);
if (FileUtil.canRead(sd.getCurrentDir()) &&
candidate.exists()) {
return candidate;
}
}
return null;
}
/**
* Disable the check for pre-upgradable layouts. Needed for BackupImage.
* @param val Whether to disable the pre-upgradable layout check.
*/
void setDisablePreUpgradableLayoutCheck(boolean val) {
disablePreUpgradableLayoutCheck = val;
}
/**
* Marks a list of directories as having experienced an error.
*
* @param sds A list of storage directories to mark as errored.
*/
void reportErrorsOnDirectories(List<StorageDirectory> sds) {
for (StorageDirectory sd : sds) {
reportErrorsOnDirectory(sd);
}
}
/**
* Reports that a directory has experienced an error.
* Notifies listeners that the directory is no longer
* available.
*
* @param sd A storage directory to mark as errored.
*/
private void reportErrorsOnDirectory(StorageDirectory sd) {
LOG.error("Error reported on storage directory " + sd);
String lsd = listStorageDirectories();
LOG.debug("current list of storage dirs:" + lsd);
LOG.warn("About to remove corresponding storage: "
+ sd.getRoot().getAbsolutePath());
try {
sd.unlock();
} catch (Exception e) {
LOG.warn("Unable to unlock bad storage directory: "
+ sd.getRoot().getPath(), e);
}
if (this.storageDirs.remove(sd)) {
this.removedStorageDirs.add(sd);
}
lsd = listStorageDirectories();
LOG.debug("at the end current list of storage dirs:" + lsd);
}
/**
* Processes the startup options for the clusterid and blockpoolid
* for the upgrade.
* @param startOpt Startup options
* @param layoutVersion Layout version for the upgrade
* @throws IOException
*/
void processStartupOptionsForUpgrade(StartupOption startOpt, int layoutVersion)
throws IOException {
if (startOpt == StartupOption.UPGRADE || startOpt == StartupOption.UPGRADEONLY) {
// If upgrading from a release that does not support federation,
// use the clusterId provided in the startup options if present;
// otherwise generate a new cluster ID.
if (!NameNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, layoutVersion)) {
if (startOpt.getClusterId() == null) {
startOpt.setClusterId(newClusterID());
}
setClusterID(startOpt.getClusterId());
setBlockPoolID(newBlockPoolID());
} else {
// Upgrade from one version of federation to another supported
// version of federation doesn't require clusterID.
// Warn the user if the current clusterid didn't match with the input
// clusterid.
if (startOpt.getClusterId() != null
&& !startOpt.getClusterId().equals(getClusterID())) {
LOG.warn("Clusterid mismatch - current clusterid: " + getClusterID()
+ ", Ignoring given clusterid: " + startOpt.getClusterId());
}
}
LOG.info("Using clusterid: " + getClusterID());
}
}
/**
* Report that an IOE has occurred on some file which may
* or may not be within one of the NN image storage directories.
*/
@Override
public void reportErrorOnFile(File f) {
// We use getAbsolutePath here instead of getCanonicalPath since we know
// that there is some IO problem on that drive.
// getCanonicalPath may need to call stat() or readlink() and it's likely
// those calls would fail due to the same underlying IO problem.
String absPath = f.getAbsolutePath();
for (StorageDirectory sd : storageDirs) {
String dirPath = sd.getRoot().getAbsolutePath();
if (!dirPath.endsWith(File.separator)) {
dirPath += File.separator;
}
if (absPath.startsWith(dirPath)) {
reportErrorsOnDirectory(sd);
return;
}
}
}
/**
* Generate new clusterID.
*
* clusterID is a persistent attribute of the cluster.
* It is generated when the cluster is created and remains the same
* during the life cycle of the cluster. When a new name node is formatted, if
* this is a new cluster, a new clusterID is generated and stored. Subsequent
* name nodes must be given the same clusterID during their format to be in the
* same cluster.
* When a datanode registers, it receives the clusterID and sticks with it.
* If at any point a name node or data node tries to join another cluster, it
* will be rejected.
*
* @return new clusterID
*/
public static String newClusterID() {
return "CID-" + UUID.randomUUID().toString();
}
void setClusterID(String cid) {
clusterID = cid;
}
/**
* Try to find the current cluster id in the VERSION files.
* Returns the first cluster id found in any VERSION file,
* or null in case none is found.
* @return clusterId or null in case no cluster id found
*/
public String determineClusterId() {
String cid = null;
Iterator<StorageDirectory> sdit = dirIterator(NameNodeDirType.IMAGE);
while(sdit.hasNext()) {
StorageDirectory sd = sdit.next();
try {
Properties props = readPropertiesFile(sd.getVersionFile());
cid = props.getProperty("clusterID");
LOG.info("current cluster id for sd="+sd.getCurrentDir() +
";lv=" + layoutVersion + ";cid=" + cid);
if(cid != null && !cid.equals(""))
return cid;
} catch (Exception e) {
LOG.warn("this sd not available: " + e.getLocalizedMessage());
} //ignore
}
LOG.warn("couldn't find any VERSION file containing valid ClusterId");
return null;
}
/**
* Generate new blockpoolID.
*
* @return new blockpoolID
*/
static String newBlockPoolID() throws UnknownHostException{
String ip = "unknownIP";
try {
ip = DNS.getDefaultIP("default");
} catch (UnknownHostException e) {
LOG.warn("Could not find ip address of \"default\" inteface.");
throw e;
}
int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
String bpid = "BP-" + rand + "-"+ ip + "-" + Time.now();
return bpid;
}
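// Example of the generated ID shape (illustrative values, not real output):
//   "BP-" + rand + "-" + ip + "-" + Time.now()
//   e.g. BP-1073741823-192.168.1.10-1408112039000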
/** Validate and set block pool ID */
public void setBlockPoolID(String bpid) {
blockpoolID = bpid;
}
/** Validate and set block pool ID */
private void setBlockPoolID(File storage, String bpid)
throws InconsistentFSStateException {
if (bpid == null || bpid.equals("")) {
throw new InconsistentFSStateException(storage, "file "
+ Storage.STORAGE_FILE_VERSION + " has no block pool Id.");
}
if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
throw new InconsistentFSStateException(storage,
"Unexepcted blockpoolID " + bpid + " . Expected " + blockpoolID);
}
setBlockPoolID(bpid);
}
public String getBlockPoolID() {
return blockpoolID;
}
/**
* Iterate over all current storage directories, inspecting them
* with the given inspector.
*/
void inspectStorageDirs(FSImageStorageInspector inspector)
throws IOException {
// Process each of the storage directories to find the pair of
// newest image file and edit file
for (Iterator<StorageDirectory> it = dirIterator(); it.hasNext();) {
StorageDirectory sd = it.next();
inspector.inspectDirectory(sd);
}
}
/**
* Iterate over all of the storage dirs, reading their contents to determine
* their layout versions. Returns an FSImageStorageInspector which has
* inspected each directory.
*
* <b>Note:</b> this can mutate the storage info fields (ctime, version, etc).
* @throws IOException if no valid storage dirs are found or no valid layout version
*/
FSImageStorageInspector readAndInspectDirs(EnumSet<NameNodeFile> fileTypes,
StartupOption startupOption) throws IOException {
Integer layoutVersion = null;
boolean multipleLV = false;
StringBuilder layoutVersions = new StringBuilder();
// First determine what range of layout versions we're going to inspect
for (Iterator<StorageDirectory> it = dirIterator(false);
it.hasNext();) {
StorageDirectory sd = it.next();
if (!sd.getVersionFile().exists()) {
FSImage.LOG.warn("Storage directory " + sd + " contains no VERSION file. Skipping...");
continue;
}
readProperties(sd, startupOption); // sets layoutVersion
int lv = getLayoutVersion();
if (layoutVersion == null) {
layoutVersion = Integer.valueOf(lv);
} else if (!layoutVersion.equals(lv)) {
multipleLV = true;
}
layoutVersions.append("(").append(sd.getRoot()).append(", ").append(lv).append(") ");
}
if (layoutVersion == null) {
throw new IOException("No storage directories contained VERSION information");
}
if (multipleLV) {
throw new IOException(
"Storage directories contain multiple layout versions: "
+ layoutVersions);
}
// If the storage directories are with the new layout version
// (ie edits_<txnid>) then use the new inspector, which will ignore
// the old format dirs.
FSImageStorageInspector inspector;
if (NameNodeLayoutVersion.supports(
LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) {
inspector = new FSImageTransactionalStorageInspector(fileTypes);
} else {
inspector = new FSImagePreTransactionalStorageInspector();
}
inspectStorageDirs(inspector);
return inspector;
}
public NamespaceInfo getNamespaceInfo() {
return new NamespaceInfo(
getNamespaceID(),
getClusterID(),
getBlockPoolID(),
getCTime());
}
}
| 37,057 | 33.344764 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourcePolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Collection;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* Given a set of checkable resources, this class is capable of determining
* whether sufficient resources are available for the NN to continue operating.
*/
@InterfaceAudience.Private
final class NameNodeResourcePolicy {
/**
* Return true if and only if there are sufficient NN
* resources to continue logging edits.
*
* @param resources the collection of resources to check.
* @param minimumRedundantResources the minimum number of redundant resources
* required to continue operation.
* @return true if and only if there are sufficient NN resources to
* continue logging edits.
*/
static boolean areResourcesAvailable(
Collection<? extends CheckableNameNodeResource> resources,
int minimumRedundantResources) {
// TODO: workaround:
// - during startup, if there are no edits dirs on disk, then there is
// a call to areResourcesAvailable() with no dirs at all, which was
// previously causing the NN to enter safemode
if (resources.isEmpty()) {
return true;
}
int requiredResourceCount = 0;
int redundantResourceCount = 0;
int disabledRedundantResourceCount = 0;
for (CheckableNameNodeResource resource : resources) {
if (!resource.isRequired()) {
redundantResourceCount++;
if (!resource.isResourceAvailable()) {
disabledRedundantResourceCount++;
}
} else {
requiredResourceCount++;
if (!resource.isResourceAvailable()) {
// Short circuit - a required resource is not available.
return false;
}
}
}
if (redundantResourceCount == 0) {
// If there are no redundant resources, return true if there are any
// required resources available.
return requiredResourceCount > 0;
} else {
return redundantResourceCount - disabledRedundantResourceCount >=
minimumRedundantResources;
}
}
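// Worked example of the policy above (editor's commentary): with three redundant
// edits directories and minimumRedundantResources == 1, edits logging continues as
// long as at least one redundant directory is still available (3 - disabled >= 1);
// a single unavailable *required* directory short-circuits to false immediately.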
}
| 2,898 | 34.790123 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
/**
* A default implementation of {@link INodeAttributeProvider} that simply
* returns the inode's own attributes.
*/
public class DefaultINodeAttributesProvider extends INodeAttributeProvider {
public static INodeAttributeProvider DEFAULT_PROVIDER =
new DefaultINodeAttributesProvider();
@Override
public void start() {
// NO-OP
}
@Override
public void stop() {
// NO-OP
}
@Override
public INodeAttributes getAttributes(String[] pathElements,
INodeAttributes inode) {
return inode;
}
}
| 1,344 | 28.23913 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.util.LongBitFormat;
import com.google.common.collect.ImmutableList;
/**
* Class to pack an AclEntry into an integer. <br>
* An ACL entry is represented by a 32-bit integer in Big Endian format. <br>
 * The bits can be divided into five segments: <br>
 * [0:1) || [1:3) || [3:6) || [6:7) || [7:32) <br>
 * <br>
 * [0:1) -- the scope of the entry (AclEntryScope) <br>
 * [1:3) -- the type of the entry (AclEntryType) <br>
 * [3:6) -- the permission of the entry (FsAction) <br>
 * [6:7) -- a flag indicating whether this is a named entry <br>
* [7:32) -- the name of the entry, which is an ID that points to a <br>
* string in the StringTableSection. <br>
*/
public enum AclEntryStatusFormat {
SCOPE(null, 1),
TYPE(SCOPE.BITS, 2),
PERMISSION(TYPE.BITS, 3),
NAMED_ENTRY_CHECK(PERMISSION.BITS, 1),
NAME(NAMED_ENTRY_CHECK.BITS, 25);
private final LongBitFormat BITS;
private AclEntryStatusFormat(LongBitFormat previous, int length) {
BITS = new LongBitFormat(name(), previous, length, 0);
}
static AclEntryScope getScope(int aclEntry) {
int ordinal = (int) SCOPE.BITS.retrieve(aclEntry);
return AclEntryScope.values()[ordinal];
}
static AclEntryType getType(int aclEntry) {
int ordinal = (int) TYPE.BITS.retrieve(aclEntry);
return AclEntryType.values()[ordinal];
}
static FsAction getPermission(int aclEntry) {
int ordinal = (int) PERMISSION.BITS.retrieve(aclEntry);
return FsAction.values()[ordinal];
}
static String getName(int aclEntry) {
int nameExists = (int) NAMED_ENTRY_CHECK.BITS.retrieve(aclEntry);
if (nameExists == 0) {
return null;
}
int id = (int) NAME.BITS.retrieve(aclEntry);
AclEntryType type = getType(aclEntry);
if (type == AclEntryType.USER) {
return SerialNumberManager.INSTANCE.getUser(id);
} else if (type == AclEntryType.GROUP) {
return SerialNumberManager.INSTANCE.getGroup(id);
}
return null;
}
static int toInt(AclEntry aclEntry) {
long aclEntryInt = 0;
aclEntryInt = SCOPE.BITS
.combine(aclEntry.getScope().ordinal(), aclEntryInt);
aclEntryInt = TYPE.BITS.combine(aclEntry.getType().ordinal(), aclEntryInt);
aclEntryInt = PERMISSION.BITS.combine(aclEntry.getPermission().ordinal(),
aclEntryInt);
if (aclEntry.getName() != null) {
aclEntryInt = NAMED_ENTRY_CHECK.BITS.combine(1, aclEntryInt);
if (aclEntry.getType() == AclEntryType.USER) {
int userId = SerialNumberManager.INSTANCE.getUserSerialNumber(aclEntry
.getName());
aclEntryInt = NAME.BITS.combine(userId, aclEntryInt);
} else if (aclEntry.getType() == AclEntryType.GROUP) {
int groupId = SerialNumberManager.INSTANCE
.getGroupSerialNumber(aclEntry.getName());
aclEntryInt = NAME.BITS.combine(groupId, aclEntryInt);
}
}
return (int) aclEntryInt;
}
static AclEntry toAclEntry(int aclEntry) {
AclEntry.Builder builder = new AclEntry.Builder();
builder.setScope(getScope(aclEntry)).setType(getType(aclEntry))
.setPermission(getPermission(aclEntry));
if (getName(aclEntry) != null) {
builder.setName(getName(aclEntry));
}
return builder.build();
}
public static int[] toInt(List<AclEntry> aclEntries) {
int[] entries = new int[aclEntries.size()];
for (int i = 0; i < entries.length; i++) {
entries[i] = toInt(aclEntries.get(i));
}
return entries;
}
public static ImmutableList<AclEntry> toAclEntries(int[] entries) {
ImmutableList.Builder<AclEntry> b = new ImmutableList.Builder<AclEntry>();
for (int entry : entries) {
AclEntry aclEntry = toAclEntry(entry);
b.add(aclEntry);
}
return b.build();
}
}
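/*
 * Illustrative sketch (not part of the original source): round-trips an
 * unnamed ACL entry through the integer packing above. Named USER/GROUP
 * entries would additionally go through the SerialNumberManager to map the
 * entry name to an id.
 */
class AclEntryStatusFormatSketch {
  static void demo() {
    AclEntry entry = new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS)
        .setType(AclEntryType.GROUP)
        .setPermission(FsAction.READ_EXECUTE)
        .build();
    int packed = AclEntryStatusFormat.toInt(entry);
    AclEntry decoded = AclEntryStatusFormat.toAclEntry(packed);
    // For unnamed entries the decoded entry equals the original.
    System.out.println(Integer.toBinaryString(packed) + " -> " + decoded);
  }
}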
| 4,842 | 34.610294 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SafeMode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
/** SafeMode related operations. */
@InterfaceAudience.Private
public interface SafeMode {
/**
* Check safe mode conditions.
* If the corresponding conditions are satisfied,
* trigger the system to enter/leave safe mode.
*/
public void checkSafeMode();
/** Is the system in safe mode? */
public boolean isInSafeMode();
/**
* Is the system in startup safe mode, i.e. the system is starting up with
* safe mode turned on automatically?
*/
public boolean isInStartupSafeMode();
/** Check whether replication queues are being populated. */
public boolean isPopulatingReplQueues();
/**
* Increment number of blocks that reached minimal replication.
* @param replication current replication
*/
public void incrementSafeBlockCount(int replication);
/** Decrement number of blocks that reached minimal replication. */
public void decrementSafeBlockCount(BlockInfo b);
}
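/*
 * Illustrative sketch (not part of the original source): a trivial in-memory
 * implementation showing how a safe-block count could drive the interface
 * above. The threshold is made up; the real FSNamesystem logic also tracks
 * datanodes, extension periods and thresholds as fractions of total blocks.
 */
class CountingSafeModeSketch implements SafeMode {
  private final long safeBlockThreshold;
  private long safeBlockCount;
  private boolean inSafeMode = true;
  CountingSafeModeSketch(long safeBlockThreshold) {
    this.safeBlockThreshold = safeBlockThreshold;
  }
  @Override
  public void checkSafeMode() {
    // leave safe mode once enough blocks have reached minimal replication
    if (inSafeMode && safeBlockCount >= safeBlockThreshold) {
      inSafeMode = false;
    }
  }
  @Override
  public boolean isInSafeMode() {
    return inSafeMode;
  }
  @Override
  public boolean isInStartupSafeMode() {
    return inSafeMode;
  }
  @Override
  public boolean isPopulatingReplQueues() {
    return !inSafeMode;
  }
  @Override
  public void incrementSafeBlockCount(int replication) {
    safeBlockCount++;
    checkSafeMode();
  }
  @Override
  public void decrementSafeBlockCount(BlockInfo b) {
    safeBlockCount--;
  }
}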
| 1,906 | 34.314815 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants
.CRYPTO_XATTR_ENCRYPTION_ZONE;
/**
* Manages the list of encryption zones in the filesystem.
* <p/>
* The EncryptionZoneManager has its own lock, but relies on the FSDirectory
* lock being held for many operations. The FSDirectory lock should not be
* taken if the manager lock is already held.
*/
public class EncryptionZoneManager {
public static Logger LOG = LoggerFactory.getLogger(EncryptionZoneManager
.class);
/**
* EncryptionZoneInt is the internal representation of an encryption zone. The
* external representation of an EZ is embodied in an EncryptionZone and
* contains the EZ's pathname.
*/
private static class EncryptionZoneInt {
private final long inodeId;
private final CipherSuite suite;
private final CryptoProtocolVersion version;
private final String keyName;
EncryptionZoneInt(long inodeId, CipherSuite suite,
CryptoProtocolVersion version, String keyName) {
Preconditions.checkArgument(suite != CipherSuite.UNKNOWN);
Preconditions.checkArgument(version != CryptoProtocolVersion.UNKNOWN);
this.inodeId = inodeId;
this.suite = suite;
this.version = version;
this.keyName = keyName;
}
long getINodeId() {
return inodeId;
}
CipherSuite getSuite() {
return suite;
}
CryptoProtocolVersion getVersion() { return version; }
String getKeyName() {
return keyName;
}
}
private final TreeMap<Long, EncryptionZoneInt> encryptionZones;
private final FSDirectory dir;
private final int maxListEncryptionZonesResponses;
/**
* Construct a new EncryptionZoneManager.
*
* @param dir Enclosing FSDirectory
*/
public EncryptionZoneManager(FSDirectory dir, Configuration conf) {
this.dir = dir;
encryptionZones = new TreeMap<Long, EncryptionZoneInt>();
maxListEncryptionZonesResponses = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT
);
Preconditions.checkArgument(maxListEncryptionZonesResponses >= 0,
        DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES + " " +
            "must be a non-negative integer."
);
}
/**
* Add a new encryption zone.
* <p/>
* Called while holding the FSDirectory lock.
*
* @param inodeId of the encryption zone
* @param keyName encryption zone key name
*/
void addEncryptionZone(Long inodeId, CipherSuite suite,
CryptoProtocolVersion version, String keyName) {
assert dir.hasWriteLock();
unprotectedAddEncryptionZone(inodeId, suite, version, keyName);
}
/**
* Add a new encryption zone.
* <p/>
* Does not assume that the FSDirectory lock is held.
*
* @param inodeId of the encryption zone
* @param keyName encryption zone key name
*/
void unprotectedAddEncryptionZone(Long inodeId,
CipherSuite suite, CryptoProtocolVersion version, String keyName) {
final EncryptionZoneInt ez = new EncryptionZoneInt(
inodeId, suite, version, keyName);
encryptionZones.put(inodeId, ez);
}
/**
* Remove an encryption zone.
* <p/>
* Called while holding the FSDirectory lock.
*/
void removeEncryptionZone(Long inodeId) {
assert dir.hasWriteLock();
encryptionZones.remove(inodeId);
}
/**
* Returns true if an IIP is within an encryption zone.
* <p/>
* Called while holding the FSDirectory lock.
*/
boolean isInAnEZ(INodesInPath iip)
throws UnresolvedLinkException, SnapshotAccessControlException {
assert dir.hasReadLock();
return (getEncryptionZoneForPath(iip) != null);
}
/**
* Returns the path of the EncryptionZoneInt.
* <p/>
* Called while holding the FSDirectory lock.
*/
private String getFullPathName(EncryptionZoneInt ezi) {
assert dir.hasReadLock();
return dir.getInode(ezi.getINodeId()).getFullPathName();
}
/**
* Get the key name for an encryption zone. Returns null if <tt>iip</tt> is
* not within an encryption zone.
* <p/>
* Called while holding the FSDirectory lock.
*/
String getKeyName(final INodesInPath iip) {
assert dir.hasReadLock();
EncryptionZoneInt ezi = getEncryptionZoneForPath(iip);
if (ezi == null) {
return null;
}
return ezi.getKeyName();
}
/**
* Looks up the EncryptionZoneInt for a path within an encryption zone.
* Returns null if path is not within an EZ.
* <p/>
* Must be called while holding the manager lock.
*/
private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) {
assert dir.hasReadLock();
Preconditions.checkNotNull(iip);
List<INode> inodes = iip.getReadOnlyINodes();
for (int i = inodes.size() - 1; i >= 0; i--) {
final INode inode = inodes.get(i);
if (inode != null) {
final EncryptionZoneInt ezi = encryptionZones.get(inode.getId());
if (ezi != null) {
return ezi;
}
}
}
return null;
}
/**
* Returns an EncryptionZone representing the ez for a given path.
   * Returns null if the path is not in an ez.
*
* @param iip The INodesInPath of the path to check
* @return the EncryptionZone representing the ez for the path.
*/
EncryptionZone getEZINodeForPath(INodesInPath iip) {
final EncryptionZoneInt ezi = getEncryptionZoneForPath(iip);
if (ezi == null) {
return null;
} else {
return new EncryptionZone(ezi.getINodeId(), getFullPathName(ezi),
ezi.getSuite(), ezi.getVersion(), ezi.getKeyName());
}
}
/**
* Throws an exception if the provided path cannot be renamed into the
* destination because of differing encryption zones.
* <p/>
* Called while holding the FSDirectory lock.
*
* @param srcIIP source IIP
* @param dstIIP destination IIP
* @param src source path, used for debugging
* @throws IOException if the src cannot be renamed to the dst
*/
void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src)
throws IOException {
assert dir.hasReadLock();
final EncryptionZoneInt srcEZI = getEncryptionZoneForPath(srcIIP);
final EncryptionZoneInt dstEZI = getEncryptionZoneForPath(dstIIP);
final boolean srcInEZ = (srcEZI != null);
final boolean dstInEZ = (dstEZI != null);
if (srcInEZ) {
if (!dstInEZ) {
if (srcEZI.getINodeId() == srcIIP.getLastINode().getId()) {
// src is ez root and dest is not in an ez. Allow the rename.
return;
}
throw new IOException(
src + " can't be moved from an encryption zone.");
}
} else {
if (dstInEZ) {
throw new IOException(
src + " can't be moved into an encryption zone.");
}
}
if (srcInEZ) {
if (srcEZI != dstEZI) {
final String srcEZPath = getFullPathName(srcEZI);
final String dstEZPath = getFullPathName(dstEZI);
final StringBuilder sb = new StringBuilder(src);
sb.append(" can't be moved from encryption zone ");
sb.append(srcEZPath);
sb.append(" to encryption zone ");
sb.append(dstEZPath);
sb.append(".");
throw new IOException(sb.toString());
}
}
}
/**
* Create a new encryption zone.
* <p/>
* Called while holding the FSDirectory lock.
*/
XAttr createEncryptionZone(String src, CipherSuite suite,
CryptoProtocolVersion version, String keyName)
throws IOException {
assert dir.hasWriteLock();
final INodesInPath srcIIP = dir.getINodesInPath4Write(src, false);
if (dir.isNonEmptyDirectory(srcIIP)) {
throw new IOException(
"Attempt to create an encryption zone for a non-empty directory.");
}
if (srcIIP != null &&
srcIIP.getLastINode() != null &&
!srcIIP.getLastINode().isDirectory()) {
throw new IOException("Attempt to create an encryption zone for a file.");
}
EncryptionZoneInt ezi = getEncryptionZoneForPath(srcIIP);
if (ezi != null) {
throw new IOException("Directory " + src + " is already in an " +
"encryption zone. (" + getFullPathName(ezi) + ")");
}
final HdfsProtos.ZoneEncryptionInfoProto proto =
PBHelper.convert(suite, version, keyName);
final XAttr ezXAttr = XAttrHelper
.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, proto.toByteArray());
final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
xattrs.add(ezXAttr);
// updating the xattr will call addEncryptionZone,
// done this way to handle edit log loading
FSDirXAttrOp.unprotectedSetXAttrs(dir, src, xattrs,
EnumSet.of(XAttrSetFlag.CREATE));
return ezXAttr;
}
/**
* Cursor-based listing of encryption zones.
* <p/>
* Called while holding the FSDirectory lock.
*/
BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
throws IOException {
assert dir.hasReadLock();
NavigableMap<Long, EncryptionZoneInt> tailMap = encryptionZones.tailMap
(prevId, false);
final int numResponses = Math.min(maxListEncryptionZonesResponses,
tailMap.size());
final List<EncryptionZone> zones =
Lists.newArrayListWithExpectedSize(numResponses);
int count = 0;
for (EncryptionZoneInt ezi : tailMap.values()) {
/*
Skip EZs that are only present in snapshots. Re-resolve the path to
see if the path's current inode ID matches EZ map's INode ID.
INode#getFullPathName simply calls getParent recursively, so will return
the INode's parents at the time it was snapshotted. It will not
contain a reference INode.
*/
final String pathName = getFullPathName(ezi);
INodesInPath iip = dir.getINodesInPath(pathName, false);
INode lastINode = iip.getLastINode();
if (lastINode == null || lastINode.getId() != ezi.getINodeId()) {
continue;
}
// Add the EZ to the result list
zones.add(new EncryptionZone(ezi.getINodeId(), pathName,
ezi.getSuite(), ezi.getVersion(), ezi.getKeyName()));
count++;
if (count >= numResponses) {
break;
}
}
final boolean hasMore = (numResponses < tailMap.size());
return new BatchedListEntries<EncryptionZone>(zones, hasMore);
}
/**
* @return number of encryption zones.
*/
public int getNumEncryptionZones() {
return encryptionZones.size();
}
}
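/*
 * Illustrative sketch (not part of the original source): shows how a caller
 * could page through the cursor-based listEncryptionZones() API above. The
 * helper name is made up; the FSDirectory read lock is taken around each
 * batch, as the method requires.
 */
class EncryptionZoneListingSketch {
  static void printAllZones(EncryptionZoneManager ezm, FSDirectory fsd)
      throws IOException {
    long lastSeenId = 0;
    while (true) {
      final BatchedListEntries<EncryptionZone> batch;
      fsd.readLock();
      try {
        batch = ezm.listEncryptionZones(lastSeenId);
      } finally {
        fsd.readUnlock();
      }
      for (int i = 0; i < batch.size(); i++) {
        EncryptionZone zone = batch.get(i);
        lastSeenId = zone.getId();
        System.out.println(zone.getPath() + " -> " + zone.getKeyName());
      }
      if (batch.size() == 0 || !batch.hasMore()) {
        break;
      }
    }
  }
}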
| 12,414 | 32.463612 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.PrintWriter;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
/**
* An {@link INode} representing a symbolic link.
*/
@InterfaceAudience.Private
public class INodeSymlink extends INodeWithAdditionalFields {
private final byte[] symlink; // The target URI
INodeSymlink(long id, byte[] name, PermissionStatus permissions,
long mtime, long atime, String symlink) {
super(id, name, permissions, mtime, atime);
this.symlink = DFSUtil.string2Bytes(symlink);
}
INodeSymlink(INodeSymlink that) {
super(that);
this.symlink = that.symlink;
}
@Override
void recordModification(int latestSnapshotId) {
if (isInLatestSnapshot(latestSnapshotId)) {
INodeDirectory parent = getParent();
parent.saveChild2Snapshot(this, latestSnapshotId, new INodeSymlink(this));
}
}
/** @return true unconditionally. */
@Override
public boolean isSymlink() {
return true;
}
/** @return this object. */
@Override
public INodeSymlink asSymlink() {
return this;
}
public String getSymlinkString() {
return DFSUtil.bytes2String(symlink);
}
public byte[] getSymlink() {
return symlink;
}
@Override
public void cleanSubtree(ReclaimContext reclaimContext, final int snapshotId,
int priorSnapshotId) {
if (snapshotId == Snapshot.CURRENT_STATE_ID
&& priorSnapshotId == Snapshot.NO_SNAPSHOT_ID) {
destroyAndCollectBlocks(reclaimContext);
}
}
@Override
public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
reclaimContext.removedINodes.add(this);
reclaimContext.quotaDelta().add(
new QuotaCounts.Builder().nameSpace(1).build());
}
@Override
public QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
byte blockStoragePolicyId, boolean useCache, int lastSnapshotId) {
return new QuotaCounts.Builder().nameSpace(1).build();
}
@Override
public ContentSummaryComputationContext computeContentSummary(
final ContentSummaryComputationContext summary) {
summary.getCounts().addContent(Content.SYMLINK, 1);
return summary;
}
@Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
final int snapshot) {
super.dumpTreeRecursively(out, prefix, snapshot);
out.println();
}
@Override
public void removeAclFeature() {
throw new UnsupportedOperationException("ACLs are not supported on symlinks");
}
@Override
public void addAclFeature(AclFeature f) {
throw new UnsupportedOperationException("ACLs are not supported on symlinks");
}
@Override
final XAttrFeature getXAttrFeature(int snapshotId) {
throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
}
@Override
public void removeXAttrFeature() {
throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
}
@Override
public void addXAttrFeature(XAttrFeature f) {
throw new UnsupportedOperationException("XAttrs are not supported on symlinks");
}
@Override
public byte getStoragePolicyID() {
    throw new UnsupportedOperationException(
        "Storage policies are not supported on symlinks");
}
@Override
public byte getLocalStoragePolicyID() {
    throw new UnsupportedOperationException(
        "Storage policies are not supported on symlinks");
}
}
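/*
 * Illustrative sketch (not part of the original source): builds a standalone
 * symlink inode to show that the target is stored as UTF-8 bytes and read
 * back through getSymlinkString(). The id, names, owner and times are made up.
 */
class INodeSymlinkSketch {
  static void demo() {
    PermissionStatus perm = new PermissionStatus("hdfs", "supergroup",
        new org.apache.hadoop.fs.permission.FsPermission((short) 0777));
    long now = System.currentTimeMillis();
    INodeSymlink link = new INodeSymlink(1001L, DFSUtil.string2Bytes("docs"),
        perm, now, now, "/user/hdfs/documents");
    System.out.println(link.isSymlink());        // true
    System.out.println(link.getSymlinkString()); // /user/hdfs/documents
  }
}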
| 4,466 | 29.387755 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
@InterfaceAudience.Public
@InterfaceStability.Unstable
public abstract class INodeAttributeProvider {
/**
* The AccessControlEnforcer allows implementations to override the
* default File System permission checking logic enforced on a file system
* object
*/
public interface AccessControlEnforcer {
/**
     * Checks permission on a file system object. Has to throw an Exception
     * if the filesystem object is not accessible by the calling Ugi.
     * @param fsOwner Filesystem owner (The Namenode user)
     * @param supergroup super user group
     * @param callerUgi UserGroupInformation of the caller
     * @param inodeAttrs Array of INode attributes for each path element in
     *                   the path
* @param inodes Array of INodes for each path element in the path
* @param pathByNameArr Array of byte arrays of the LocalName
* @param snapshotId the snapshotId of the requested path
* @param path Path String
* @param ancestorIndex Index of ancestor
* @param doCheckOwner perform ownership check
* @param ancestorAccess The access required by the ancestor of the path.
* @param parentAccess The access required by the parent of the path.
* @param access The access required by the path.
     * @param subAccess If path is a directory, it is the access required of
* the path and all the sub-directories. If path is not a
* directory, there should ideally be no effect.
* @param ignoreEmptyDir Ignore permission checking for empty directory?
* @throws AccessControlException
*/
public abstract void checkPermission(String fsOwner, String supergroup,
UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path,
int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
FsAction parentAccess, FsAction access, FsAction subAccess,
boolean ignoreEmptyDir)
throws AccessControlException;
}
/**
* Initialize the provider. This method is called at NameNode startup
* time.
*/
public abstract void start();
/**
* Shutdown the provider. This method is called at NameNode shutdown time.
*/
public abstract void stop();
@VisibleForTesting
String[] getPathElements(String path) {
path = path.trim();
if (path.charAt(0) != Path.SEPARATOR_CHAR) {
throw new IllegalArgumentException("It must be an absolute path: " +
path);
}
int numOfElements = StringUtils.countMatches(path, Path.SEPARATOR);
if (path.length() > 1 && path.endsWith(Path.SEPARATOR)) {
numOfElements--;
}
String[] pathElements = new String[numOfElements];
int elementIdx = 0;
int idx = 0;
int found = path.indexOf(Path.SEPARATOR_CHAR, idx);
while (found > -1) {
if (found > idx) {
pathElements[elementIdx++] = path.substring(idx, found);
}
idx = found + 1;
found = path.indexOf(Path.SEPARATOR_CHAR, idx);
}
if (idx < path.length()) {
pathElements[elementIdx] = path.substring(idx);
}
return pathElements;
}
public INodeAttributes getAttributes(String fullPath, INodeAttributes inode) {
return getAttributes(getPathElements(fullPath), inode);
}
public abstract INodeAttributes getAttributes(String[] pathElements,
INodeAttributes inode);
/**
* Can be over-ridden by implementations to provide a custom Access Control
* Enforcer that can provide an alternate implementation of the
* default permission checking logic.
* @param defaultEnforcer The Default AccessControlEnforcer
* @return The AccessControlEnforcer to use
*/
public AccessControlEnforcer getExternalAccessControlEnforcer(
AccessControlEnforcer defaultEnforcer) {
return defaultEnforcer;
}
}
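/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * provider that keeps the stock attributes but wraps the default
 * AccessControlEnforcer so every permission check can be observed before
 * being delegated. The class name is made up.
 */
class AuditingINodeAttributeProviderSketch extends INodeAttributeProvider {
  @Override
  public void start() {
    // no state to initialize in this sketch
  }
  @Override
  public void stop() {
    // no state to tear down in this sketch
  }
  @Override
  public INodeAttributes getAttributes(String[] pathElements,
      INodeAttributes inode) {
    // return the attributes unchanged; a real provider could substitute a
    // different owner, group, permission or ACL here
    return inode;
  }
  @Override
  public AccessControlEnforcer getExternalAccessControlEnforcer(
      final AccessControlEnforcer defaultEnforcer) {
    return new AccessControlEnforcer() {
      @Override
      public void checkPermission(String fsOwner, String supergroup,
          UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
          INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path,
          int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
          FsAction parentAccess, FsAction access, FsAction subAccess,
          boolean ignoreEmptyDir) throws AccessControlException {
        // observe the check (e.g. log it), then fall back to the default logic
        defaultEnforcer.checkPermission(fsOwner, supergroup, callerUgi,
            inodeAttrs, inodes, pathByNameArr, snapshotId, path,
            ancestorIndex, doCheckOwner, ancestorAccess, parentAccess,
            access, subAccess, ignoreEmptyDir);
      }
    };
  }
}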
| 5,277 | 37.808824 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
import org.apache.hadoop.util.ChunkedArrayList;
import java.io.IOException;
import java.util.List;
class FSDirSnapshotOp {
/** Verify if the snapshot name is legal. */
static void verifySnapshotName(FSDirectory fsd, String snapshotName,
String path)
throws FSLimitException.PathComponentTooLongException {
if (snapshotName.contains(Path.SEPARATOR)) {
throw new HadoopIllegalArgumentException(
"Snapshot name cannot contain \"" + Path.SEPARATOR + "\"");
}
final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
fsd.verifyINodeName(bytes);
fsd.verifyMaxComponentLength(bytes, path);
}
/** Allow snapshot on a directory. */
static void allowSnapshot(FSDirectory fsd, SnapshotManager snapshotManager,
String path) throws IOException {
fsd.writeLock();
try {
snapshotManager.setSnapshottable(path, true);
} finally {
fsd.writeUnlock();
}
fsd.getEditLog().logAllowSnapshot(path);
}
static void disallowSnapshot(
FSDirectory fsd, SnapshotManager snapshotManager,
String path) throws IOException {
fsd.writeLock();
try {
snapshotManager.resetSnapshottable(path);
} finally {
fsd.writeUnlock();
}
fsd.getEditLog().logDisallowSnapshot(path);
}
/**
* Create a snapshot
* @param snapshotRoot The directory path where the snapshot is taken
* @param snapshotName The name of the snapshot
*/
static String createSnapshot(
FSDirectory fsd, SnapshotManager snapshotManager, String snapshotRoot,
String snapshotName, boolean logRetryCache)
throws IOException {
final INodesInPath iip = fsd.getINodesInPath4Write(snapshotRoot);
if (fsd.isPermissionEnabled()) {
FSPermissionChecker pc = fsd.getPermissionChecker();
fsd.checkOwner(pc, iip);
}
if (snapshotName == null || snapshotName.isEmpty()) {
snapshotName = Snapshot.generateDefaultSnapshotName();
} else if (!DFSUtil.isValidNameForComponent(snapshotName)) {
throw new InvalidPathException("Invalid snapshot name: " + snapshotName);
}
String snapshotPath = null;
verifySnapshotName(fsd, snapshotName, snapshotRoot);
fsd.writeLock();
try {
snapshotPath = snapshotManager.createSnapshot(iip, snapshotRoot,
snapshotName);
} finally {
fsd.writeUnlock();
}
fsd.getEditLog().logCreateSnapshot(snapshotRoot, snapshotName,
logRetryCache);
return snapshotPath;
}
static void renameSnapshot(FSDirectory fsd, SnapshotManager snapshotManager,
String path, String snapshotOldName, String snapshotNewName,
boolean logRetryCache) throws IOException {
final INodesInPath iip = fsd.getINodesInPath4Write(path);
if (fsd.isPermissionEnabled()) {
FSPermissionChecker pc = fsd.getPermissionChecker();
fsd.checkOwner(pc, iip);
}
verifySnapshotName(fsd, snapshotNewName, path);
fsd.writeLock();
try {
snapshotManager.renameSnapshot(iip, path, snapshotOldName,
snapshotNewName);
} finally {
fsd.writeUnlock();
}
fsd.getEditLog().logRenameSnapshot(path, snapshotOldName,
snapshotNewName, logRetryCache);
}
static SnapshottableDirectoryStatus[] getSnapshottableDirListing(
FSDirectory fsd, SnapshotManager snapshotManager) throws IOException {
FSPermissionChecker pc = fsd.getPermissionChecker();
fsd.readLock();
try {
final String user = pc.isSuperUser()? null : pc.getUser();
return snapshotManager.getSnapshottableDirListing(user);
} finally {
fsd.readUnlock();
}
}
static SnapshotDiffReport getSnapshotDiffReport(FSDirectory fsd,
SnapshotManager snapshotManager, String path,
String fromSnapshot, String toSnapshot) throws IOException {
SnapshotDiffReport diffs;
final FSPermissionChecker pc = fsd.getPermissionChecker();
fsd.readLock();
try {
if (fsd.isPermissionEnabled()) {
checkSubtreeReadPermission(fsd, pc, path, fromSnapshot);
checkSubtreeReadPermission(fsd, pc, path, toSnapshot);
}
INodesInPath iip = fsd.getINodesInPath(path, true);
diffs = snapshotManager.diff(iip, path, fromSnapshot, toSnapshot);
} finally {
fsd.readUnlock();
}
return diffs;
}
/**
* Delete a snapshot of a snapshottable directory
* @param snapshotRoot The snapshottable directory
* @param snapshotName The name of the to-be-deleted snapshot
* @throws IOException
*/
static INode.BlocksMapUpdateInfo deleteSnapshot(
FSDirectory fsd, SnapshotManager snapshotManager, String snapshotRoot,
String snapshotName, boolean logRetryCache)
throws IOException {
final INodesInPath iip = fsd.getINodesInPath4Write(snapshotRoot);
if (fsd.isPermissionEnabled()) {
FSPermissionChecker pc = fsd.getPermissionChecker();
fsd.checkOwner(pc, iip);
}
INode.BlocksMapUpdateInfo collectedBlocks = new INode.BlocksMapUpdateInfo();
ChunkedArrayList<INode> removedINodes = new ChunkedArrayList<>();
INode.ReclaimContext context = new INode.ReclaimContext(
fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes, null);
fsd.writeLock();
try {
snapshotManager.deleteSnapshot(iip, snapshotName, context);
fsd.updateCount(iip, context.quotaDelta(), false);
fsd.removeFromInodeMap(removedINodes);
} finally {
fsd.writeUnlock();
}
removedINodes.clear();
fsd.getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
logRetryCache);
return collectedBlocks;
}
private static void checkSubtreeReadPermission(
FSDirectory fsd, final FSPermissionChecker pc, String snapshottablePath,
String snapshot) throws IOException {
final String fromPath = snapshot == null ?
snapshottablePath : Snapshot.getSnapshotPath(snapshottablePath,
snapshot);
INodesInPath iip = fsd.getINodesInPath(fromPath, true);
fsd.checkPermission(pc, iip, false, null, null, FsAction.READ,
FsAction.READ);
}
/**
* Check if the given INode (or one of its descendants) is snapshottable and
* already has snapshots.
*
* @param target The given INode
* @param snapshottableDirs The list of directories that are snapshottable
* but do not have snapshots yet
*/
static void checkSnapshot(
INode target, List<INodeDirectory> snapshottableDirs)
throws SnapshotException {
if (target.isDirectory()) {
INodeDirectory targetDir = target.asDirectory();
DirectorySnapshottableFeature sf = targetDir
.getDirectorySnapshottableFeature();
if (sf != null) {
if (sf.getNumSnapshots() > 0) {
String fullPath = targetDir.getFullPathName();
throw new SnapshotException("The directory " + fullPath
+ " cannot be deleted since " + fullPath
+ " is snapshottable and already has snapshots");
} else {
if (snapshottableDirs != null) {
snapshottableDirs.add(targetDir);
}
}
}
for (INode child : targetDir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
checkSnapshot(child, snapshottableDirs);
}
}
}
}
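/*
 * Illustrative sketch (not part of the original source): the intended calling
 * pattern for checkSnapshot() before deleting a subtree -- directories that
 * are snapshottable but hold no snapshots are collected so the caller can
 * reset them, while the call fails fast if any directory still has snapshots.
 * The helper name is made up.
 */
class SnapshotDeleteCheckSketch {
  static List<INodeDirectory> preDeleteCheck(INode subtreeRoot)
      throws SnapshotException {
    List<INodeDirectory> toReset = new ChunkedArrayList<>();
    FSDirSnapshotOp.checkSnapshot(subtreeRoot, toReset);
    return toReset;
  }
}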
| 8,848 | 35.717842 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.util.EnumCounters;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.ByteString;
@InterfaceAudience.Private
public final class FSImageFormatPBINode {
private final static long USER_GROUP_STRID_MASK = (1 << 24) - 1;
private final static int USER_STRID_OFFSET = 40;
private final static int GROUP_STRID_OFFSET = 16;
private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class);
private static final int ACL_ENTRY_NAME_MASK = (1 << 24) - 1;
private static final int ACL_ENTRY_NAME_OFFSET = 6;
private static final int ACL_ENTRY_TYPE_OFFSET = 3;
private static final int ACL_ENTRY_SCOPE_OFFSET = 5;
private static final int ACL_ENTRY_PERM_MASK = 7;
private static final int ACL_ENTRY_TYPE_MASK = 3;
private static final int ACL_ENTRY_SCOPE_MASK = 1;
private static final FsAction[] FSACTION_VALUES = FsAction.values();
private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = AclEntryScope
.values();
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
.values();
private static final int XATTR_NAMESPACE_MASK = 3;
private static final int XATTR_NAMESPACE_OFFSET = 30;
private static final int XATTR_NAME_MASK = (1 << 24) - 1;
private static final int XATTR_NAME_OFFSET = 6;
/* See the comments in fsimage.proto for an explanation of the following. */
private static final int XATTR_NAMESPACE_EXT_OFFSET = 5;
private static final int XATTR_NAMESPACE_EXT_MASK = 1;
private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
XAttr.NameSpace.values();
public final static class Loader {
public static PermissionStatus loadPermission(long id,
final String[] stringTable) {
short perm = (short) (id & ((1 << GROUP_STRID_OFFSET) - 1));
int gsid = (int) ((id >> GROUP_STRID_OFFSET) & USER_GROUP_STRID_MASK);
int usid = (int) ((id >> USER_STRID_OFFSET) & USER_GROUP_STRID_MASK);
return new PermissionStatus(stringTable[usid], stringTable[gsid],
new FsPermission(perm));
}
public static ImmutableList<AclEntry> loadAclEntries(
AclFeatureProto proto, final String[] stringTable) {
ImmutableList.Builder<AclEntry> b = ImmutableList.builder();
for (int v : proto.getEntriesList()) {
int p = v & ACL_ENTRY_PERM_MASK;
int t = (v >> ACL_ENTRY_TYPE_OFFSET) & ACL_ENTRY_TYPE_MASK;
int s = (v >> ACL_ENTRY_SCOPE_OFFSET) & ACL_ENTRY_SCOPE_MASK;
int nid = (v >> ACL_ENTRY_NAME_OFFSET) & ACL_ENTRY_NAME_MASK;
String name = stringTable[nid];
b.add(new AclEntry.Builder().setName(name)
.setPermission(FSACTION_VALUES[p])
.setScope(ACL_ENTRY_SCOPE_VALUES[s])
.setType(ACL_ENTRY_TYPE_VALUES[t]).build());
}
return b.build();
}
public static ImmutableList<XAttr> loadXAttrs(
XAttrFeatureProto proto, final String[] stringTable) {
ImmutableList.Builder<XAttr> b = ImmutableList.builder();
for (XAttrCompactProto xAttrCompactProto : proto.getXAttrsList()) {
int v = xAttrCompactProto.getName();
int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;
int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
ns |=
((v >> XATTR_NAMESPACE_EXT_OFFSET) & XATTR_NAMESPACE_EXT_MASK) << 2;
String name = stringTable[nid];
byte[] value = null;
if (xAttrCompactProto.getValue() != null) {
value = xAttrCompactProto.getValue().toByteArray();
}
b.add(new XAttr.Builder().setNameSpace(XATTR_NAMESPACE_VALUES[ns])
.setName(name).setValue(value).build());
}
return b.build();
}
public static ImmutableList<QuotaByStorageTypeEntry> loadQuotaByStorageTypeEntries(
QuotaByStorageTypeFeatureProto proto) {
ImmutableList.Builder<QuotaByStorageTypeEntry> b = ImmutableList.builder();
for (QuotaByStorageTypeEntryProto quotaEntry : proto.getQuotasList()) {
StorageType type = PBHelper.convertStorageType(quotaEntry.getStorageType());
long quota = quotaEntry.getQuota();
b.add(new QuotaByStorageTypeEntry.Builder().setStorageType(type)
.setQuota(quota).build());
}
return b.build();
}
public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
LoaderContext state) {
assert n.getType() == INodeSection.INode.Type.DIRECTORY;
INodeSection.INodeDirectory d = n.getDirectory();
final PermissionStatus permissions = loadPermission(d.getPermission(),
state.getStringTable());
final INodeDirectory dir = new INodeDirectory(n.getId(), n.getName()
.toByteArray(), permissions, d.getModificationTime());
final long nsQuota = d.getNsQuota(), dsQuota = d.getDsQuota();
if (nsQuota >= 0 || dsQuota >= 0) {
dir.addDirectoryWithQuotaFeature(new DirectoryWithQuotaFeature.Builder().
nameSpaceQuota(nsQuota).storageSpaceQuota(dsQuota).build());
}
EnumCounters<StorageType> typeQuotas = null;
if (d.hasTypeQuotas()) {
ImmutableList<QuotaByStorageTypeEntry> qes =
loadQuotaByStorageTypeEntries(d.getTypeQuotas());
typeQuotas = new EnumCounters<StorageType>(StorageType.class,
HdfsConstants.QUOTA_RESET);
for (QuotaByStorageTypeEntry qe : qes) {
if (qe.getQuota() >= 0 && qe.getStorageType() != null &&
qe.getStorageType().supportTypeQuota()) {
typeQuotas.set(qe.getStorageType(), qe.getQuota());
}
}
if (typeQuotas.anyGreaterOrEqual(0)) {
DirectoryWithQuotaFeature q = dir.getDirectoryWithQuotaFeature();
if (q == null) {
dir.addDirectoryWithQuotaFeature(new DirectoryWithQuotaFeature.
Builder().typeQuotas(typeQuotas).build());
} else {
q.setQuota(typeQuotas);
}
}
}
if (d.hasAcl()) {
int[] entries = AclEntryStatusFormat.toInt(loadAclEntries(
d.getAcl(), state.getStringTable()));
dir.addAclFeature(new AclFeature(entries));
}
if (d.hasXAttrs()) {
dir.addXAttrFeature(new XAttrFeature(
loadXAttrs(d.getXAttrs(), state.getStringTable())));
}
return dir;
}
public static void updateBlocksMap(INodeFile file, BlockManager bm) {
// Add file->block mapping
final BlockInfo[] blocks = file.getBlocks();
if (blocks != null) {
for (int i = 0; i < blocks.length; i++) {
file.setBlock(i, bm.addBlockCollection(blocks[i], file));
}
}
}
private final FSDirectory dir;
private final FSNamesystem fsn;
private final FSImageFormatProtobuf.Loader parent;
Loader(FSNamesystem fsn, final FSImageFormatProtobuf.Loader parent) {
this.fsn = fsn;
this.dir = fsn.dir;
this.parent = parent;
}
void loadINodeDirectorySection(InputStream in) throws IOException {
final List<INodeReference> refList = parent.getLoaderContext()
.getRefList();
while (true) {
INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry
.parseDelimitedFrom(in);
// note that in is a LimitedInputStream
if (e == null) {
break;
}
INodeDirectory p = dir.getInode(e.getParent()).asDirectory();
for (long id : e.getChildrenList()) {
INode child = dir.getInode(id);
addToParent(p, child);
}
for (int refId : e.getRefChildrenList()) {
INodeReference ref = refList.get(refId);
addToParent(p, ref);
}
}
}
void loadINodeSection(InputStream in, StartupProgress prog,
Step currentStep) throws IOException {
INodeSection s = INodeSection.parseDelimitedFrom(in);
fsn.dir.resetLastInodeId(s.getLastInodeId());
long numInodes = s.getNumInodes();
LOG.info("Loading " + numInodes + " INodes.");
prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numInodes);
Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
for (int i = 0; i < numInodes; ++i) {
INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
if (p.getId() == INodeId.ROOT_INODE_ID) {
loadRootINode(p);
} else {
INode n = loadINode(p);
dir.addToInodeMap(n);
}
counter.increment();
}
}
/**
* Load the under-construction files section, and update the lease map
*/
void loadFilesUnderConstructionSection(InputStream in) throws IOException {
while (true) {
FileUnderConstructionEntry entry = FileUnderConstructionEntry
.parseDelimitedFrom(in);
if (entry == null) {
break;
}
// update the lease manager
INodeFile file = dir.getInode(entry.getInodeId()).asFile();
FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
Preconditions.checkState(uc != null); // file must be under-construction
fsn.leaseManager.addLease(uc.getClientName(),
entry.getInodeId());
}
}
private void addToParent(INodeDirectory parent, INode child) {
if (parent == dir.rootDir && FSDirectory.isReservedName(child)) {
throw new HadoopIllegalArgumentException("File name \""
+ child.getLocalName() + "\" is reserved. Please "
+ " change the name of the existing file or directory to another "
+ "name before upgrading to this release.");
}
// NOTE: This does not update space counts for parents
if (!parent.addChild(child)) {
return;
}
dir.cacheName(child);
if (child.isFile()) {
updateBlocksMap(child.asFile(), fsn.getBlockManager());
}
}
private INode loadINode(INodeSection.INode n) {
switch (n.getType()) {
case FILE:
return loadINodeFile(n);
case DIRECTORY:
return loadINodeDirectory(n, parent.getLoaderContext());
case SYMLINK:
return loadINodeSymlink(n);
default:
break;
}
return null;
}
private INodeFile loadINodeFile(INodeSection.INode n) {
assert n.getType() == INodeSection.INode.Type.FILE;
INodeSection.INodeFile f = n.getFile();
List<BlockProto> bp = f.getBlocksList();
short replication = (short) f.getReplication();
LoaderContext state = parent.getLoaderContext();
BlockInfo[] blocks = new BlockInfo[bp.size()];
for (int i = 0, e = bp.size(); i < e; ++i) {
blocks[i] =
new BlockInfoContiguous(PBHelper.convert(bp.get(i)), replication);
}
final PermissionStatus permissions = loadPermission(f.getPermission(),
parent.getLoaderContext().getStringTable());
final INodeFile file = new INodeFile(n.getId(),
n.getName().toByteArray(), permissions, f.getModificationTime(),
f.getAccessTime(), blocks, replication, f.getPreferredBlockSize(),
(byte)f.getStoragePolicyID());
if (f.hasAcl()) {
int[] entries = AclEntryStatusFormat.toInt(loadAclEntries(
f.getAcl(), state.getStringTable()));
file.addAclFeature(new AclFeature(entries));
}
if (f.hasXAttrs()) {
file.addXAttrFeature(new XAttrFeature(
loadXAttrs(f.getXAttrs(), state.getStringTable())));
}
// under-construction information
if (f.hasFileUC()) {
INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
if (blocks.length > 0) {
BlockInfo lastBlk = file.getLastBlock();
// replace the last block of file
file.setBlock(file.numBlocks() - 1, new BlockInfoContiguousUnderConstruction(
lastBlk, replication));
}
}
return file;
}
private INodeSymlink loadINodeSymlink(INodeSection.INode n) {
assert n.getType() == INodeSection.INode.Type.SYMLINK;
INodeSection.INodeSymlink s = n.getSymlink();
final PermissionStatus permissions = loadPermission(s.getPermission(),
parent.getLoaderContext().getStringTable());
INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(),
permissions, s.getModificationTime(), s.getAccessTime(),
s.getTarget().toStringUtf8());
return sym;
}
private void loadRootINode(INodeSection.INode p) {
INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
final QuotaCounts q = root.getQuotaCounts();
final long nsQuota = q.getNameSpace();
final long dsQuota = q.getStorageSpace();
if (nsQuota != -1 || dsQuota != -1) {
dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
}
final EnumCounters<StorageType> typeQuotas = q.getTypeSpaces();
if (typeQuotas.anyGreaterOrEqual(0)) {
dir.rootDir.getDirectoryWithQuotaFeature().setQuota(typeQuotas);
}
dir.rootDir.cloneModificationTime(root);
dir.rootDir.clonePermissionStatus(root);
// root dir supports having extended attributes according to POSIX
final XAttrFeature f = root.getXAttrFeature();
if (f != null) {
dir.rootDir.addXAttrFeature(f);
}
}
}
public final static class Saver {
private static long buildPermissionStatus(INodeAttributes n,
final SaverContext.DeduplicationMap<String> stringMap) {
long userId = stringMap.getId(n.getUserName());
long groupId = stringMap.getId(n.getGroupName());
return ((userId & USER_GROUP_STRID_MASK) << USER_STRID_OFFSET)
| ((groupId & USER_GROUP_STRID_MASK) << GROUP_STRID_OFFSET)
| n.getFsPermissionShort();
}
private static AclFeatureProto.Builder buildAclEntries(AclFeature f,
final SaverContext.DeduplicationMap<String> map) {
AclFeatureProto.Builder b = AclFeatureProto.newBuilder();
for (int pos = 0, e; pos < f.getEntriesSize(); pos++) {
e = f.getEntryAt(pos);
int nameId = map.getId(AclEntryStatusFormat.getName(e));
int v = ((nameId & ACL_ENTRY_NAME_MASK) << ACL_ENTRY_NAME_OFFSET)
| (AclEntryStatusFormat.getType(e).ordinal() << ACL_ENTRY_TYPE_OFFSET)
| (AclEntryStatusFormat.getScope(e).ordinal() << ACL_ENTRY_SCOPE_OFFSET)
| (AclEntryStatusFormat.getPermission(e).ordinal());
b.addEntries(v);
}
return b;
}
private static XAttrFeatureProto.Builder buildXAttrs(XAttrFeature f,
final SaverContext.DeduplicationMap<String> stringMap) {
XAttrFeatureProto.Builder b = XAttrFeatureProto.newBuilder();
for (XAttr a : f.getXAttrs()) {
XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto.
newBuilder();
int nsOrd = a.getNameSpace().ordinal();
Preconditions.checkArgument(nsOrd < 8, "Too many namespaces.");
int v = ((nsOrd & XATTR_NAMESPACE_MASK) << XATTR_NAMESPACE_OFFSET)
| ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) <<
XATTR_NAME_OFFSET);
v |= (((nsOrd >> 2) & XATTR_NAMESPACE_EXT_MASK) <<
XATTR_NAMESPACE_EXT_OFFSET);
xAttrCompactBuilder.setName(v);
if (a.getValue() != null) {
xAttrCompactBuilder.setValue(PBHelper.getByteString(a.getValue()));
}
b.addXAttrs(xAttrCompactBuilder.build());
}
return b;
}
private static QuotaByStorageTypeFeatureProto.Builder
buildQuotaByStorageTypeEntries(QuotaCounts q) {
QuotaByStorageTypeFeatureProto.Builder b =
QuotaByStorageTypeFeatureProto.newBuilder();
for (StorageType t: StorageType.getTypesSupportingQuota()) {
if (q.getTypeSpace(t) >= 0) {
QuotaByStorageTypeEntryProto.Builder eb =
QuotaByStorageTypeEntryProto.newBuilder().
setStorageType(PBHelper.convertStorageType(t)).
setQuota(q.getTypeSpace(t));
b.addQuotas(eb);
}
}
return b;
}
public static INodeSection.INodeFile.Builder buildINodeFile(
INodeFileAttributes file, final SaverContext state) {
INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder()
.setAccessTime(file.getAccessTime())
.setModificationTime(file.getModificationTime())
.setPermission(buildPermissionStatus(file, state.getStringMap()))
.setPreferredBlockSize(file.getPreferredBlockSize())
.setReplication(file.getFileReplication())
.setStoragePolicyID(file.getLocalStoragePolicyID());
AclFeature f = file.getAclFeature();
if (f != null) {
b.setAcl(buildAclEntries(f, state.getStringMap()));
}
XAttrFeature xAttrFeature = file.getXAttrFeature();
if (xAttrFeature != null) {
b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
}
return b;
}
public static INodeSection.INodeDirectory.Builder buildINodeDirectory(
INodeDirectoryAttributes dir, final SaverContext state) {
QuotaCounts quota = dir.getQuotaCounts();
INodeSection.INodeDirectory.Builder b = INodeSection.INodeDirectory
.newBuilder().setModificationTime(dir.getModificationTime())
.setNsQuota(quota.getNameSpace())
.setDsQuota(quota.getStorageSpace())
.setPermission(buildPermissionStatus(dir, state.getStringMap()));
if (quota.getTypeSpaces().anyGreaterOrEqual(0)) {
b.setTypeQuotas(buildQuotaByStorageTypeEntries(quota));
}
AclFeature f = dir.getAclFeature();
if (f != null) {
b.setAcl(buildAclEntries(f, state.getStringMap()));
}
XAttrFeature xAttrFeature = dir.getXAttrFeature();
if (xAttrFeature != null) {
b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
}
return b;
}
private final FSNamesystem fsn;
private final FileSummary.Builder summary;
private final SaveNamespaceContext context;
private final FSImageFormatProtobuf.Saver parent;
Saver(FSImageFormatProtobuf.Saver parent, FileSummary.Builder summary) {
this.parent = parent;
this.summary = summary;
this.context = parent.getContext();
this.fsn = context.getSourceNamesystem();
}
void serializeINodeDirectorySection(OutputStream out) throws IOException {
Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
.getINodeMap().getMapIterator();
final ArrayList<INodeReference> refList = parent.getSaverContext()
.getRefList();
int i = 0;
while (iter.hasNext()) {
INodeWithAdditionalFields n = iter.next();
if (!n.isDirectory()) {
continue;
}
ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
Snapshot.CURRENT_STATE_ID);
if (children.size() > 0) {
INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
DirEntry.newBuilder().setParent(n.getId());
for (INode inode : children) {
if (!inode.isReference()) {
b.addChildren(inode.getId());
} else {
refList.add(inode.asReference());
b.addRefChildren(refList.size() - 1);
}
}
INodeDirectorySection.DirEntry e = b.build();
e.writeDelimitedTo(out);
}
++i;
if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
context.checkCancelled();
}
}
parent.commitSection(summary,
FSImageFormatProtobuf.SectionName.INODE_DIR);
}
void serializeINodeSection(OutputStream out) throws IOException {
INodeMap inodesMap = fsn.dir.getINodeMap();
INodeSection.Builder b = INodeSection.newBuilder()
.setLastInodeId(fsn.dir.getLastInodeId()).setNumInodes(inodesMap.size());
INodeSection s = b.build();
s.writeDelimitedTo(out);
int i = 0;
Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
while (iter.hasNext()) {
INodeWithAdditionalFields n = iter.next();
save(out, n);
++i;
if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
context.checkCancelled();
}
}
parent.commitSection(summary, FSImageFormatProtobuf.SectionName.INODE);
}
void serializeFilesUCSection(OutputStream out) throws IOException {
Collection<Long> filesWithUC = fsn.getLeaseManager()
.getINodeIdWithLeases();
for (Long id : filesWithUC) {
INode inode = fsn.getFSDirectory().getInode(id);
if (inode == null) {
LOG.warn("Fail to find inode " + id + " when saving the leases.");
continue;
}
INodeFile file = inode.asFile();
if (!file.isUnderConstruction()) {
LOG.warn("Fail to save the lease for inode id " + id
+ " as the file is not under construction");
continue;
}
String path = file.getFullPathName();
FileUnderConstructionEntry.Builder b = FileUnderConstructionEntry
.newBuilder().setInodeId(file.getId()).setFullPath(path);
FileUnderConstructionEntry e = b.build();
e.writeDelimitedTo(out);
}
parent.commitSection(summary,
FSImageFormatProtobuf.SectionName.FILES_UNDERCONSTRUCTION);
}
private void save(OutputStream out, INode n) throws IOException {
if (n.isDirectory()) {
save(out, n.asDirectory());
} else if (n.isFile()) {
save(out, n.asFile());
} else if (n.isSymlink()) {
save(out, n.asSymlink());
}
}
private void save(OutputStream out, INodeDirectory n) throws IOException {
INodeSection.INodeDirectory.Builder b = buildINodeDirectory(n,
parent.getSaverContext());
INodeSection.INode r = buildINodeCommon(n)
.setType(INodeSection.INode.Type.DIRECTORY).setDirectory(b).build();
r.writeDelimitedTo(out);
}
private void save(OutputStream out, INodeFile n) throws IOException {
INodeSection.INodeFile.Builder b = buildINodeFile(n,
parent.getSaverContext());
if (n.getBlocks() != null) {
for (Block block : n.getBlocks()) {
b.addBlocks(PBHelper.convert(block));
}
}
FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
if (uc != null) {
INodeSection.FileUnderConstructionFeature f =
INodeSection.FileUnderConstructionFeature
.newBuilder().setClientName(uc.getClientName())
.setClientMachine(uc.getClientMachine()).build();
b.setFileUC(f);
}
INodeSection.INode r = buildINodeCommon(n)
.setType(INodeSection.INode.Type.FILE).setFile(b).build();
r.writeDelimitedTo(out);
}
private void save(OutputStream out, INodeSymlink n) throws IOException {
SaverContext state = parent.getSaverContext();
INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
.newBuilder()
.setPermission(buildPermissionStatus(n, state.getStringMap()))
.setTarget(ByteString.copyFrom(n.getSymlink()))
.setModificationTime(n.getModificationTime())
.setAccessTime(n.getAccessTime());
INodeSection.INode r = buildINodeCommon(n)
.setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build();
r.writeDelimitedTo(out);
}
private final INodeSection.INode.Builder buildINodeCommon(INode n) {
return INodeSection.INode.newBuilder()
.setId(n.getId())
.setName(ByteString.copyFrom(n.getLocalNameBytes()));
}
}
private FSImageFormatPBINode() {
}
}
| 27,633 | 39.758112 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
/**
* This is the JMX management interface for namenode information
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface NameNodeMXBean {
/**
* Gets the version of Hadoop.
*
* @return the version
*/
public String getVersion();
/**
* Get the version of software running on the Namenode
* @return a string representing the version
*/
public String getSoftwareVersion();
/**
* Gets the used space by data nodes.
*
* @return the used space by data nodes
*/
public long getUsed();
/**
* Gets total non-used raw bytes.
*
* @return total non-used raw bytes
*/
public long getFree();
/**
* Gets total raw bytes including non-dfs used space.
*
* @return the total raw bytes including non-dfs used space
*/
public long getTotal();
/**
* Gets the safemode status
*
* @return the safemode status
*
*/
public String getSafemode();
/**
* Checks if upgrade is finalized.
*
* @return true, if upgrade is finalized
*/
public boolean isUpgradeFinalized();
/**
* Gets the RollingUpgrade information.
*
* @return Rolling upgrade information if an upgrade is in progress. Else
* (e.g. if there is no upgrade or the upgrade is finalized), returns null.
*/
public RollingUpgradeInfo.Bean getRollingUpgradeStatus();
/**
* Gets total used space by data nodes for non DFS purposes such as storing
* temporary files on the local file system
*
* @return the non dfs space of the cluster
*/
public long getNonDfsUsedSpace();
/**
* Gets the total used space by data nodes as percentage of total capacity
*
* @return the percentage of used space on the cluster.
*/
public float getPercentUsed();
/**
* Gets the total remaining space by data nodes as percentage of total
* capacity
*
* @return the percentage of the remaining space on the cluster
*/
public float getPercentRemaining();
/**
   * Returns the amount of cache used by the DataNodes (in bytes).
*/
public long getCacheUsed();
/**
   * Returns the total cache capacity of the DataNodes (in bytes).
*/
public long getCacheCapacity();
/**
* Get the total space used by the block pools of this namenode
*/
public long getBlockPoolUsedSpace();
/**
* Get the total space used by the block pool as percentage of total capacity
*/
public float getPercentBlockPoolUsed();
/**
* Gets the total numbers of blocks on the cluster.
*
* @return the total number of blocks of the cluster
*/
public long getTotalBlocks();
/**
* Gets the total number of files on the cluster
*
* @return the total number of files on the cluster
*/
public long getTotalFiles();
/**
* Gets the total number of missing blocks on the cluster
*
* @return the total number of missing blocks on the cluster
*/
public long getNumberOfMissingBlocks();
/**
* Gets the total number of missing blocks on the cluster with
* replication factor 1
*
* @return the total number of missing blocks on the cluster with
* replication factor 1
*/
public long getNumberOfMissingBlocksWithReplicationFactorOne();
/**
* Gets the number of threads.
*
* @return the number of threads
*/
public int getThreads();
/**
* Gets the live node information of the cluster.
*
* @return the live node information
*/
public String getLiveNodes();
/**
* Gets the dead node information of the cluster.
*
* @return the dead node information
*/
public String getDeadNodes();
/**
* Gets the decommissioning node information of the cluster.
*
* @return the decommissioning node information
*/
public String getDecomNodes();
/**
* Gets the cluster id.
*
* @return the cluster id
*/
public String getClusterId();
/**
* Gets the block pool id.
*
* @return the block pool id
*/
public String getBlockPoolId();
/**
* Get status information about the directories storing image and edits logs
* of the NN.
*
* @return the name dir status information, as a JSON string.
*/
public String getNameDirStatuses();
/**
* Get Max, Median, Min and Standard Deviation of DataNodes usage.
*
* @return the DataNode usage information, as a JSON string.
*/
public String getNodeUsage();
/**
* Get status information about the journals of the NN.
*
* @return the name journal status information, as a JSON string.
*/
public String getNameJournalStatus();
/**
* Get information about the transaction ID, including the last applied
* transaction ID and the most recent checkpoint's transaction ID
*/
public String getJournalTransactionInfo();
/**
* Gets the NN start time
*
* @return the NN start time
*/
public String getNNStarted();
/**
* Get the compilation information which contains date, user and branch
*
* @return the compilation information, as a JSON string.
*/
public String getCompileInfo();
/**
* Get the list of corrupt files
*
* @return the list of corrupt files, as a JSON string.
*/
public String getCorruptFiles();
/**
* Get the number of distinct versions of live datanodes
*
* @return the number of distinct versions of live datanodes
*/
public int getDistinctVersionCount();
/**
* Get the number of live datanodes for each distinct versions
*
* @return the number of live datanodes for each distinct versions
*/
public Map<String, Integer> getDistinctVersions();
}
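// Illustrative sketch (not part of the original source): these attributes are normally
// exposed under the JMX object name "Hadoop:service=NameNode,name=NameNodeInfo" and can
// be read via the NameNode's /jmx HTTP endpoint or, inside the NameNode JVM, through a
// standard MXBean proxy, e.g.:
//
//   import java.lang.management.ManagementFactory;
//   import javax.management.JMX;
//   import javax.management.ObjectName;
//
//   ObjectName name = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
//   NameNodeMXBean proxy = JMX.newMXBeanProxy(
//       ManagementFactory.getPlatformMBeanServer(), name, NameNodeMXBean.class);
//   System.out.println(proxy.getSafemode() + " / blocks=" + proxy.getTotalBlocks());
//
// The object name and /jmx endpoint reflect the usual Hadoop metrics conventions and are
// stated here as assumptions, not guarantees of this interface.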
| 6,720 | 23.892593 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState;
import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
import org.apache.hadoop.hdfs.server.namenode.ha.StandbyState;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressMetrics;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.apache.hadoop.tracing.TraceAdminProtocol;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.apache.hadoop.util.JvmPauseMonitor;
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.management.ObjectName;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_BIND_HOST_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_BIND_HOST_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PLUGINS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STARTUP_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
/**********************************************************
* NameNode serves as both directory namespace manager and
* "inode table" for the Hadoop DFS. There is a single NameNode
* running in any DFS deployment. (Well, except when there
* is a second backup/failover NameNode, or when using federated NameNodes.)
*
* The NameNode controls two critical tables:
* 1) filename->blocksequence (namespace)
* 2) block->machinelist ("inodes")
*
* The first table is stored on disk and is very precious.
* The second table is rebuilt every time the NameNode comes up.
*
* 'NameNode' refers to both this class as well as the 'NameNode server'.
* The 'FSNamesystem' class actually performs most of the filesystem
* management. The majority of the 'NameNode' class itself is concerned
* with exposing the IPC interface and the HTTP server to the outside world,
* plus some configuration management.
*
* NameNode implements the
* {@link org.apache.hadoop.hdfs.protocol.ClientProtocol} interface, which
* allows clients to ask for DFS services.
* {@link org.apache.hadoop.hdfs.protocol.ClientProtocol} is not designed for
* direct use by authors of DFS client code. End-users should instead use the
* {@link org.apache.hadoop.fs.FileSystem} class.
*
* NameNode also implements the
* {@link org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol} interface,
* used by DataNodes that actually store DFS data blocks. These
* methods are invoked repeatedly and automatically by all the
* DataNodes in a DFS deployment.
*
* NameNode also implements the
* {@link org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol} interface,
* used by secondary namenodes or rebalancing processes to get partial
* NameNode state, for example partial blocksMap etc.
**********************************************************/
@InterfaceAudience.Private
public class NameNode implements NameNodeStatusMXBean {
static{
HdfsConfiguration.init();
}
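  // Illustrative sketch (not part of the original source): as the class comment above
  // notes, end users reach the NameNode through the FileSystem API rather than
  // ClientProtocol. Host and path below are hypothetical:
  //
  //   Configuration conf = new HdfsConfiguration();
  //   conf.set(FS_DEFAULT_NAME_KEY, "hdfs://nn.example.com:8020");
  //   try (FileSystem fs = FileSystem.get(conf)) {
  //     fs.mkdirs(new org.apache.hadoop.fs.Path("/tmp/example"));
  //   }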
/**
* Categories of operations supported by the namenode.
*/
public static enum OperationCategory {
/** Operations that are state agnostic */
UNCHECKED,
/** Read operation that does not change the namespace state */
READ,
/** Write operation that changes the namespace state */
WRITE,
/** Operations related to checkpointing */
CHECKPOINT,
/** Operations related to {@link JournalProtocol} */
JOURNAL
}
/**
* HDFS configuration can have three types of parameters:
* <ol>
* <li>Parameters that are common for all the name services in the cluster.</li>
* <li>Parameters that are specific to a name service. These keys are suffixed
* with nameserviceId in the configuration. For example,
* "dfs.namenode.rpc-address.nameservice1".</li>
* <li>Parameters that are specific to a single name node. These keys are suffixed
* with nameserviceId and namenodeId in the configuration. for example,
* "dfs.namenode.rpc-address.nameservice1.namenode1"</li>
* </ol>
*
* In the latter cases, operators may specify the configuration without
* any suffix, with a nameservice suffix, or with a nameservice and namenode
* suffix. The more specific suffix will take precedence.
*
* These keys are specific to a given namenode, and thus may be configured
* globally, for a nameservice, or for a specific namenode within a nameservice.
*/
public static final String[] NAMENODE_SPECIFIC_KEYS = {
DFS_NAMENODE_RPC_ADDRESS_KEY,
DFS_NAMENODE_RPC_BIND_HOST_KEY,
DFS_NAMENODE_NAME_DIR_KEY,
DFS_NAMENODE_EDITS_DIR_KEY,
DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
DFS_NAMENODE_CHECKPOINT_DIR_KEY,
DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY,
DFS_NAMENODE_HTTP_ADDRESS_KEY,
DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFS_NAMENODE_HTTP_BIND_HOST_KEY,
DFS_NAMENODE_HTTPS_BIND_HOST_KEY,
DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY,
DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_BACKUP_ADDRESS_KEY,
DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY,
DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
DFS_HA_FENCE_METHODS_KEY,
    DFS_HA_ZKFC_PORT_KEY
};
/**
   * These keys are specific to a nameservice, but may not be overridden
   * for a specific namenode.
   * @see #NAMENODE_SPECIFIC_KEYS
*/
public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
DFS_HA_AUTO_FAILOVER_ENABLED_KEY
};
private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t["
+ StartupOption.CHECKPOINT.getName() + "] | \n\t["
+ StartupOption.FORMAT.getName() + " ["
+ StartupOption.CLUSTERID.getName() + " cid ] ["
+ StartupOption.FORCE.getName() + "] ["
+ StartupOption.NONINTERACTIVE.getName() + "] ] | \n\t["
+ StartupOption.UPGRADE.getName() +
" [" + StartupOption.CLUSTERID.getName() + " cid]" +
" [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t["
+ StartupOption.UPGRADEONLY.getName() +
" [" + StartupOption.CLUSTERID.getName() + " cid]" +
" [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t["
+ StartupOption.ROLLBACK.getName() + "] | \n\t["
+ StartupOption.ROLLINGUPGRADE.getName() + " "
+ RollingUpgradeStartupOption.getAllOptionString() + " ] | \n\t["
+ StartupOption.FINALIZE.getName() + "] | \n\t["
+ StartupOption.IMPORT.getName() + "] | \n\t["
+ StartupOption.INITIALIZESHAREDEDITS.getName() + "] | \n\t["
+ StartupOption.BOOTSTRAPSTANDBY.getName() + "] | \n\t["
+ StartupOption.RECOVER.getName() + " [ "
+ StartupOption.FORCE.getName() + "] ] | \n\t["
+ StartupOption.METADATAVERSION.getName() + " ] "
+ " ]";
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
if (protocol.equals(ClientProtocol.class.getName())) {
return ClientProtocol.versionID;
} else if (protocol.equals(DatanodeProtocol.class.getName())){
return DatanodeProtocol.versionID;
} else if (protocol.equals(NamenodeProtocol.class.getName())){
return NamenodeProtocol.versionID;
} else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
return RefreshAuthorizationPolicyProtocol.versionID;
} else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){
return RefreshUserMappingsProtocol.versionID;
} else if (protocol.equals(RefreshCallQueueProtocol.class.getName())) {
return RefreshCallQueueProtocol.versionID;
} else if (protocol.equals(GetUserMappingsProtocol.class.getName())){
return GetUserMappingsProtocol.versionID;
} else if (protocol.equals(TraceAdminProtocol.class.getName())){
return TraceAdminProtocol.versionID;
} else {
throw new IOException("Unknown protocol to name node: " + protocol);
}
}
public static final int DEFAULT_PORT = 8020;
public static final Logger LOG =
LoggerFactory.getLogger(NameNode.class.getName());
public static final Logger stateChangeLog =
LoggerFactory.getLogger("org.apache.hadoop.hdfs.StateChange");
public static final Logger blockStateChangeLog =
LoggerFactory.getLogger("BlockStateChange");
public static final HAState ACTIVE_STATE = new ActiveState();
public static final HAState STANDBY_STATE = new StandbyState();
protected FSNamesystem namesystem;
protected final Configuration conf;
protected final NamenodeRole role;
private volatile HAState state;
private final boolean haEnabled;
private final HAContext haContext;
protected final boolean allowStaleStandbyReads;
private AtomicBoolean started = new AtomicBoolean(false);
/** httpServer */
protected NameNodeHttpServer httpServer;
private Thread emptier;
/** only used for testing purposes */
protected boolean stopRequested = false;
/** Registration information of this name-node */
protected NamenodeRegistration nodeRegistration;
/** Activated plug-ins. */
private List<ServicePlugin> plugins;
private NameNodeRpcServer rpcServer;
private JvmPauseMonitor pauseMonitor;
private ObjectName nameNodeStatusBeanName;
SpanReceiverHost spanReceiverHost;
/**
* The namenode address that clients will use to access this namenode
* or the name service. For HA configurations using logical URI, it
* will be the logical address.
*/
private String clientNamenodeAddress;
/** Format a new filesystem. Destroys any filesystem that may already
* exist at this location. **/
public static void format(Configuration conf) throws IOException {
format(conf, true, true);
}
static NameNodeMetrics metrics;
private static final StartupProgress startupProgress = new StartupProgress();
/** Return the {@link FSNamesystem} object.
* @return {@link FSNamesystem} object.
*/
public FSNamesystem getNamesystem() {
return namesystem;
}
public NamenodeProtocols getRpcServer() {
return rpcServer;
}
static void initMetrics(Configuration conf, NamenodeRole role) {
metrics = NameNodeMetrics.create(conf, role);
}
public static NameNodeMetrics getNameNodeMetrics() {
return metrics;
}
/**
* Returns object used for reporting namenode startup progress.
*
* @return StartupProgress for reporting namenode startup progress
*/
public static StartupProgress getStartupProgress() {
return startupProgress;
}
/**
* Return the service name of the issued delegation token.
*
* @return The name service id in HA-mode, or the rpc address in non-HA mode
*/
public String getTokenServiceName() {
return getClientNamenodeAddress();
}
/**
* Set the namenode address that will be used by clients to access this
* namenode or name service. This needs to be called before the config
   * is overridden.
*/
public void setClientNamenodeAddress(Configuration conf) {
String nnAddr = conf.get(FS_DEFAULT_NAME_KEY);
if (nnAddr == null) {
// default fs is not set.
clientNamenodeAddress = null;
return;
}
LOG.info("{} is {}", FS_DEFAULT_NAME_KEY, nnAddr);
URI nnUri = URI.create(nnAddr);
String nnHost = nnUri.getHost();
if (nnHost == null) {
clientNamenodeAddress = null;
return;
}
if (DFSUtilClient.getNameServiceIds(conf).contains(nnHost)) {
// host name is logical
clientNamenodeAddress = nnHost;
} else if (nnUri.getPort() > 0) {
// physical address with a valid port
clientNamenodeAddress = nnUri.getAuthority();
} else {
// the port is missing or 0. Figure out real bind address later.
clientNamenodeAddress = null;
return;
}
LOG.info("Clients are to use {} to access"
+ " this namenode/service.", clientNamenodeAddress );
}
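  // For example (hypothetical values, assuming "ns1" is a configured nameservice ID),
  // the branches above resolve as follows:
  //   fs.defaultFS = hdfs://ns1                  -> clientNamenodeAddress = "ns1" (logical HA/federated service)
  //   fs.defaultFS = hdfs://nn1.example.com:8020 -> clientNamenodeAddress = "nn1.example.com:8020"
  //   fs.defaultFS = hdfs://nn1.example.com      -> null here; filled in later from the RPC server's bind address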
/**
* Get the namenode address to be used by clients.
* @return nn address
*/
public String getClientNamenodeAddress() {
return clientNamenodeAddress;
}
public static InetSocketAddress getAddress(String address) {
return NetUtils.createSocketAddr(address, DEFAULT_PORT);
}
/**
   * Set the configuration property for the service RPC address
   * to the given address.
*/
public static void setServiceAddress(Configuration conf,
String address) {
LOG.info("Setting ADDRESS {}", address);
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, address);
}
/**
   * Fetches the address that services (datanodes, the backup node, and any
   * other non-client connection) should use when connecting to the namenode.
   * If the service-specific RPC address is not configured, returns the default
   * namenode address (shared by clients and services) when fallback is true,
   * or null otherwise.
*/
public static InetSocketAddress getServiceAddress(Configuration conf,
boolean fallback) {
String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
if (addr == null || addr.isEmpty()) {
return fallback ? getAddress(conf) : null;
}
return getAddress(addr);
}
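  // Quick illustration of the fallback behaviour (the key behind
  // DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY is dfs.namenode.servicerpc-address):
  //   with the key unset:  getServiceAddress(conf, true)  -> client RPC address from fs.defaultFS
  //                        getServiceAddress(conf, false) -> null
  //   with the key set:    both calls return that service-specific address.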
public static InetSocketAddress getAddress(Configuration conf) {
URI filesystemURI = FileSystem.getDefaultUri(conf);
return getAddress(filesystemURI);
}
/**
* @return address of file system
*/
public static InetSocketAddress getAddress(URI filesystemURI) {
String authority = filesystemURI.getAuthority();
if (authority == null) {
throw new IllegalArgumentException(String.format(
"Invalid URI for NameNode address (check %s): %s has no authority.",
FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
}
if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
filesystemURI.getScheme())) {
throw new IllegalArgumentException(String.format(
"Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
HdfsConstants.HDFS_URI_SCHEME));
}
return getAddress(authority);
}
public static URI getUri(InetSocketAddress namenode) {
int port = namenode.getPort();
String portString = port == DEFAULT_PORT ? "" : (":"+port);
return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+ namenode.getHostName()+portString);
}
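  // Round-trip sketch (hypothetical host): getUri elides the default port and
  // getAddress re-applies it, so
  //   InetSocketAddress a = getAddress("nn.example.com:8020");  // port == DEFAULT_PORT
  //   getUri(a)              -> hdfs://nn.example.com
  //   getAddress(getUri(a))  -> nn.example.com:8020 again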
//
// Common NameNode methods implementation for the active name-node role.
//
public NamenodeRole getRole() {
return role;
}
boolean isRole(NamenodeRole that) {
return role.equals(that);
}
/**
   * Given a configuration, get the address of the service RPC server.
   * If the service RPC address is not configured, returns null.
*/
protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) {
return NameNode.getServiceAddress(conf, false);
}
protected InetSocketAddress getRpcServerAddress(Configuration conf) {
return getAddress(conf);
}
  /** Given a configuration, get the bind host of the service RPC server.
   * If the bind host is not configured, returns null.
*/
protected String getServiceRpcServerBindHost(Configuration conf) {
String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
if (addr == null || addr.isEmpty()) {
return null;
}
return addr;
}
  /** Given a configuration, get the bind host of the client RPC server.
   * If the bind host is not configured, returns null.
*/
protected String getRpcServerBindHost(Configuration conf) {
String addr = conf.getTrimmed(DFS_NAMENODE_RPC_BIND_HOST_KEY);
if (addr == null || addr.isEmpty()) {
return null;
}
return addr;
}
/**
   * Modifies the passed-in configuration to contain the service RPC address setting.
*/
protected void setRpcServiceServerAddress(Configuration conf,
InetSocketAddress serviceRPCAddress) {
setServiceAddress(conf, NetUtils.getHostPortString(serviceRPCAddress));
}
protected void setRpcServerAddress(Configuration conf,
InetSocketAddress rpcAddress) {
FileSystem.setDefaultUri(conf, getUri(rpcAddress));
}
protected InetSocketAddress getHttpServerAddress(Configuration conf) {
return getHttpAddress(conf);
}
/**
* HTTP server address for binding the endpoint. This method is
* for use by the NameNode and its derivatives. It may return
* a different address than the one that should be used by clients to
* connect to the NameNode. See
* {@link DFSConfigKeys#DFS_NAMENODE_HTTP_BIND_HOST_KEY}
*
   * @param conf the configuration
   * @return the socket address the HTTP server should bind to
*/
protected InetSocketAddress getHttpServerBindAddress(Configuration conf) {
InetSocketAddress bindAddress = getHttpServerAddress(conf);
// If DFS_NAMENODE_HTTP_BIND_HOST_KEY exists then it overrides the
// host name portion of DFS_NAMENODE_HTTP_ADDRESS_KEY.
final String bindHost = conf.getTrimmed(DFS_NAMENODE_HTTP_BIND_HOST_KEY);
if (bindHost != null && !bindHost.isEmpty()) {
bindAddress = new InetSocketAddress(bindHost, bindAddress.getPort());
}
return bindAddress;
}
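  // Example (hypothetical values): with
  //   dfs.namenode.http-address   = nn.example.com:50070
  //   dfs.namenode.http-bind-host = 0.0.0.0
  // the HTTP server binds to 0.0.0.0:50070, while nn.example.com:50070 remains the
  // address advertised to clients and the web UI.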
/** @return the NameNode HTTP address. */
public static InetSocketAddress getHttpAddress(Configuration conf) {
return NetUtils.createSocketAddr(
conf.getTrimmed(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
}
protected void loadNamesystem(Configuration conf) throws IOException {
this.namesystem = FSNamesystem.loadFromDisk(conf);
}
NamenodeRegistration getRegistration() {
return nodeRegistration;
}
NamenodeRegistration setRegistration() {
nodeRegistration = new NamenodeRegistration(
NetUtils.getHostPortString(rpcServer.getRpcAddress()),
NetUtils.getHostPortString(getHttpAddress()),
getFSImage().getStorage(), getRole());
return nodeRegistration;
}
/* optimize ugi lookup for RPC operations to avoid a trip through
* UGI.getCurrentUser which is synch'ed
*/
public static UserGroupInformation getRemoteUser() throws IOException {
UserGroupInformation ugi = Server.getRemoteUser();
return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
}
/**
* Login as the configured user for the NameNode.
*/
void loginAsNameNodeUser(Configuration conf) throws IOException {
InetSocketAddress socAddr = getRpcServerAddress(conf);
SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
}
/**
* Initialize name-node.
*
* @param conf the configuration
*/
protected void initialize(Configuration conf) throws IOException {
if (conf.get(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS) == null) {
String intervals = conf.get(DFS_METRICS_PERCENTILES_INTERVALS_KEY);
if (intervals != null) {
conf.set(HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS,
intervals);
}
}
UserGroupInformation.setConfiguration(conf);
loginAsNameNodeUser(conf);
NameNode.initMetrics(conf, this.getRole());
StartupProgressMetrics.register(startupProgress);
if (NamenodeRole.NAMENODE == role) {
startHttpServer(conf);
}
this.spanReceiverHost =
SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);
loadNamesystem(conf);
rpcServer = createRpcServer(conf);
if (clientNamenodeAddress == null) {
// This is expected for MiniDFSCluster. Set it now using
// the RPC server's bind address.
clientNamenodeAddress =
NetUtils.getHostPortString(rpcServer.getRpcAddress());
LOG.info("Clients are to use " + clientNamenodeAddress + " to access"
+ " this namenode/service.");
}
if (NamenodeRole.NAMENODE == role) {
httpServer.setNameNodeAddress(getNameNodeAddress());
httpServer.setFSImage(getFSImage());
}
pauseMonitor = new JvmPauseMonitor(conf);
pauseMonitor.start();
metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
startCommonServices(conf);
}
/**
* Create the RPC server implementation. Used as an extension point for the
* BackupNode.
*/
protected NameNodeRpcServer createRpcServer(Configuration conf)
throws IOException {
return new NameNodeRpcServer(conf, this);
}
/** Start the services common to active and standby states */
private void startCommonServices(Configuration conf) throws IOException {
namesystem.startCommonServices(conf, haContext);
registerNNSMXBean();
if (NamenodeRole.NAMENODE != role) {
startHttpServer(conf);
httpServer.setNameNodeAddress(getNameNodeAddress());
httpServer.setFSImage(getFSImage());
}
rpcServer.start();
plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
ServicePlugin.class);
for (ServicePlugin p: plugins) {
try {
p.start(this);
} catch (Throwable t) {
LOG.warn("ServicePlugin " + p + " could not be started", t);
}
}
LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress());
if (rpcServer.getServiceRpcAddress() != null) {
LOG.info(getRole() + " service RPC up at: "
+ rpcServer.getServiceRpcAddress());
}
}
private void stopCommonServices() {
if(rpcServer != null) rpcServer.stop();
if(namesystem != null) namesystem.close();
if (pauseMonitor != null) pauseMonitor.stop();
if (plugins != null) {
for (ServicePlugin p : plugins) {
try {
p.stop();
} catch (Throwable t) {
LOG.warn("ServicePlugin " + p + " could not be stopped", t);
}
}
}
stopHttpServer();
}
private void startTrashEmptier(final Configuration conf) throws IOException {
long trashInterval =
conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT);
if (trashInterval == 0) {
return;
} else if (trashInterval < 0) {
throw new IOException("Cannot start trash emptier with negative interval."
+ " Set " + FS_TRASH_INTERVAL_KEY + " to a positive value.");
}
// This may be called from the transitionToActive code path, in which
// case the current user is the administrator, not the NN. The trash
// emptier needs to run as the NN. See HDFS-3972.
FileSystem fs = SecurityUtil.doAsLoginUser(
new PrivilegedExceptionAction<FileSystem>() {
@Override
public FileSystem run() throws IOException {
return FileSystem.get(conf);
}
});
this.emptier = new Thread(new Trash(fs, conf).getEmptier(), "Trash Emptier");
this.emptier.setDaemon(true);
this.emptier.start();
}
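  // For example (fs.trash.interval is expressed in minutes):
  //   fs.trash.interval = 0    -> no emptier thread is started (trash disabled)
  //   fs.trash.interval = 1440 -> deleted files sit in .Trash for roughly a day
  //                               before the emptier purges them
  //   fs.trash.interval < 0    -> startup fails with the IOException above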
private void stopTrashEmptier() {
if (this.emptier != null) {
emptier.interrupt();
emptier = null;
}
}
private void startHttpServer(final Configuration conf) throws IOException {
httpServer = new NameNodeHttpServer(conf, this, getHttpServerBindAddress(conf));
httpServer.start();
httpServer.setStartupProgress(startupProgress);
}
private void stopHttpServer() {
try {
if (httpServer != null) httpServer.stop();
} catch (Exception e) {
LOG.error("Exception while stopping httpserver", e);
}
}
/**
* Start NameNode.
* <p>
* The name-node can be started with one of the following startup options:
* <ul>
* <li>{@link StartupOption#REGULAR REGULAR} - normal name node startup</li>
* <li>{@link StartupOption#FORMAT FORMAT} - format name node</li>
* <li>{@link StartupOption#BACKUP BACKUP} - start backup node</li>
* <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li>
   * <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
   * upgrade and create a snapshot of the current file system state</li>
   * <li>{@link StartupOption#UPGRADEONLY UPGRADEONLY} - perform the cluster
   * upgrade, then terminate without starting the name node</li>
* <li>{@link StartupOption#RECOVER RECOVERY} - recover name node
* metadata</li>
* <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the
* cluster back to the previous state</li>
* <li>{@link StartupOption#FINALIZE FINALIZE} - finalize
* previous upgrade</li>
* <li>{@link StartupOption#IMPORT IMPORT} - import checkpoint</li>
* </ul>
* The option is passed via configuration field:
* <tt>dfs.namenode.startup</tt>
*
* The conf will be modified to reflect the actual ports on which
* the NameNode is up and running if the user passes the port as
* <code>zero</code> in the conf.
*
   * @param conf configuration
* @throws IOException
*/
public NameNode(Configuration conf) throws IOException {
this(conf, NamenodeRole.NAMENODE);
}
protected NameNode(Configuration conf, NamenodeRole role)
throws IOException {
this.conf = conf;
this.role = role;
setClientNamenodeAddress(conf);
String nsId = getNameServiceId(conf);
String namenodeId = HAUtil.getNameNodeId(conf, nsId);
this.haEnabled = HAUtil.isHAEnabled(conf, nsId);
state = createHAState(getStartupOption(conf));
this.allowStaleStandbyReads = HAUtil.shouldAllowStandbyReads(conf);
this.haContext = createHAContext();
try {
initializeGenericKeys(conf, nsId, namenodeId);
initialize(conf);
try {
haContext.writeLock();
state.prepareToEnterState(haContext);
state.enterState(haContext);
} finally {
haContext.writeUnlock();
}
} catch (IOException e) {
this.stop();
throw e;
} catch (HadoopIllegalArgumentException e) {
this.stop();
throw e;
}
this.started.set(true);
}
protected HAState createHAState(StartupOption startOpt) {
if (!haEnabled || startOpt == StartupOption.UPGRADE
|| startOpt == StartupOption.UPGRADEONLY) {
return ACTIVE_STATE;
} else {
return STANDBY_STATE;
}
}
protected HAContext createHAContext() {
return new NameNodeHAContext();
}
/**
* Wait for service to finish.
* (Normally, it runs forever.)
*/
public void join() {
try {
rpcServer.join();
} catch (InterruptedException ie) {
LOG.info("Caught interrupted exception ", ie);
}
}
/**
* Stop all NameNode threads and wait for all to finish.
*/
public void stop() {
synchronized(this) {
if (stopRequested)
return;
stopRequested = true;
}
try {
if (state != null) {
state.exitState(haContext);
}
} catch (ServiceFailedException e) {
LOG.warn("Encountered exception while exiting state ", e);
} finally {
stopCommonServices();
if (metrics != null) {
metrics.shutdown();
}
if (namesystem != null) {
namesystem.shutdown();
}
if (nameNodeStatusBeanName != null) {
MBeans.unregister(nameNodeStatusBeanName);
nameNodeStatusBeanName = null;
}
if (this.spanReceiverHost != null) {
this.spanReceiverHost.closeReceivers();
}
}
}
synchronized boolean isStopRequested() {
return stopRequested;
}
/**
* Is the cluster currently in safe mode?
*/
public boolean isInSafeMode() {
return namesystem.isInSafeMode();
}
/** get FSImage */
@VisibleForTesting
public FSImage getFSImage() {
return namesystem.getFSImage();
}
/**
* @return NameNode RPC address
*/
public InetSocketAddress getNameNodeAddress() {
return rpcServer.getRpcAddress();
}
/**
* @return NameNode RPC address in "host:port" string form
*/
public String getNameNodeAddressHostPortString() {
return NetUtils.getHostPortString(rpcServer.getRpcAddress());
}
/**
* @return NameNode service RPC address if configured, the
* NameNode RPC address otherwise
*/
public InetSocketAddress getServiceRpcAddress() {
final InetSocketAddress serviceAddr = rpcServer.getServiceRpcAddress();
return serviceAddr == null ? rpcServer.getRpcAddress() : serviceAddr;
}
/**
* @return NameNode HTTP address, used by the Web UI, image transfer,
* and HTTP-based file system clients like Hftp and WebHDFS
*/
public InetSocketAddress getHttpAddress() {
return httpServer.getHttpAddress();
}
/**
* @return NameNode HTTPS address, used by the Web UI, image transfer,
* and HTTP-based file system clients like Hftp and WebHDFS
*/
public InetSocketAddress getHttpsAddress() {
return httpServer.getHttpsAddress();
}
/**
   * Verify that configured directories exist, then
   * interactively confirm that formatting is desired
   * for each existing directory and format them.
   *
   * @param conf configuration to use
   * @param force if true, format regardless of whether dirs exist
   * @param isInteractive if true, prompt the user before formatting existing directories
   * @return true if formatting was aborted, false otherwise
* @throws IOException
*/
private static boolean format(Configuration conf, boolean force,
boolean isInteractive) throws IOException {
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
String namenodeId = HAUtil.getNameNodeId(conf, nsId);
initializeGenericKeys(conf, nsId, namenodeId);
checkAllowFormat(conf);
if (UserGroupInformation.isSecurityEnabled()) {
InetSocketAddress socAddr = getAddress(conf);
SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
}
Collection<URI> nameDirsToFormat = FSNamesystem.getNamespaceDirs(conf);
List<URI> sharedDirs = FSNamesystem.getSharedEditsDirs(conf);
List<URI> dirsToPrompt = new ArrayList<URI>();
dirsToPrompt.addAll(nameDirsToFormat);
dirsToPrompt.addAll(sharedDirs);
List<URI> editDirsToFormat =
FSNamesystem.getNamespaceEditsDirs(conf);
// if clusterID is not provided - see if you can find the current one
String clusterId = StartupOption.FORMAT.getClusterId();
if(clusterId == null || clusterId.equals("")) {
//Generate a new cluster id
clusterId = NNStorage.newClusterID();
}
System.out.println("Formatting using clusterid: " + clusterId);
FSImage fsImage = new FSImage(conf, nameDirsToFormat, editDirsToFormat);
try {
FSNamesystem fsn = new FSNamesystem(conf, fsImage);
fsImage.getEditLog().initJournalsForWrite();
if (!fsImage.confirmFormat(force, isInteractive)) {
return true; // aborted
}
fsImage.format(fsn, clusterId);
} catch (IOException ioe) {
LOG.warn("Encountered exception during format: ", ioe);
fsImage.close();
throw ioe;
}
return false;
}
public static void checkAllowFormat(Configuration conf) throws IOException {
if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,
DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
throw new IOException("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
+ " is set to false for this filesystem, so it "
+ "cannot be formatted. You will need to set "
+ DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter "
+ "to true in order to format this filesystem");
}
}
@VisibleForTesting
public static boolean initializeSharedEdits(Configuration conf) throws IOException {
return initializeSharedEdits(conf, true);
}
@VisibleForTesting
public static boolean initializeSharedEdits(Configuration conf,
boolean force) throws IOException {
return initializeSharedEdits(conf, force, false);
}
/**
* Clone the supplied configuration but remove the shared edits dirs.
*
* @param conf Supplies the original configuration.
* @return Cloned configuration without the shared edit dirs.
* @throws IOException on failure to generate the configuration.
*/
private static Configuration getConfigurationWithoutSharedEdits(
Configuration conf)
throws IOException {
List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf, false);
String editsDirsString = Joiner.on(",").join(editsDirs);
Configuration confWithoutShared = new Configuration(conf);
confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
confWithoutShared.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
editsDirsString);
return confWithoutShared;
}
/**
* Format a new shared edits dir and copy in enough edit log segments so that
* the standby NN can start up.
*
* @param conf configuration
* @param force format regardless of whether or not the shared edits dir exists
* @param interactive prompt the user when a dir exists
* @return true if the command aborts, false otherwise
*/
private static boolean initializeSharedEdits(Configuration conf,
boolean force, boolean interactive) throws IOException {
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
String namenodeId = HAUtil.getNameNodeId(conf, nsId);
initializeGenericKeys(conf, nsId, namenodeId);
if (conf.get(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY) == null) {
LOG.error("No shared edits directory configured for namespace " +
nsId + " namenode " + namenodeId);
return false;
}
if (UserGroupInformation.isSecurityEnabled()) {
InetSocketAddress socAddr = getAddress(conf);
SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
}
NNStorage existingStorage = null;
FSImage sharedEditsImage = null;
try {
FSNamesystem fsns =
FSNamesystem.loadFromDisk(getConfigurationWithoutSharedEdits(conf));
existingStorage = fsns.getFSImage().getStorage();
NamespaceInfo nsInfo = existingStorage.getNamespaceInfo();
List<URI> sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf);
sharedEditsImage = new FSImage(conf,
Lists.<URI>newArrayList(),
sharedEditsDirs);
sharedEditsImage.getEditLog().initJournalsForWrite();
if (!sharedEditsImage.confirmFormat(force, interactive)) {
return true; // abort
}
NNStorage newSharedStorage = sharedEditsImage.getStorage();
// Call Storage.format instead of FSImage.format here, since we don't
// actually want to save a checkpoint - just prime the dirs with
// the existing namespace info
newSharedStorage.format(nsInfo);
sharedEditsImage.getEditLog().formatNonFileJournals(nsInfo);
// Need to make sure the edit log segments are in good shape to initialize
// the shared edits dir.
fsns.getFSImage().getEditLog().close();
fsns.getFSImage().getEditLog().initJournalsForWrite();
fsns.getFSImage().getEditLog().recoverUnclosedStreams();
copyEditLogSegmentsToSharedDir(fsns, sharedEditsDirs, newSharedStorage,
conf);
} catch (IOException ioe) {
LOG.error("Could not initialize shared edits dir", ioe);
return true; // aborted
} finally {
if (sharedEditsImage != null) {
try {
sharedEditsImage.close();
} catch (IOException ioe) {
LOG.warn("Could not close sharedEditsImage", ioe);
}
}
// Have to unlock storage explicitly for the case when we're running in a
// unit test, which runs in the same JVM as NNs.
if (existingStorage != null) {
try {
existingStorage.unlockAll();
} catch (IOException ioe) {
LOG.warn("Could not unlock storage directories", ioe);
return true; // aborted
}
}
}
return false; // did not abort
}
private static void copyEditLogSegmentsToSharedDir(FSNamesystem fsns,
Collection<URI> sharedEditsDirs, NNStorage newSharedStorage,
Configuration conf) throws IOException {
Preconditions.checkArgument(!sharedEditsDirs.isEmpty(),
"No shared edits specified");
// Copy edit log segments into the new shared edits dir.
List<URI> sharedEditsUris = new ArrayList<URI>(sharedEditsDirs);
FSEditLog newSharedEditLog = new FSEditLog(conf, newSharedStorage,
sharedEditsUris);
newSharedEditLog.initJournalsForWrite();
newSharedEditLog.recoverUnclosedStreams();
FSEditLog sourceEditLog = fsns.getFSImage().editLog;
long fromTxId = fsns.getFSImage().getMostRecentCheckpointTxId();
Collection<EditLogInputStream> streams = null;
try {
streams = sourceEditLog.selectInputStreams(fromTxId + 1, 0);
// Set the nextTxid to the CheckpointTxId+1
newSharedEditLog.setNextTxId(fromTxId + 1);
// Copy all edits after last CheckpointTxId to shared edits dir
for (EditLogInputStream stream : streams) {
LOG.debug("Beginning to copy stream " + stream + " to shared edits");
FSEditLogOp op;
boolean segmentOpen = false;
while ((op = stream.readOp()) != null) {
if (LOG.isTraceEnabled()) {
LOG.trace("copying op: " + op);
}
if (!segmentOpen) {
newSharedEditLog.startLogSegment(op.txid, false,
fsns.getEffectiveLayoutVersion());
segmentOpen = true;
}
newSharedEditLog.logEdit(op);
if (op.opCode == FSEditLogOpCodes.OP_END_LOG_SEGMENT) {
newSharedEditLog.logSync();
newSharedEditLog.endCurrentLogSegment(false);
LOG.debug("ending log segment because of END_LOG_SEGMENT op in "
+ stream);
segmentOpen = false;
}
}
if (segmentOpen) {
LOG.debug("ending log segment because of end of stream in " + stream);
newSharedEditLog.logSync();
newSharedEditLog.endCurrentLogSegment(false);
segmentOpen = false;
}
}
} finally {
if (streams != null) {
FSEditLog.closeAllStreams(streams);
}
}
}
@VisibleForTesting
public static boolean doRollback(Configuration conf,
boolean isConfirmationNeeded) throws IOException {
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
String namenodeId = HAUtil.getNameNodeId(conf, nsId);
initializeGenericKeys(conf, nsId, namenodeId);
FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
System.err.print(
"\"rollBack\" will remove the current state of the file system,\n"
+ "returning you to the state prior to initiating your recent.\n"
+ "upgrade. This action is permanent and cannot be undone. If you\n"
+ "are performing a rollback in an HA environment, you should be\n"
+ "certain that no NameNode process is running on any host.");
if (isConfirmationNeeded) {
if (!confirmPrompt("Roll back file system state?")) {
System.err.println("Rollback aborted.");
return true;
}
}
nsys.getFSImage().doRollback(nsys);
return false;
}
private static void printUsage(PrintStream out) {
out.println(USAGE + "\n");
}
@VisibleForTesting
static StartupOption parseArguments(String args[]) {
int argsLen = (args == null) ? 0 : args.length;
StartupOption startOpt = StartupOption.REGULAR;
for(int i=0; i < argsLen; i++) {
String cmd = args[i];
if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.FORMAT;
for (i = i + 1; i < argsLen; i++) {
if (args[i].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
i++;
if (i >= argsLen) {
// if no cluster id specified, return null
LOG.error("Must specify a valid cluster ID after the "
+ StartupOption.CLUSTERID.getName() + " flag");
return null;
}
String clusterId = args[i];
// Make sure an id is specified and not another flag
if (clusterId.isEmpty() ||
clusterId.equalsIgnoreCase(StartupOption.FORCE.getName()) ||
clusterId.equalsIgnoreCase(
StartupOption.NONINTERACTIVE.getName())) {
LOG.error("Must specify a valid cluster ID after the "
+ StartupOption.CLUSTERID.getName() + " flag");
return null;
}
startOpt.setClusterId(clusterId);
}
if (args[i].equalsIgnoreCase(StartupOption.FORCE.getName())) {
startOpt.setForceFormat(true);
}
if (args[i].equalsIgnoreCase(StartupOption.NONINTERACTIVE.getName())) {
startOpt.setInteractiveFormat(false);
}
}
} else if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.GENCLUSTERID;
} else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.REGULAR;
} else if (StartupOption.BACKUP.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.BACKUP;
} else if (StartupOption.CHECKPOINT.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.CHECKPOINT;
} else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)
|| StartupOption.UPGRADEONLY.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd) ?
StartupOption.UPGRADE : StartupOption.UPGRADEONLY;
/* Can be followed by CLUSTERID with a required parameter or
* RENAMERESERVED with an optional parameter
*/
while (i + 1 < argsLen) {
String flag = args[i + 1];
if (flag.equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
if (i + 2 < argsLen) {
i += 2;
startOpt.setClusterId(args[i]);
} else {
LOG.error("Must specify a valid cluster ID after the "
+ StartupOption.CLUSTERID.getName() + " flag");
return null;
}
} else if (flag.equalsIgnoreCase(StartupOption.RENAMERESERVED
.getName())) {
if (i + 2 < argsLen) {
FSImageFormat.setRenameReservedPairs(args[i + 2]);
i += 2;
} else {
FSImageFormat.useDefaultRenameReservedPairs();
i += 1;
}
} else {
LOG.error("Unknown upgrade flag " + flag);
return null;
}
}
} else if (StartupOption.ROLLINGUPGRADE.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.ROLLINGUPGRADE;
++i;
if (i >= argsLen) {
LOG.error("Must specify a rolling upgrade startup option "
+ RollingUpgradeStartupOption.getAllOptionString());
return null;
}
startOpt.setRollingUpgradeStartupOption(args[i]);
} else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.ROLLBACK;
} else if (StartupOption.FINALIZE.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.FINALIZE;
} else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.IMPORT;
} else if (StartupOption.BOOTSTRAPSTANDBY.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.BOOTSTRAPSTANDBY;
return startOpt;
} else if (StartupOption.INITIALIZESHAREDEDITS.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.INITIALIZESHAREDEDITS;
for (i = i + 1 ; i < argsLen; i++) {
if (StartupOption.NONINTERACTIVE.getName().equals(args[i])) {
startOpt.setInteractiveFormat(false);
} else if (StartupOption.FORCE.getName().equals(args[i])) {
startOpt.setForceFormat(true);
} else {
LOG.error("Invalid argument: " + args[i]);
return null;
}
}
return startOpt;
} else if (StartupOption.RECOVER.getName().equalsIgnoreCase(cmd)) {
if (startOpt != StartupOption.REGULAR) {
throw new RuntimeException("Can't combine -recover with " +
"other startup options.");
}
startOpt = StartupOption.RECOVER;
while (++i < argsLen) {
if (args[i].equalsIgnoreCase(
StartupOption.FORCE.getName())) {
startOpt.setForce(MetaRecoveryContext.FORCE_FIRST_CHOICE);
} else {
throw new RuntimeException("Error parsing recovery options: " +
"can't understand option \"" + args[i] + "\"");
}
}
} else if (StartupOption.METADATAVERSION.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.METADATAVERSION;
} else {
return null;
}
}
return startOpt;
}
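  // A few illustrative inputs and what the parser above returns (values are
  // hypothetical; flag spellings assumed to match StartupOption definitions):
  //   {"-format", "-clusterid", "mycluster", "-nonInteractive"}
  //       -> StartupOption.FORMAT with clusterId "mycluster", interactive format disabled
  //   {"-rollingUpgrade", "started"}
  //       -> StartupOption.ROLLINGUPGRADE carrying the "started" rolling upgrade option
  //   {"-recover", "-force"}
  //       -> StartupOption.RECOVER with force set to MetaRecoveryContext.FORCE_FIRST_CHOICE
  //   {"-noSuchFlag"}
  //       -> null, which makes createNameNode() print the usage message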
private static void setStartupOption(Configuration conf, StartupOption opt) {
conf.set(DFS_NAMENODE_STARTUP_KEY, opt.name());
}
static StartupOption getStartupOption(Configuration conf) {
return StartupOption.valueOf(conf.get(DFS_NAMENODE_STARTUP_KEY,
StartupOption.REGULAR.toString()));
}
private static void doRecovery(StartupOption startOpt, Configuration conf)
throws IOException {
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
String namenodeId = HAUtil.getNameNodeId(conf, nsId);
initializeGenericKeys(conf, nsId, namenodeId);
if (startOpt.getForce() < MetaRecoveryContext.FORCE_ALL) {
if (!confirmPrompt("You have selected Metadata Recovery mode. " +
"This mode is intended to recover lost metadata on a corrupt " +
"filesystem. Metadata recovery mode often permanently deletes " +
"data from your HDFS filesystem. Please back up your edit log " +
"and fsimage before trying this!\n\n" +
"Are you ready to proceed? (Y/N)\n")) {
System.err.println("Recovery aborted at user request.\n");
return;
}
}
MetaRecoveryContext.LOG.info("starting recovery...");
UserGroupInformation.setConfiguration(conf);
NameNode.initMetrics(conf, startOpt.toNodeRole());
FSNamesystem fsn = null;
try {
fsn = FSNamesystem.loadFromDisk(conf);
fsn.getFSImage().saveNamespace(fsn);
MetaRecoveryContext.LOG.info("RECOVERY COMPLETE");
} catch (IOException e) {
MetaRecoveryContext.LOG.info("RECOVERY FAILED: caught exception", e);
throw e;
} catch (RuntimeException e) {
MetaRecoveryContext.LOG.info("RECOVERY FAILED: caught exception", e);
throw e;
} finally {
if (fsn != null)
fsn.close();
}
}
/**
* Verify that configured directories exist, then print the metadata versions
* of the software and the image.
*
* @param conf configuration to use
* @throws IOException
*/
private static boolean printMetadataVersion(Configuration conf)
throws IOException {
final String nsId = DFSUtil.getNamenodeNameServiceId(conf);
final String namenodeId = HAUtil.getNameNodeId(conf, nsId);
NameNode.initializeGenericKeys(conf, nsId, namenodeId);
final FSImage fsImage = new FSImage(conf);
final FSNamesystem fs = new FSNamesystem(conf, fsImage, false);
return fsImage.recoverTransitionRead(
StartupOption.METADATAVERSION, fs, null);
}
public static NameNode createNameNode(String argv[], Configuration conf)
throws IOException {
LOG.info("createNameNode " + Arrays.asList(argv));
if (conf == null)
conf = new HdfsConfiguration();
StartupOption startOpt = parseArguments(argv);
if (startOpt == null) {
printUsage(System.err);
return null;
}
setStartupOption(conf, startOpt);
switch (startOpt) {
case FORMAT: {
boolean aborted = format(conf, startOpt.getForceFormat(),
startOpt.getInteractiveFormat());
terminate(aborted ? 1 : 0);
return null; // avoid javac warning
}
case GENCLUSTERID: {
System.err.println("Generating new cluster id:");
System.out.println(NNStorage.newClusterID());
terminate(0);
return null;
}
case FINALIZE: {
System.err.println("Use of the argument '" + StartupOption.FINALIZE +
"' is no longer supported. To finalize an upgrade, start the NN " +
" and then run `hdfs dfsadmin -finalizeUpgrade'");
terminate(1);
return null; // avoid javac warning
}
case ROLLBACK: {
boolean aborted = doRollback(conf, true);
terminate(aborted ? 1 : 0);
return null; // avoid warning
}
case BOOTSTRAPSTANDBY: {
String toolArgs[] = Arrays.copyOfRange(argv, 1, argv.length);
int rc = BootstrapStandby.run(toolArgs, conf);
terminate(rc);
return null; // avoid warning
}
case INITIALIZESHAREDEDITS: {
boolean aborted = initializeSharedEdits(conf,
startOpt.getForceFormat(),
startOpt.getInteractiveFormat());
terminate(aborted ? 1 : 0);
return null; // avoid warning
}
case BACKUP:
case CHECKPOINT: {
NamenodeRole role = startOpt.toNodeRole();
DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
return new BackupNode(conf, role);
}
case RECOVER: {
NameNode.doRecovery(startOpt, conf);
return null;
}
case METADATAVERSION: {
printMetadataVersion(conf);
terminate(0);
return null; // avoid javac warning
}
case UPGRADEONLY: {
DefaultMetricsSystem.initialize("NameNode");
new NameNode(conf);
terminate(0);
return null;
}
default: {
DefaultMetricsSystem.initialize("NameNode");
return new NameNode(conf);
}
}
}
/**
   * In a federated setup, configuration is specified for a set of namenodes
   * and secondary namenodes/backup nodes/checkpointers, which are grouped
   * under a logical nameservice ID. The configuration keys specific to them
   * have a suffix set to the configured nameserviceId.
   *
   * This method copies the value from a specific key of the form
   * key.nameserviceId to the generic key, to set up the generic
   * configuration. Once this is done, only the generic version of the
   * configuration is read in the rest of the code, for backward
   * compatibility and simpler code changes.
*
* @param conf
* Configuration object to lookup specific key and to set the value
* to the key passed. Note the conf object is modified
* @param nameserviceId name service Id (to distinguish federated NNs)
* @param namenodeId the namenode ID (to distinguish HA NNs)
* @see DFSUtil#setGenericConf(Configuration, String, String, String...)
*/
public static void initializeGenericKeys(Configuration conf,
String nameserviceId, String namenodeId) {
if ((nameserviceId != null && !nameserviceId.isEmpty()) ||
(namenodeId != null && !namenodeId.isEmpty())) {
if (nameserviceId != null) {
conf.set(DFS_NAMESERVICE_ID, nameserviceId);
}
if (namenodeId != null) {
conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
}
DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
NAMENODE_SPECIFIC_KEYS);
DFSUtil.setGenericConf(conf, nameserviceId, null,
NAMESERVICE_SPECIFIC_KEYS);
}
// If the RPC address is set use it to (re-)configure the default FS
if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+ conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
if (LOG.isDebugEnabled()) {
LOG.debug("Setting " + FS_DEFAULT_NAME_KEY + " to " + defaultUri.toString());
}
}
}
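  // Illustrative example (ids are hypothetical): with nameserviceId "ns1" and
  // namenodeId "nn1", a suffixed key such as
  // "dfs.namenode.rpc-address.ns1.nn1" is copied to the generic
  // "dfs.namenode.rpc-address", which the rest of the code reads.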
/**
* Get the name service Id for the node
* @return name service Id or null if federation is not configured
*/
protected String getNameServiceId(Configuration conf) {
return DFSUtil.getNamenodeNameServiceId(conf);
}
  /**
   * Command-line entry point: parse the startup options, create the
   * NameNode, and wait for it to stop.
   */
public static void main(String argv[]) throws Exception {
if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
System.exit(0);
}
try {
StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
NameNode namenode = createNameNode(argv, null);
if (namenode != null) {
namenode.join();
}
} catch (Throwable e) {
LOG.error("Failed to start namenode.", e);
terminate(1, e);
}
}
synchronized void monitorHealth()
throws HealthCheckFailedException, AccessControlException {
namesystem.checkSuperuserPrivilege();
if (!haEnabled) {
return; // no-op, if HA is not enabled
}
getNamesystem().checkAvailableResources();
if (!getNamesystem().nameNodeHasResourcesAvailable()) {
throw new HealthCheckFailedException(
"The NameNode has no resources available");
}
}
synchronized void transitionToActive()
throws ServiceFailedException, AccessControlException {
namesystem.checkSuperuserPrivilege();
if (!haEnabled) {
throw new ServiceFailedException("HA for namenode is not enabled");
}
state.setState(haContext, ACTIVE_STATE);
}
synchronized void transitionToStandby()
throws ServiceFailedException, AccessControlException {
namesystem.checkSuperuserPrivilege();
if (!haEnabled) {
throw new ServiceFailedException("HA for namenode is not enabled");
}
state.setState(haContext, STANDBY_STATE);
}
synchronized HAServiceStatus getServiceStatus()
throws ServiceFailedException, AccessControlException {
namesystem.checkSuperuserPrivilege();
if (!haEnabled) {
throw new ServiceFailedException("HA for namenode is not enabled");
}
if (state == null) {
return new HAServiceStatus(HAServiceState.INITIALIZING);
}
HAServiceState retState = state.getServiceState();
HAServiceStatus ret = new HAServiceStatus(retState);
if (retState == HAServiceState.STANDBY) {
String safemodeTip = namesystem.getSafeModeTip();
if (!safemodeTip.isEmpty()) {
ret.setNotReadyToBecomeActive(
"The NameNode is in safemode. " +
safemodeTip);
} else {
ret.setReadyToBecomeActive();
}
} else if (retState == HAServiceState.ACTIVE) {
ret.setReadyToBecomeActive();
} else {
ret.setNotReadyToBecomeActive("State is " + state);
}
return ret;
}
synchronized HAServiceState getServiceState() {
if (state == null) {
return HAServiceState.INITIALIZING;
}
return state.getServiceState();
}
/**
* Register NameNodeStatusMXBean
*/
private void registerNNSMXBean() {
nameNodeStatusBeanName = MBeans.register("NameNode", "NameNodeStatus", this);
}
@Override // NameNodeStatusMXBean
public String getNNRole() {
String roleStr = "";
NamenodeRole role = getRole();
if (null != role) {
roleStr = role.toString();
}
return roleStr;
}
@Override // NameNodeStatusMXBean
public String getState() {
String servStateStr = "";
HAServiceState servState = getServiceState();
if (null != servState) {
servStateStr = servState.toString();
}
return servStateStr;
}
@Override // NameNodeStatusMXBean
public String getHostAndPort() {
return getNameNodeAddressHostPortString();
}
@Override // NameNodeStatusMXBean
public boolean isSecurityEnabled() {
return UserGroupInformation.isSecurityEnabled();
}
@Override // NameNodeStatusMXBean
public long getLastHATransitionTime() {
return state.getLastHATransitionTime();
}
/**
* Shutdown the NN immediately in an ungraceful way. Used when it would be
* unsafe for the NN to continue operating, e.g. during a failed HA state
* transition.
*
* @param t exception which warrants the shutdown. Printed to the NN log
* before exit.
* @throws ExitException thrown only for testing.
*/
protected synchronized void doImmediateShutdown(Throwable t)
throws ExitException {
String message = "Error encountered requiring NN shutdown. " +
"Shutting down immediately.";
try {
LOG.error(message, t);
} catch (Throwable ignored) {
// This is unlikely to happen, but there's nothing we can do if it does.
}
terminate(1, t);
}
/**
* Class used to expose {@link NameNode} as context to {@link HAState}
*/
protected class NameNodeHAContext implements HAContext {
@Override
public void setState(HAState s) {
state = s;
}
@Override
public HAState getState() {
return state;
}
@Override
public void startActiveServices() throws IOException {
try {
namesystem.startActiveServices();
startTrashEmptier(conf);
} catch (Throwable t) {
doImmediateShutdown(t);
}
}
@Override
public void stopActiveServices() throws IOException {
try {
if (namesystem != null) {
namesystem.stopActiveServices();
}
stopTrashEmptier();
} catch (Throwable t) {
doImmediateShutdown(t);
}
}
@Override
public void startStandbyServices() throws IOException {
try {
namesystem.startStandbyServices(conf);
} catch (Throwable t) {
doImmediateShutdown(t);
}
}
@Override
public void prepareToStopStandbyServices() throws ServiceFailedException {
try {
namesystem.prepareToStopStandbyServices();
} catch (Throwable t) {
doImmediateShutdown(t);
}
}
@Override
public void stopStandbyServices() throws IOException {
try {
if (namesystem != null) {
namesystem.stopStandbyServices();
}
} catch (Throwable t) {
doImmediateShutdown(t);
}
}
@Override
public void writeLock() {
namesystem.writeLock();
namesystem.lockRetryCache();
}
@Override
public void writeUnlock() {
namesystem.unlockRetryCache();
namesystem.writeUnlock();
}
/** Check if an operation of given category is allowed */
@Override
public void checkOperation(final OperationCategory op)
throws StandbyException {
state.checkOperation(haContext, op);
}
@Override
public boolean allowStaleReads() {
return allowStaleStandbyReads;
}
}
public boolean isStandbyState() {
return (state.equals(STANDBY_STATE));
}
public boolean isActiveState() {
return (state.equals(ACTIVE_STATE));
}
/**
* Returns whether the NameNode is completely started
*/
boolean isStarted() {
return this.started.get();
}
/**
* Check that a request to change this node's HA state is valid.
* In particular, verifies that, if auto failover is enabled, non-forced
* requests from the HAAdmin CLI are rejected, and vice versa.
*
* @param req the request to check
* @throws AccessControlException if the request is disallowed
*/
void checkHaStateChange(StateChangeRequestInfo req)
throws AccessControlException {
boolean autoHaEnabled = conf.getBoolean(DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
switch (req.getSource()) {
case REQUEST_BY_USER:
if (autoHaEnabled) {
throw new AccessControlException(
"Manual HA control for this NameNode is disallowed, because " +
"automatic HA is enabled.");
}
break;
case REQUEST_BY_USER_FORCED:
if (autoHaEnabled) {
LOG.warn("Allowing manual HA control from " +
Server.getRemoteAddress() +
" even though automatic HA is enabled, because the user " +
"specified the force flag");
}
break;
case REQUEST_BY_ZKFC:
if (!autoHaEnabled) {
throw new AccessControlException(
"Request from ZK failover controller at " +
Server.getRemoteAddress() + " denied since automatic HA " +
"is not enabled");
}
break;
}
}
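  // Illustrative summary (not part of the original source): when automatic
  // failover is enabled, a plain REQUEST_BY_USER transition is rejected,
  // REQUEST_BY_USER_FORCED is allowed with a warning, and REQUEST_BY_ZKFC is
  // accepted; when automatic failover is disabled, only the ZKFC-originated
  // request is rejected.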
}
| 67,398 | 35.609995 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ContentSummaryComputationContext {
private FSDirectory dir = null;
private FSNamesystem fsn = null;
private BlockStoragePolicySuite bsps = null;
private ContentCounts counts = null;
private long nextCountLimit = 0;
private long limitPerRun = 0;
private long yieldCount = 0;
private long sleepMilliSec = 0;
private int sleepNanoSec = 0;
/**
* Constructor
*
* @param dir The FSDirectory instance
* @param fsn The FSNamesystem instance
* @param limitPerRun allowed number of operations in one
* locking period. 0 or a negative number means
* no limit (i.e. no yielding)
   * @param sleepMicroSec how long to sleep (in microseconds) between
   *        releasing and reacquiring the locks when yielding
   */
public ContentSummaryComputationContext(FSDirectory dir,
FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
this.dir = dir;
this.fsn = fsn;
this.limitPerRun = limitPerRun;
this.nextCountLimit = limitPerRun;
this.counts = new ContentCounts.Builder().build();
this.sleepMilliSec = sleepMicroSec/1000;
this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
}
/** Constructor for blocking computation. */
public ContentSummaryComputationContext(BlockStoragePolicySuite bsps) {
this(null, null, 0, 1000);
this.bsps = bsps;
}
/** Return current yield count */
public long getYieldCount() {
return yieldCount;
}
/**
* Relinquish locks held during computation for a short while
* and reacquire them. This will give other threads a chance
* to acquire the contended locks and run.
*
* @return true if locks were released and reacquired.
*/
public boolean yield() {
// Are we set up to do this?
if (limitPerRun <= 0 || dir == null || fsn == null) {
return false;
}
// Have we reached the limit?
long currentCount = counts.getFileCount() +
counts.getSymlinkCount() +
counts.getDirectoryCount() +
counts.getSnapshotableDirectoryCount();
if (currentCount <= nextCountLimit) {
return false;
}
// Update the next limit
nextCountLimit = currentCount + limitPerRun;
boolean hadDirReadLock = dir.hasReadLock();
boolean hadDirWriteLock = dir.hasWriteLock();
boolean hadFsnReadLock = fsn.hasReadLock();
boolean hadFsnWriteLock = fsn.hasWriteLock();
// sanity check.
if (!hadDirReadLock || !hadFsnReadLock || hadDirWriteLock ||
hadFsnWriteLock || dir.getReadHoldCount() != 1 ||
fsn.getReadHoldCount() != 1) {
// cannot relinquish
return false;
}
// unlock
dir.readUnlock();
fsn.readUnlock();
try {
Thread.sleep(sleepMilliSec, sleepNanoSec);
} catch (InterruptedException ie) {
} finally {
// reacquire
fsn.readLock();
dir.readLock();
}
yieldCount++;
return true;
}
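  // Illustrative example (numbers are hypothetical): with limitPerRun = 5000
  // and sleepMicroSec = 500, the computation drops the FSDirectory and
  // FSNamesystem read locks roughly every 5000 counted inodes, sleeps for
  // about 0.5 ms, and then reacquires the locks before continuing.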
/** Get the content counts */
public ContentCounts getCounts() {
return counts;
}
public BlockStoragePolicySuite getBlockStoragePolicySuite() {
Preconditions.checkState((bsps != null || fsn != null),
"BlockStoragePolicySuite must be either initialized or available via" +
" FSNameSystem");
return (bsps != null) ? bsps:
fsn.getBlockManager().getStoragePolicySuite();
}
}
| 4,353 | 31.014706 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.LongBitFormat;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/** I-node for closed file. */
@InterfaceAudience.Private
public class INodeFile extends INodeWithAdditionalFields
implements INodeFileAttributes, BlockCollection {
/** The same as valueOf(inode, path, false). */
public static INodeFile valueOf(INode inode, String path
) throws FileNotFoundException {
return valueOf(inode, path, false);
}
/** Cast INode to INodeFile. */
public static INodeFile valueOf(INode inode, String path, boolean acceptNull)
throws FileNotFoundException {
if (inode == null) {
if (acceptNull) {
return null;
} else {
throw new FileNotFoundException("File does not exist: " + path);
}
}
if (!inode.isFile()) {
throw new FileNotFoundException("Path is not a file: " + path);
}
return inode.asFile();
}
/**
* Bit format:
* [4-bit storagePolicyID][12-bit replication][48-bit preferredBlockSize]
*/
static enum HeaderFormat {
PREFERRED_BLOCK_SIZE(null, 48, 1),
REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 12, 1),
STORAGE_POLICY_ID(REPLICATION.BITS, BlockStoragePolicySuite.ID_BIT_LENGTH,
0);
private final LongBitFormat BITS;
private HeaderFormat(LongBitFormat previous, int length, long min) {
BITS = new LongBitFormat(name(), previous, length, min);
}
static short getReplication(long header) {
return (short)REPLICATION.BITS.retrieve(header);
}
static long getPreferredBlockSize(long header) {
return PREFERRED_BLOCK_SIZE.BITS.retrieve(header);
}
static byte getStoragePolicyID(long header) {
return (byte)STORAGE_POLICY_ID.BITS.retrieve(header);
}
static long toLong(long preferredBlockSize, short replication,
byte storagePolicyID) {
long h = 0;
if (preferredBlockSize == 0) {
preferredBlockSize = PREFERRED_BLOCK_SIZE.BITS.getMin();
}
h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
h = REPLICATION.BITS.combine(replication, h);
h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h);
return h;
}
}
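  // Illustrative example (values are hypothetical): HeaderFormat.toLong(
  // 128L * 1024 * 1024, (short) 3, (byte) 0) packs a 128 MB preferred block
  // size into the low 48 bits and a replication factor of 3 into the next 12
  // bits; getPreferredBlockSize() and getReplication() recover those values
  // from the packed header.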
private long header = 0L;
private BlockInfo[] blocks;
INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
long atime, BlockInfo[] blklist, short replication,
long preferredBlockSize) {
this(id, name, permissions, mtime, atime, blklist, replication,
preferredBlockSize, (byte) 0);
}
INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
long atime, BlockInfo[] blklist, short replication,
long preferredBlockSize, byte storagePolicyID) {
super(id, name, permissions, mtime, atime);
header = HeaderFormat.toLong(preferredBlockSize, replication,
storagePolicyID);
this.blocks = blklist;
}
public INodeFile(INodeFile that) {
super(that);
this.header = that.header;
this.blocks = that.blocks;
this.features = that.features;
}
public INodeFile(INodeFile that, FileDiffList diffs) {
this(that);
Preconditions.checkArgument(!that.isWithSnapshot());
this.addSnapshotFeature(diffs);
}
/** @return true unconditionally. */
@Override
public final boolean isFile() {
return true;
}
/** @return this object. */
@Override
public final INodeFile asFile() {
return this;
}
@Override
public boolean metadataEquals(INodeFileAttributes other) {
return other != null
&& getHeaderLong()== other.getHeaderLong()
&& getPermissionLong() == other.getPermissionLong()
&& getAclFeature() == other.getAclFeature()
&& getXAttrFeature() == other.getXAttrFeature();
}
/* Start of Under-Construction Feature */
/**
* If the inode contains a {@link FileUnderConstructionFeature}, return it;
* otherwise, return null.
*/
public final FileUnderConstructionFeature getFileUnderConstructionFeature() {
return getFeature(FileUnderConstructionFeature.class);
}
/** Is this file under construction? */
@Override // BlockCollection
public boolean isUnderConstruction() {
return getFileUnderConstructionFeature() != null;
}
INodeFile toUnderConstruction(String clientName, String clientMachine) {
Preconditions.checkState(!isUnderConstruction(),
"file is already under construction");
FileUnderConstructionFeature uc = new FileUnderConstructionFeature(
clientName, clientMachine);
addFeature(uc);
return this;
}
/**
* Convert the file to a complete file, i.e., to remove the Under-Construction
* feature.
*/
public INodeFile toCompleteFile(long mtime) {
Preconditions.checkState(isUnderConstruction(),
"file is no longer under construction");
FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
if (uc != null) {
assertAllBlocksComplete();
removeFeature(uc);
this.setModificationTime(mtime);
}
return this;
}
/** Assert all blocks are complete. */
private void assertAllBlocksComplete() {
if (blocks == null) {
return;
}
for (int i = 0; i < blocks.length; i++) {
Preconditions.checkState(blocks[i].isComplete(), "Failed to finalize"
+ " %s %s since blocks[%s] is non-complete, where blocks=%s.",
getClass().getSimpleName(), this, i, Arrays.asList(blocks));
}
}
@Override // BlockCollection
public void setBlock(int index, BlockInfo blk) {
this.blocks[index] = blk;
}
@Override // BlockCollection, the file should be under construction
public BlockInfoContiguousUnderConstruction setLastBlock(
BlockInfo lastBlock, DatanodeStorageInfo[] locations)
throws IOException {
Preconditions.checkState(isUnderConstruction(),
"file is no longer under construction");
if (numBlocks() == 0) {
throw new IOException("Failed to set last block: File is empty.");
}
BlockInfoContiguousUnderConstruction ucBlock =
lastBlock.convertToBlockUnderConstruction(
BlockUCState.UNDER_CONSTRUCTION, locations);
setBlock(numBlocks() - 1, ucBlock);
return ucBlock;
}
/**
* Remove a block from the block list. This block should be
* the last one on the list.
*/
BlockInfoContiguousUnderConstruction removeLastBlock(Block oldblock) {
Preconditions.checkState(isUnderConstruction(),
"file is no longer under construction");
if (blocks == null || blocks.length == 0) {
return null;
}
int size_1 = blocks.length - 1;
if (!blocks[size_1].equals(oldblock)) {
return null;
}
BlockInfoContiguousUnderConstruction uc =
(BlockInfoContiguousUnderConstruction)blocks[size_1];
//copy to a new list
BlockInfo[] newlist = new BlockInfo[size_1];
System.arraycopy(blocks, 0, newlist, 0, size_1);
setBlocks(newlist);
return uc;
}
/* End of Under-Construction Feature */
/* Start of Snapshot Feature */
public FileWithSnapshotFeature addSnapshotFeature(FileDiffList diffs) {
Preconditions.checkState(!isWithSnapshot(),
"File is already with snapshot");
FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
this.addFeature(sf);
return sf;
}
/**
* If feature list contains a {@link FileWithSnapshotFeature}, return it;
* otherwise, return null.
*/
public final FileWithSnapshotFeature getFileWithSnapshotFeature() {
return getFeature(FileWithSnapshotFeature.class);
}
  /** Does this file have the snapshot feature? */
public final boolean isWithSnapshot() {
return getFileWithSnapshotFeature() != null;
}
@Override
public String toDetailString() {
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
return super.toDetailString() + (sf == null ? "" : sf.getDetailedString());
}
@Override
public INodeFileAttributes getSnapshotINode(final int snapshotId) {
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf != null) {
return sf.getDiffs().getSnapshotINode(snapshotId, this);
} else {
return this;
}
}
@Override
public void recordModification(final int latestSnapshotId) {
recordModification(latestSnapshotId, false);
}
public void recordModification(final int latestSnapshotId, boolean withBlocks) {
if (isInLatestSnapshot(latestSnapshotId)
&& !shouldRecordInSrcSnapshot(latestSnapshotId)) {
// the file is in snapshot, create a snapshot feature if it does not have
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf == null) {
sf = addSnapshotFeature(null);
}
// record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null, withBlocks);
}
}
public FileDiffList getDiffs() {
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf != null) {
return sf.getDiffs();
}
return null;
}
/* End of Snapshot Feature */
/** @return the replication factor of the file. */
public final short getFileReplication(int snapshot) {
if (snapshot != CURRENT_STATE_ID) {
return getSnapshotINode(snapshot).getFileReplication();
}
return HeaderFormat.getReplication(header);
}
  /** The same as getFileReplication(CURRENT_STATE_ID). */
@Override // INodeFileAttributes
public final short getFileReplication() {
return getFileReplication(CURRENT_STATE_ID);
}
@Override // BlockCollection
public short getPreferredBlockReplication() {
short max = getFileReplication(CURRENT_STATE_ID);
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf != null) {
short maxInSnapshot = sf.getMaxBlockRepInDiffs();
if (sf.isCurrentFileDeleted()) {
return maxInSnapshot;
}
max = maxInSnapshot > max ? maxInSnapshot : max;
}
return max;
}
/** Set the replication factor of this file. */
private void setFileReplication(short replication) {
header = HeaderFormat.REPLICATION.BITS.combine(replication, header);
}
/** Set the replication factor of this file. */
public final INodeFile setFileReplication(short replication,
int latestSnapshotId) throws QuotaExceededException {
recordModification(latestSnapshotId);
setFileReplication(replication);
return this;
}
/** @return preferred block size (in bytes) of the file. */
@Override
public long getPreferredBlockSize() {
return HeaderFormat.getPreferredBlockSize(header);
}
@Override
public byte getLocalStoragePolicyID() {
return HeaderFormat.getStoragePolicyID(header);
}
@Override
public byte getStoragePolicyID() {
byte id = getLocalStoragePolicyID();
if (id == BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
return this.getParent() != null ?
this.getParent().getStoragePolicyID() : id;
}
return id;
}
private void setStoragePolicyID(byte storagePolicyId) {
header = HeaderFormat.STORAGE_POLICY_ID.BITS.combine(storagePolicyId,
header);
}
public final void setStoragePolicyID(byte storagePolicyId,
int latestSnapshotId) throws QuotaExceededException {
recordModification(latestSnapshotId);
setStoragePolicyID(storagePolicyId);
}
@Override // INodeFileAttributes
public long getHeaderLong() {
return header;
}
/** @return the blocks of the file. */
@Override // BlockCollection
public BlockInfo[] getBlocks() {
return this.blocks;
}
/** @return blocks of the file corresponding to the snapshot. */
public BlockInfo[] getBlocks(int snapshot) {
if(snapshot == CURRENT_STATE_ID || getDiffs() == null) {
return getBlocks();
}
FileDiff diff = getDiffs().getDiffById(snapshot);
BlockInfo[] snapshotBlocks = diff == null ? getBlocks() : diff.getBlocks();
if (snapshotBlocks != null) {
return snapshotBlocks;
}
// Blocks are not in the current snapshot
// Find next snapshot with blocks present or return current file blocks
snapshotBlocks = getDiffs().findLaterSnapshotBlocks(snapshot);
return (snapshotBlocks == null) ? getBlocks() : snapshotBlocks;
}
/** Used during concat to update the BlockCollection for each block. */
private void updateBlockCollection() {
if (blocks != null) {
for(BlockInfo b : blocks) {
b.setBlockCollection(this);
}
}
}
/**
* append array of blocks to this.blocks
*/
void concatBlocks(INodeFile[] inodes) {
int size = this.blocks.length;
int totalAddedBlocks = 0;
for(INodeFile f : inodes) {
totalAddedBlocks += f.blocks.length;
}
BlockInfo[] newlist =
new BlockInfo[size + totalAddedBlocks];
System.arraycopy(this.blocks, 0, newlist, 0, size);
for(INodeFile in: inodes) {
System.arraycopy(in.blocks, 0, newlist, size, in.blocks.length);
size += in.blocks.length;
}
setBlocks(newlist);
updateBlockCollection();
}
/**
* add a block to the block list
*/
void addBlock(BlockInfo newblock) {
if (this.blocks == null) {
this.setBlocks(new BlockInfo[]{newblock});
} else {
int size = this.blocks.length;
BlockInfo[] newlist = new BlockInfo[size + 1];
System.arraycopy(this.blocks, 0, newlist, 0, size);
newlist[size] = newblock;
this.setBlocks(newlist);
}
}
/** Set the blocks. */
private void setBlocks(BlockInfo[] blocks) {
this.blocks = blocks;
}
/** Clear all blocks of the file. */
public void clearBlocks() {
setBlocks(null);
}
@Override
public void cleanSubtree(ReclaimContext reclaimContext,
final int snapshot, int priorSnapshotId) {
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf != null) {
// TODO: avoid calling getStoragePolicyID
sf.cleanFile(reclaimContext, this, snapshot, priorSnapshotId,
getStoragePolicyID());
} else {
if (snapshot == CURRENT_STATE_ID) {
if (priorSnapshotId == NO_SNAPSHOT_ID) {
// this only happens when deleting the current file and it is not
// in any snapshot
destroyAndCollectBlocks(reclaimContext);
} else {
FileUnderConstructionFeature uc = getFileUnderConstructionFeature();
// when deleting the current file and it is in snapshot, we should
// clean the 0-sized block if the file is UC
if (uc != null) {
uc.cleanZeroSizeBlock(this, reclaimContext.collectedBlocks);
if (reclaimContext.removedUCFiles != null) {
reclaimContext.removedUCFiles.add(getId());
}
}
}
}
}
}
@Override
public void destroyAndCollectBlocks(ReclaimContext reclaimContext) {
// TODO pass in the storage policy
reclaimContext.quotaDelta().add(computeQuotaUsage(reclaimContext.bsps,
false));
clearFile(reclaimContext);
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf != null) {
sf.getDiffs().destroyAndCollectSnapshotBlocks(
reclaimContext.collectedBlocks);
sf.clearDiffs();
}
if (isUnderConstruction() && reclaimContext.removedUCFiles != null) {
reclaimContext.removedUCFiles.add(getId());
}
}
public void clearFile(ReclaimContext reclaimContext) {
if (blocks != null && reclaimContext.collectedBlocks != null) {
for (BlockInfo blk : blocks) {
reclaimContext.collectedBlocks.addDeleteBlock(blk);
blk.setBlockCollection(null);
}
}
clearBlocks();
if (getAclFeature() != null) {
AclStorage.removeAclFeature(getAclFeature());
}
clear();
reclaimContext.removedINodes.add(this);
}
@Override
public String getName() {
// Get the full path name of this inode.
return getFullPathName();
}
// This is the only place that needs to use the BlockStoragePolicySuite to
// derive the intended storage type usage for quota by storage type
@Override
public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps,
byte blockStoragePolicyId, boolean useCache, int lastSnapshotId) {
final QuotaCounts counts = new QuotaCounts.Builder().nameSpace(1).build();
final BlockStoragePolicy bsp = bsps.getPolicy(blockStoragePolicyId);
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf == null) {
counts.add(storagespaceConsumed(bsp));
return counts;
}
FileDiffList fileDiffList = sf.getDiffs();
int last = fileDiffList.getLastSnapshotId();
if (lastSnapshotId == Snapshot.CURRENT_STATE_ID
|| last == Snapshot.CURRENT_STATE_ID) {
counts.add(storagespaceConsumed(bsp));
return counts;
}
final long ssDeltaNoReplication;
short replication;
if (last < lastSnapshotId) {
ssDeltaNoReplication = computeFileSize(true, false);
replication = getFileReplication();
} else {
int sid = fileDiffList.getSnapshotById(lastSnapshotId);
ssDeltaNoReplication = computeFileSize(sid);
replication = getFileReplication(sid);
}
counts.addStorageSpace(ssDeltaNoReplication * replication);
if (bsp != null) {
List<StorageType> storageTypes = bsp.chooseStorageTypes(replication);
for (StorageType t : storageTypes) {
if (!t.supportTypeQuota()) {
continue;
}
counts.addTypeSpace(t, ssDeltaNoReplication);
}
}
return counts;
}
@Override
public final ContentSummaryComputationContext computeContentSummary(
final ContentSummaryComputationContext summary) {
final ContentCounts counts = summary.getCounts();
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
final long fileLen;
if (sf == null) {
fileLen = computeFileSize();
counts.addContent(Content.FILE, 1);
} else {
final FileDiffList diffs = sf.getDiffs();
final int n = diffs.asList().size();
counts.addContent(Content.FILE, n);
if (n > 0 && sf.isCurrentFileDeleted()) {
fileLen = diffs.getLast().getFileSize();
} else {
fileLen = computeFileSize();
}
}
counts.addContent(Content.LENGTH, fileLen);
counts.addContent(Content.DISKSPACE, storagespaceConsumed(null)
.getStorageSpace());
if (getStoragePolicyID() != BLOCK_STORAGE_POLICY_ID_UNSPECIFIED){
BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
getPolicy(getStoragePolicyID());
List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
for (StorageType t : storageTypes) {
if (!t.supportTypeQuota()) {
continue;
}
counts.addTypeSpace(t, fileLen);
}
}
return summary;
}
  /** The same as computeFileSize(CURRENT_STATE_ID). */
public final long computeFileSize() {
return computeFileSize(CURRENT_STATE_ID);
}
/**
   * Compute the file size of the current file if the given snapshot id is
   * CURRENT_STATE_ID; otherwise, get the file size from the given snapshot.
*/
public final long computeFileSize(int snapshotId) {
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (snapshotId != CURRENT_STATE_ID && sf != null) {
final FileDiff d = sf.getDiffs().getDiffById(snapshotId);
if (d != null) {
return d.getFileSize();
}
}
return computeFileSize(true, false);
}
/**
   * Compute the file size of the current file,
   * excluding the last block if it is under construction.
*/
public final long computeFileSizeNotIncludingLastUcBlock() {
return computeFileSize(false, false);
}
/**
* Compute file size of the current file.
*
* @param includesLastUcBlock
* If the last block is under construction, should it be included?
* @param usePreferredBlockSize4LastUcBlock
* If the last block is under construction, should we use actual
* block size or preferred block size?
* Note that usePreferredBlockSize4LastUcBlock is ignored
* if includesLastUcBlock == false.
* @return file size
*/
public final long computeFileSize(boolean includesLastUcBlock,
boolean usePreferredBlockSize4LastUcBlock) {
if (blocks == null || blocks.length == 0) {
return 0;
}
final int last = blocks.length - 1;
//check if the last block is BlockInfoUnderConstruction
long size = blocks[last].getNumBytes();
if (blocks[last] instanceof BlockInfoContiguousUnderConstruction) {
if (!includesLastUcBlock) {
size = 0;
} else if (usePreferredBlockSize4LastUcBlock) {
size = getPreferredBlockSize();
}
}
//sum other blocks
for(int i = 0; i < last; i++) {
size += blocks[i].getNumBytes();
}
return size;
}
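  // Illustrative example (sizes are hypothetical): for a file with two
  // complete 128 MB blocks and a 64 MB block under construction, and a
  // preferred block size of 128 MB, computeFileSize(false, false) returns
  // 256 MB, computeFileSize(true, false) returns 320 MB, and
  // computeFileSize(true, true) returns 384 MB.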
/**
* Compute size consumed by all blocks of the current file,
* including blocks in its snapshots.
* Use preferred block size for the last block if it is under construction.
*/
public final QuotaCounts storagespaceConsumed(BlockStoragePolicy bsp) {
QuotaCounts counts = new QuotaCounts.Builder().build();
final Iterable<BlockInfo> blocks;
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf == null) {
blocks = Arrays.asList(getBlocks());
} else {
// Collect all distinct blocks
Set<BlockInfo> allBlocks = new HashSet<>(Arrays.asList(getBlocks()));
List<FileDiff> diffs = sf.getDiffs().asList();
for(FileDiff diff : diffs) {
BlockInfo[] diffBlocks = diff.getBlocks();
if (diffBlocks != null) {
allBlocks.addAll(Arrays.asList(diffBlocks));
}
}
blocks = allBlocks;
}
final short replication = getPreferredBlockReplication();
for (BlockInfo b : blocks) {
long blockSize = b.isComplete() ? b.getNumBytes() :
getPreferredBlockSize();
counts.addStorageSpace(blockSize * replication);
if (bsp != null) {
List<StorageType> types = bsp.chooseStorageTypes(replication);
for (StorageType t : types) {
if (t.supportTypeQuota()) {
counts.addTypeSpace(t, blockSize);
}
}
}
}
return counts;
}
/**
* Return the penultimate allocated block for this file.
*/
BlockInfo getPenultimateBlock() {
if (blocks == null || blocks.length <= 1) {
return null;
}
return blocks[blocks.length - 2];
}
@Override
public BlockInfo getLastBlock() {
return blocks == null || blocks.length == 0? null: blocks[blocks.length-1];
}
@Override
public int numBlocks() {
return blocks == null ? 0 : blocks.length;
}
@VisibleForTesting
@Override
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix,
final int snapshotId) {
super.dumpTreeRecursively(out, prefix, snapshotId);
out.print(", fileSize=" + computeFileSize(snapshotId));
// only compare the first block
out.print(", blocks=");
out.print(blocks == null || blocks.length == 0? null: blocks[0]);
out.println();
}
/**
   * Truncate the block list to the first blocks whose cumulative size reaches
   * {@code max} and collect the blocks beyond that point for deletion.
   * @return sum of the sizes of the retained blocks
*/
public long collectBlocksBeyondMax(final long max,
final BlocksMapUpdateInfo collectedBlocks) {
final BlockInfo[] oldBlocks = getBlocks();
if (oldBlocks == null) {
return 0;
}
// find the minimum n such that the size of the first n blocks > max
int n = 0;
long size = 0;
for(; n < oldBlocks.length && max > size; n++) {
size += oldBlocks[n].getNumBytes();
}
if (n >= oldBlocks.length) {
return size;
}
// starting from block n, the data is beyond max.
// resize the array.
truncateBlocksTo(n);
// collect the blocks beyond max
if (collectedBlocks != null) {
for(; n < oldBlocks.length; n++) {
collectedBlocks.addDeleteBlock(oldBlocks[n]);
}
}
return size;
}
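  // Illustrative example (sizes are hypothetical): with three 100-byte blocks
  // and max = 150, the loop stops after the first two blocks (whose total of
  // 200 bytes already exceeds max), the block list is truncated to those two
  // blocks, the third block is added to collectedBlocks, and 200 is returned.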
/**
* compute the quota usage change for a truncate op
* @param newLength the length for truncation
**/
void computeQuotaDeltaForTruncate(
long newLength, BlockStoragePolicy bsps,
QuotaCounts delta) {
final BlockInfo[] blocks = getBlocks();
if (blocks == null || blocks.length == 0) {
return;
}
long size = 0;
for (BlockInfo b : blocks) {
size += b.getNumBytes();
}
BlockInfo[] sblocks = null;
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if (sf != null) {
FileDiff diff = sf.getDiffs().getLast();
sblocks = diff != null ? diff.getBlocks() : null;
}
for (int i = blocks.length - 1; i >= 0 && size > newLength;
size -= blocks[i].getNumBytes(), --i) {
BlockInfo bi = blocks[i];
long truncatedBytes;
if (size - newLength < bi.getNumBytes()) {
// Record a full block as the last block will be copied during
// recovery
truncatedBytes = bi.getNumBytes() - getPreferredBlockSize();
} else {
truncatedBytes = bi.getNumBytes();
}
      // The block also exists in a snapshot, so add the truncated bytes back
      // for the existing file
if (sblocks != null && i < sblocks.length && bi.equals(sblocks[i])) {
truncatedBytes -= bi.getNumBytes();
}
delta.addStorageSpace(-truncatedBytes * getPreferredBlockReplication());
if (bsps != null) {
List<StorageType> types = bsps.chooseStorageTypes(
getPreferredBlockReplication());
for (StorageType t : types) {
if (t.supportTypeQuota()) {
delta.addTypeSpace(t, -truncatedBytes);
}
}
}
}
}
void truncateBlocksTo(int n) {
final BlockInfo[] newBlocks;
if (n == 0) {
newBlocks = BlockInfo.EMPTY_ARRAY;
} else {
newBlocks = new BlockInfo[n];
System.arraycopy(getBlocks(), 0, newBlocks, 0, n);
}
// set new blocks
setBlocks(newBlocks);
}
public void collectBlocksBeyondSnapshot(BlockInfo[] snapshotBlocks,
BlocksMapUpdateInfo collectedBlocks) {
BlockInfo[] oldBlocks = getBlocks();
if(snapshotBlocks == null || oldBlocks == null)
return;
// Skip blocks in common between the file and the snapshot
int n = 0;
while(n < oldBlocks.length && n < snapshotBlocks.length &&
oldBlocks[n] == snapshotBlocks[n]) {
n++;
}
truncateBlocksTo(n);
// Collect the remaining blocks of the file
while(n < oldBlocks.length) {
collectedBlocks.addDeleteBlock(oldBlocks[n++]);
}
}
/** Exclude blocks collected for deletion that belong to a snapshot. */
void excludeSnapshotBlocks(int snapshotId,
BlocksMapUpdateInfo collectedBlocks) {
if(collectedBlocks == null || collectedBlocks.getToDeleteList().isEmpty())
return;
FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
if(sf == null)
return;
BlockInfo[] snapshotBlocks =
getDiffs().findEarlierSnapshotBlocks(snapshotId);
if(snapshotBlocks == null)
return;
List<BlockInfo> toDelete = collectedBlocks.getToDeleteList();
for(BlockInfo blk : snapshotBlocks) {
if(toDelete.contains(blk))
collectedBlocks.removeDeleteBlock(blk);
}
}
/**
   * @return true if the block is contained in the latest snapshot, false
   *         otherwise.
*/
boolean isBlockInLatestSnapshot(BlockInfo block) {
FileWithSnapshotFeature sf = this.getFileWithSnapshotFeature();
if (sf == null || sf.getDiffs() == null) {
return false;
}
BlockInfo[] snapshotBlocks = getDiffs()
.findEarlierSnapshotBlocks(getDiffs().getLastSnapshotId());
return snapshotBlocks != null &&
Arrays.asList(snapshotBlocks).contains(block);
}
}
| 30,343 | 31.384205 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.security.AccessControlException;
import com.google.common.collect.Lists;
import com.google.common.base.Preconditions;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
/**
 * There are five types of extended attributes <XAttr> defined by the
* following namespaces:
* <br>
* USER - extended user attributes: these can be assigned to files and
* directories to store arbitrary additional information. The access
* permissions for user attributes are defined by the file permission
* bits. For sticky directories, only the owner and privileged user can
* write attributes.
* <br>
* TRUSTED - trusted extended attributes: these are visible/accessible
* only to/by the super user.
* <br>
* SECURITY - extended security attributes: these are used by the HDFS
* core for security purposes and are not available through admin/user
* API.
* <br>
* SYSTEM - extended system attributes: these are used by the HDFS
* core and are not available through admin/user API.
* <br>
* RAW - extended system attributes: these are used for internal system
* attributes that sometimes need to be exposed. Like SYSTEM namespace
* attributes they are not visible to the user except when getXAttr/getXAttrs
* is called on a file or directory in the /.reserved/raw HDFS directory
* hierarchy. These attributes can only be accessed by the superuser.
* </br>
*/
@InterfaceAudience.Private
public class XAttrPermissionFilter {
static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr,
boolean isRawPath)
throws AccessControlException {
final boolean isSuperUser = pc.isSuperUser();
if (xAttr.getNameSpace() == XAttr.NameSpace.USER ||
(xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && isSuperUser)) {
return;
}
if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
isRawPath && isSuperUser) {
return;
}
if (XAttrHelper.getPrefixName(xAttr).
equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
if (xAttr.getValue() != null) {
throw new AccessControlException("Attempt to set a value for '" +
SECURITY_XATTR_UNREADABLE_BY_SUPERUSER +
"'. Values are not allowed for this xattr.");
}
return;
}
throw new AccessControlException("User doesn't have permission for xattr: "
+ XAttrHelper.getPrefixName(xAttr));
}
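  // Illustrative summary (not part of the original source): "user." xattrs
  // pass this check for any caller, "trusted." xattrs only for the superuser,
  // "raw." xattrs only for the superuser on a /.reserved/raw path, and the
  // SECURITY_XATTR_UNREADABLE_BY_SUPERUSER xattr only when no value is being
  // set; anything else fails with an AccessControlException.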
static void checkPermissionForApi(FSPermissionChecker pc,
List<XAttr> xAttrs, boolean isRawPath) throws AccessControlException {
Preconditions.checkArgument(xAttrs != null);
if (xAttrs.isEmpty()) {
return;
}
for (XAttr xAttr : xAttrs) {
checkPermissionForApi(pc, xAttr, isRawPath);
}
}
static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
List<XAttr> xAttrs, boolean isRawPath) {
assert xAttrs != null : "xAttrs can not be null";
if (xAttrs.isEmpty()) {
return xAttrs;
}
List<XAttr> filteredXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
final boolean isSuperUser = pc.isSuperUser();
for (XAttr xAttr : xAttrs) {
if (xAttr.getNameSpace() == XAttr.NameSpace.USER) {
filteredXAttrs.add(xAttr);
} else if (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED &&
isSuperUser) {
filteredXAttrs.add(xAttr);
} else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
isSuperUser && isRawPath) {
filteredXAttrs.add(xAttr);
} else if (XAttrHelper.getPrefixName(xAttr).
equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
filteredXAttrs.add(xAttr);
}
}
return filteredXAttrs;
}
}
| 4,748 | 36.393701 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.security.PrivilegedExceptionAction;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import com.google.common.base.Charsets;
/**
* Renew delegation tokens over http for use in hftp.
*/
@SuppressWarnings("serial")
public class RenewDelegationTokenServlet extends DfsServlet {
private static final Log LOG = LogFactory.getLog(RenewDelegationTokenServlet.class);
public static final String PATH_SPEC = "/renewDelegationToken";
public static final String TOKEN = "token";
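  // Illustrative request shape (not part of the original source): a GET to
  // PATH_SPEC with a "token" query parameter holding the delegation token in
  // its URL-string encoding; on success the response body is the new
  // expiration time returned by renewDelegationToken.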
@Override
protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
throws ServletException, IOException {
final UserGroupInformation ugi;
final ServletContext context = getServletContext();
final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
try {
ugi = getUGI(req, conf);
} catch(IOException ioe) {
LOG.info("Request for token received with no authentication from "
+ req.getRemoteAddr(), ioe);
resp.sendError(HttpServletResponse.SC_FORBIDDEN,
"Unable to identify or authenticate user");
return;
}
final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
String tokenString = req.getParameter(TOKEN);
    if (tokenString == null) {
      resp.sendError(HttpServletResponse.SC_MULTIPLE_CHOICES,
          "Token to renew not specified");
      // Return here; otherwise the null token string would be passed to
      // decodeFromUrlString below and fail with a NullPointerException.
      return;
    }
final Token<DelegationTokenIdentifier> token =
new Token<DelegationTokenIdentifier>();
token.decodeFromUrlString(tokenString);
try {
long result = ugi.doAs(new PrivilegedExceptionAction<Long>() {
@Override
public Long run() throws Exception {
return nn.getRpcServer().renewDelegationToken(token);
}
});
final PrintWriter os = new PrintWriter(new OutputStreamWriter(
resp.getOutputStream(), Charsets.UTF_8));
os.println(result);
os.close();
} catch(Exception e) {
// transfer exception over the http
String exceptionClass = e.getClass().getName();
String exceptionMsg = e.getLocalizedMessage();
String strException = exceptionClass + ";" + exceptionMsg;
LOG.info("Exception while renewing token. Re-throwing. s=" + strException, e);
resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, strException);
}
}
}
| 3,734 | 39.16129 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.FileNotFoundException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.util.SequentialNumber;
/**
 * An id which uniquely identifies an inode. Ids up to LAST_RESERVED_ID are
 * reserved for potential future usage. The id won't be recycled and is not
 * expected to wrap around in a very long time. The root inode id is always
 * LAST_RESERVED_ID + 1. Id 0 is used for backward compatibility support.
*/
@InterfaceAudience.Private
public class INodeId extends SequentialNumber {
/**
   * The last reserved inode id. Inode ids are allocated from
   * LAST_RESERVED_ID + 1. Note that {@code 2 << 14 - 1} evaluates to
   * {@code 2 << 13} (16384) because the shift operator binds more loosely
   * than subtraction.
*/
public static final long LAST_RESERVED_ID = 2 << 14 - 1;
public static final long ROOT_INODE_ID = LAST_RESERVED_ID + 1;
/**
* To check if the request id is the same as saved id. Don't check fileId
* with GRANDFATHER_INODE_ID for backward compatibility.
*/
public static void checkId(long requestId, INode inode)
throws FileNotFoundException {
if (requestId != HdfsConstants.GRANDFATHER_INODE_ID && requestId != inode.getId()) {
throw new FileNotFoundException(
"ID mismatch. Request id and saved id: " + requestId + " , "
+ inode.getId() + " for file " + inode.getFullPathName());
}
}
INodeId() {
super(ROOT_INODE_ID);
}
}
| 2,206 | 37.051724 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.Iterator;
import java.util.List;
import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import com.google.common.base.Preconditions;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
/**
* The NNStorageRetentionManager is responsible for inspecting the storage
* directories of the NN and enforcing a retention policy on checkpoints
* and edit logs.
*
* It delegates the actual removal of files to a StoragePurger
* implementation, which might delete the files or instead copy them to
* a filer or HDFS for later analysis.
*/
public class NNStorageRetentionManager {
private final int numCheckpointsToRetain;
private final long numExtraEditsToRetain;
private final int maxExtraEditsSegmentsToRetain;
private static final Log LOG = LogFactory.getLog(
NNStorageRetentionManager.class);
private final NNStorage storage;
private final StoragePurger purger;
private final LogsPurgeable purgeableLogs;
public NNStorageRetentionManager(
Configuration conf,
NNStorage storage,
LogsPurgeable purgeableLogs,
StoragePurger purger) {
this.numCheckpointsToRetain = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY,
DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_DEFAULT);
this.numExtraEditsToRetain = conf.getLong(
DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY,
DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_DEFAULT);
this.maxExtraEditsSegmentsToRetain = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_MAX_EXTRA_EDITS_SEGMENTS_RETAINED_KEY,
DFSConfigKeys.DFS_NAMENODE_MAX_EXTRA_EDITS_SEGMENTS_RETAINED_DEFAULT);
Preconditions.checkArgument(numCheckpointsToRetain > 0,
"Must retain at least one checkpoint");
Preconditions.checkArgument(numExtraEditsToRetain >= 0,
DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY +
" must not be negative");
this.storage = storage;
this.purgeableLogs = purgeableLogs;
this.purger = purger;
}
public NNStorageRetentionManager(Configuration conf, NNStorage storage,
LogsPurgeable purgeableLogs) {
this(conf, storage, purgeableLogs, new DeletionStoragePurger());
}
void purgeCheckpoints(NameNodeFile nnf) throws IOException {
purgeCheckpoinsAfter(nnf, -1);
}
void purgeCheckpoinsAfter(NameNodeFile nnf, long fromTxId)
throws IOException {
FSImageTransactionalStorageInspector inspector =
new FSImageTransactionalStorageInspector(EnumSet.of(nnf));
storage.inspectStorageDirs(inspector);
for (FSImageFile image : inspector.getFoundImages()) {
if (image.getCheckpointTxId() > fromTxId) {
purger.purgeImage(image);
}
}
}
void purgeOldStorage(NameNodeFile nnf) throws IOException {
FSImageTransactionalStorageInspector inspector =
new FSImageTransactionalStorageInspector(EnumSet.of(nnf));
storage.inspectStorageDirs(inspector);
long minImageTxId = getImageTxIdToRetain(inspector);
purgeCheckpointsOlderThan(inspector, minImageTxId);
if (nnf == NameNodeFile.IMAGE_ROLLBACK) {
// do not purge edits for IMAGE_ROLLBACK.
return;
}
// If fsimage_N is the image we want to keep, then we need to keep
// all txns > N. We can remove anything < N+1, since fsimage_N
// reflects the state up to and including N. However, we also
// provide a "cushion" of older txns that we keep, which is
// handy for HA, where a remote node may not have as many
// new images.
//
// First, determine the target number of extra transactions to retain based
// on the configured amount.
long minimumRequiredTxId = minImageTxId + 1;
long purgeLogsFrom = Math.max(0, minimumRequiredTxId - numExtraEditsToRetain);
ArrayList<EditLogInputStream> editLogs = new ArrayList<EditLogInputStream>();
purgeableLogs.selectInputStreams(editLogs, purgeLogsFrom, false);
Collections.sort(editLogs, new Comparator<EditLogInputStream>() {
@Override
public int compare(EditLogInputStream a, EditLogInputStream b) {
return ComparisonChain.start()
.compare(a.getFirstTxId(), b.getFirstTxId())
.compare(a.getLastTxId(), b.getLastTxId())
.result();
}
});
// Remove from consideration any edit logs that are in fact required.
while (editLogs.size() > 0 &&
editLogs.get(editLogs.size() - 1).getFirstTxId() >= minimumRequiredTxId) {
editLogs.remove(editLogs.size() - 1);
}
// Next, adjust the number of transactions to retain if doing so would mean
// keeping too many segments around.
while (editLogs.size() > maxExtraEditsSegmentsToRetain) {
purgeLogsFrom = editLogs.get(0).getLastTxId() + 1;
editLogs.remove(0);
}
// Finally, ensure that we're not trying to purge any transactions that we
// actually need.
if (purgeLogsFrom > minimumRequiredTxId) {
throw new AssertionError("Should not purge more edits than required to "
+ "restore: " + purgeLogsFrom + " should be <= "
+ minimumRequiredTxId);
}
purgeableLogs.purgeLogsOlderThan(purgeLogsFrom);
}
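  // Illustrative helper, not part of the upstream class: the initial purge
  // point used above is just max(0, (minImageTxId + 1) - numExtraEditsToRetain).
  // For example, with minImageTxId = 100 and numExtraEditsToRetain = 10, edit
  // txns 91..100 are kept as the HA "cushion" and anything older is a purge
  // candidate (subject to the segment-count adjustment applied above).
  private static long initialPurgeLogsFrom(long minImageTxId,
      long numExtraEditsToRetain) {
    final long minimumRequiredTxId = minImageTxId + 1;
    return Math.max(0, minimumRequiredTxId - numExtraEditsToRetain);
  }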
private void purgeCheckpointsOlderThan(
FSImageTransactionalStorageInspector inspector,
long minTxId) {
for (FSImageFile image : inspector.getFoundImages()) {
if (image.getCheckpointTxId() < minTxId) {
purger.purgeImage(image);
}
}
}
/**
* @param inspector inspector that has already inspected all storage dirs
* @return the transaction ID corresponding to the oldest checkpoint
* that should be retained.
*/
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
List<FSImageFile> images = inspector.getFoundImages();
TreeSet<Long> imageTxIds = Sets.newTreeSet();
for (FSImageFile image : images) {
imageTxIds.add(image.getCheckpointTxId());
}
List<Long> imageTxIdsList = Lists.newArrayList(imageTxIds);
if (imageTxIdsList.isEmpty()) {
return 0;
}
Collections.reverse(imageTxIdsList);
int toRetain = Math.min(numCheckpointsToRetain, imageTxIdsList.size());
long minTxId = imageTxIdsList.get(toRetain - 1);
LOG.info("Going to retain " + toRetain + " images with txid >= " +
minTxId);
return minTxId;
}
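  // Worked example, illustrative only: with images at txids {100, 200, 300,
  // 400} and numCheckpointsToRetain = 2, the reversed list is [400, 300, 200,
  // 100], toRetain = 2, and the returned minTxId is 300, so fsimage_300 and
  // fsimage_400 are kept while older checkpoints become eligible for purging.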
/**
* Interface responsible for disposing of old checkpoints and edit logs.
*/
static interface StoragePurger {
void purgeLog(EditLogFile log);
void purgeImage(FSImageFile image);
}
static class DeletionStoragePurger implements StoragePurger {
@Override
public void purgeLog(EditLogFile log) {
LOG.info("Purging old edit log " + log);
deleteOrWarn(log.getFile());
}
@Override
public void purgeImage(FSImageFile image) {
LOG.info("Purging old image " + image);
deleteOrWarn(image.getFile());
deleteOrWarn(MD5FileUtils.getDigestFileForFile(image.getFile()));
}
private static void deleteOrWarn(File file) {
if (!file.delete()) {
// It's OK if we fail to delete something -- we'll catch it
// next time we swing through this directory.
LOG.warn("Could not delete " + file);
}
}
}
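  /**
   * Illustrative sketch only, not part of upstream Hadoop: a purger that
   * moves purged files into an archive directory instead of deleting them,
   * along the lines suggested by the class javadoc. The archive-directory
   * handling is a hypothetical choice made for this example.
   */
  static class ArchivingStoragePurger implements StoragePurger {
    private final File archiveDir;
    ArchivingStoragePurger(File archiveDir) {
      this.archiveDir = archiveDir;
    }
    @Override
    public void purgeLog(EditLogFile log) {
      moveOrWarn(log.getFile());
    }
    @Override
    public void purgeImage(FSImageFile image) {
      moveOrWarn(image.getFile());
      moveOrWarn(MD5FileUtils.getDigestFileForFile(image.getFile()));
    }
    private void moveOrWarn(File file) {
      if (!archiveDir.isDirectory() && !archiveDir.mkdirs()) {
        LOG.warn("Could not create archive directory " + archiveDir);
        return;
      }
      if (!file.renameTo(new File(archiveDir, file.getName()))) {
        LOG.warn("Could not archive " + file);
      }
    }
  }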
/**
* Delete old OIV fsimages. Since the target dir is not a full blown
* storage directory, we simply list and keep the latest ones. For the
* same reason, no storage inspector is used.
*/
void purgeOldLegacyOIVImages(String dir, long txid) {
File oivImageDir = new File(dir);
final String oivImagePrefix = NameNodeFile.IMAGE_LEGACY_OIV.getName();
String filesInStorage[];
// Get the listing
filesInStorage = oivImageDir.list(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
return name.matches(oivImagePrefix + "_(\\d+)");
}
});
    if (filesInStorage == null) {
      // The directory does not exist or could not be listed; nothing to purge.
      return;
    }
    // Check whether there is any work to do.
    if (filesInStorage.length <= numCheckpointsToRetain) {
      return;
    }
// Create a sorted list of txids from the file names.
TreeSet<Long> sortedTxIds = new TreeSet<Long>();
for (String fName : filesInStorage) {
// Extract the transaction id from the file name.
long fTxId;
try {
fTxId = Long.parseLong(fName.substring(oivImagePrefix.length() + 1));
} catch (NumberFormatException nfe) {
// This should not happen since we have already filtered it.
// Log and continue.
LOG.warn("Invalid file name. Skipping " + fName);
continue;
}
sortedTxIds.add(Long.valueOf(fTxId));
}
int numFilesToDelete = sortedTxIds.size() - numCheckpointsToRetain;
Iterator<Long> iter = sortedTxIds.iterator();
while (numFilesToDelete > 0 && iter.hasNext()) {
long txIdVal = iter.next().longValue();
String fileName = NNStorage.getLegacyOIVImageFileName(txIdVal);
LOG.info("Deleting " + fileName);
File fileToDelete = new File(oivImageDir, fileName);
if (!fileToDelete.delete()) {
// deletion failed.
LOG.warn("Failed to delete image file: " + fileToDelete);
}
numFilesToDelete--;
}
}
}
| 10,796 | 35.849829 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.URL;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ServletUtil;
import org.znerd.xmlenc.XMLOutputter;
/** Servlets for file checksum */
@InterfaceAudience.Private
public class FileChecksumServlets {
/** Redirect file checksum queries to an appropriate datanode. */
@InterfaceAudience.Private
public static class RedirectServlet extends DfsServlet {
/** For java.io.Serializable */
private static final long serialVersionUID = 1L;
/** Create a redirection URL */
private URL createRedirectURL(UserGroupInformation ugi, DatanodeID host,
HttpServletRequest request, NameNode nn)
throws IOException {
final String hostname = host instanceof DatanodeInfo
? host.getHostName() : host.getIpAddr();
final String scheme = request.getScheme();
int port = host.getInfoPort();
if ("https".equals(scheme)) {
final Integer portObject = (Integer) getServletContext().getAttribute(
DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY);
if (portObject != null) {
port = portObject;
}
}
final String encodedPath = ServletUtil.getRawPath(request, "/fileChecksum");
String dtParam = "";
if (UserGroupInformation.isSecurityEnabled()) {
String tokenString = ugi.getTokens().iterator().next().encodeToUrlString();
dtParam = JspHelper.getDelegationTokenUrlParam(tokenString);
}
String addr = nn.getNameNodeAddressHostPortString();
String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
return new URL(scheme, hostname, port,
"/getFileChecksum" + encodedPath + '?' +
"ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) +
dtParam + addrParam);
}
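    // Illustrative example of the redirect built above; the host, port, path
    // and parameter values are hypothetical and the exact parameter names come
    // from JspHelper:
    //   http://dn1.example.com:50075/getFileChecksum/user/alice/data.txt
    //       ?ugi=alice&delegation=<token>&nnaddr=nn1.example.com:8020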
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
) throws ServletException, IOException {
final ServletContext context = getServletContext();
final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
final UserGroupInformation ugi = getUGI(request, conf);
final NameNode namenode = NameNodeHttpServer.getNameNodeFromContext(
context);
final DatanodeID datanode = NamenodeJspHelper.getRandomDatanode(namenode);
try {
response.sendRedirect(
createRedirectURL(ugi, datanode, request, namenode).toString());
} catch (IOException e) {
response.sendError(400, e.getMessage());
}
}
}
/** Get FileChecksum */
@InterfaceAudience.Private
public static class GetServlet extends DfsServlet {
/** For java.io.Serializable */
private static final long serialVersionUID = 1L;
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
) throws ServletException, IOException {
final PrintWriter out = response.getWriter();
final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
xml.declaration();
final ServletContext context = getServletContext();
final DataNode datanode = (DataNode) context.getAttribute("datanode");
final Configuration conf =
new HdfsConfiguration(datanode.getConf());
try {
final DFSClient dfs = DatanodeJspHelper.getDFSClient(request,
datanode, conf, getUGI(request, conf));
final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
MD5MD5CRC32FileChecksum.write(xml, checksum);
} catch(IOException ioe) {
writeXml(ioe, path, xml);
} catch (InterruptedException e) {
writeXml(e, path, xml);
}
xml.endDocument();
}
}
}
| 5,482 | 39.614815 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.net.InetSocketAddress;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.znerd.xmlenc.XMLOutputter;
/**
* A base class for the servlets in DFS.
*/
abstract class DfsServlet extends HttpServlet {
/** For java.io.Serializable */
private static final long serialVersionUID = 1L;
static final Log LOG = LogFactory.getLog(DfsServlet.class.getCanonicalName());
/** Write the object to XML format */
protected void writeXml(Exception except, String path, XMLOutputter doc)
throws IOException {
doc.startTag(RemoteException.class.getSimpleName());
doc.attribute("path", path);
if (except instanceof RemoteException) {
doc.attribute("class", ((RemoteException) except).getClassName());
} else {
doc.attribute("class", except.getClass().getName());
}
String msg = except.getLocalizedMessage();
int i = msg.indexOf("\n");
if (i >= 0) {
msg = msg.substring(0, i);
}
doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
doc.endTag();
}
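  // Illustrative output of writeXml; element layout only, the attribute values
  // depend on the actual exception and path:
  //   <RemoteException path="/user/alice/data.txt"
  //       class="java.io.FileNotFoundException"
  //       message="File does not exist: /user/alice/data.txt"/>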
/**
* Create a {@link NameNode} proxy from the current {@link ServletContext}.
*/
protected ClientProtocol createNameNodeProxy() throws IOException {
ServletContext context = getServletContext();
// if we are running in the Name Node, use it directly rather than via
// rpc
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
if (nn != null) {
return nn.getRpcServer();
}
InetSocketAddress nnAddr =
NameNodeHttpServer.getNameNodeAddressFromContext(context);
Configuration conf = new HdfsConfiguration(
NameNodeHttpServer.getConfFromContext(context));
return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
ClientProtocol.class).getProxy();
}
protected UserGroupInformation getUGI(HttpServletRequest request,
Configuration conf) throws IOException {
return JspHelper.getUGI(getServletContext(), request, conf);
}
}
| 3,410 | 37.325843 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
class FSDirAclOp {
static HdfsFileStatus modifyAclEntries(
FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
throws IOException {
String src = srcArg;
checkAclsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
INodesInPath iip;
fsd.writeLock();
try {
iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
fsd.checkOwner(pc, iip);
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.mergeAclEntries(
existingAcl, aclSpec);
AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
fsd.getEditLog().logSetAcl(src, newAcl);
} finally {
fsd.writeUnlock();
}
return fsd.getAuditFileInfo(iip);
}
static HdfsFileStatus removeAclEntries(
FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
throws IOException {
String src = srcArg;
checkAclsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
INodesInPath iip;
fsd.writeLock();
try {
iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
fsd.checkOwner(pc, iip);
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.filterAclEntriesByAclSpec(
existingAcl, aclSpec);
AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
fsd.getEditLog().logSetAcl(src, newAcl);
} finally {
fsd.writeUnlock();
}
return fsd.getAuditFileInfo(iip);
}
static HdfsFileStatus removeDefaultAcl(FSDirectory fsd, final String srcArg)
throws IOException {
String src = srcArg;
checkAclsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
INodesInPath iip;
fsd.writeLock();
try {
iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src), true);
fsd.checkOwner(pc, iip);
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
List<AclEntry> newAcl = AclTransformation.filterDefaultAclEntries(
existingAcl);
AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
fsd.getEditLog().logSetAcl(src, newAcl);
} finally {
fsd.writeUnlock();
}
return fsd.getAuditFileInfo(iip);
}
static HdfsFileStatus removeAcl(FSDirectory fsd, final String srcArg)
throws IOException {
String src = srcArg;
checkAclsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
INodesInPath iip;
fsd.writeLock();
try {
iip = fsd.getINodesInPath4Write(src);
fsd.checkOwner(pc, iip);
unprotectedRemoveAcl(fsd, iip);
} finally {
fsd.writeUnlock();
}
fsd.getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
return fsd.getAuditFileInfo(iip);
}
static HdfsFileStatus setAcl(
FSDirectory fsd, final String srcArg, List<AclEntry> aclSpec)
throws IOException {
String src = srcArg;
checkAclsConfigFlag(fsd);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
FSPermissionChecker pc = fsd.getPermissionChecker();
src = fsd.resolvePath(pc, src, pathComponents);
INodesInPath iip;
fsd.writeLock();
try {
iip = fsd.getINodesInPath4Write(src);
fsd.checkOwner(pc, iip);
List<AclEntry> newAcl = unprotectedSetAcl(fsd, src, aclSpec, false);
fsd.getEditLog().logSetAcl(src, newAcl);
} finally {
fsd.writeUnlock();
}
return fsd.getAuditFileInfo(iip);
}
static AclStatus getAclStatus(
FSDirectory fsd, String src) throws IOException {
checkAclsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
String srcs = FSDirectory.normalizePath(src);
fsd.readLock();
try {
// There is no real inode for the path ending in ".snapshot", so return a
// non-null, unpopulated AclStatus. This is similar to getFileInfo.
if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR) &&
fsd.getINode4DotSnapshot(srcs) != null) {
return new AclStatus.Builder().owner("").group("").build();
}
INodesInPath iip = fsd.getINodesInPath(srcs, true);
if (fsd.isPermissionEnabled()) {
fsd.checkTraverse(pc, iip);
}
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getPathSnapshotId();
List<AclEntry> acl = AclStorage.readINodeAcl(fsd.getAttributes(src,
inode.getLocalNameBytes(), inode, snapshotId));
FsPermission fsPermission = inode.getFsPermission(snapshotId);
return new AclStatus.Builder()
.owner(inode.getUserName()).group(inode.getGroupName())
.stickyBit(fsPermission.getStickyBit())
.setPermission(fsPermission)
.addEntries(acl).build();
} finally {
fsd.readUnlock();
}
}
static List<AclEntry> unprotectedSetAcl(
FSDirectory fsd, String src, List<AclEntry> aclSpec, boolean fromEdits)
throws IOException {
assert fsd.hasWriteLock();
final INodesInPath iip = fsd.getINodesInPath4Write(
FSDirectory.normalizePath(src), true);
// ACL removal is logged to edits as OP_SET_ACL with an empty list.
if (aclSpec.isEmpty()) {
unprotectedRemoveAcl(fsd, iip);
return AclFeature.EMPTY_ENTRY_LIST;
}
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
List<AclEntry> newAcl = aclSpec;
if (!fromEdits) {
List<AclEntry> existingAcl = AclStorage.readINodeLogicalAcl(inode);
newAcl = AclTransformation.replaceAclEntries(existingAcl, aclSpec);
}
AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
return newAcl;
}
private static void checkAclsConfigFlag(FSDirectory fsd) throws AclException {
if (!fsd.isAclsEnabled()) {
throw new AclException(String.format(
"The ACL operation has been rejected. "
+ "Support for ACLs has been disabled by setting %s to false.",
DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY));
}
}
private static void unprotectedRemoveAcl(FSDirectory fsd, INodesInPath iip)
throws IOException {
assert fsd.hasWriteLock();
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
AclFeature f = inode.getAclFeature();
if (f == null) {
return;
}
FsPermission perm = inode.getFsPermission();
List<AclEntry> featureEntries = AclStorage.getEntriesFromAclFeature(f);
if (featureEntries.get(0).getScope() == AclEntryScope.ACCESS) {
// Restore group permissions from the feature's entry to permission
// bits, overwriting the mask, which is not part of a minimal ACL.
AclEntry groupEntryKey = new AclEntry.Builder()
.setScope(AclEntryScope.ACCESS).setType(AclEntryType.GROUP).build();
int groupEntryIndex = Collections.binarySearch(
featureEntries, groupEntryKey,
AclTransformation.ACL_ENTRY_COMPARATOR);
assert groupEntryIndex >= 0;
FsAction groupPerm = featureEntries.get(groupEntryIndex).getPermission();
FsPermission newPerm = new FsPermission(perm.getUserAction(), groupPerm,
perm.getOtherAction(), perm.getStickyBit());
inode.setPermission(newPerm, snapshotId);
}
inode.removeAclFeature(snapshotId);
}
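  // Worked example, illustrative only: if the ACL feature holds
  // [user:bruce:rwx, group::r-x, mask::rw-] and the inode's permission bits
  // are rwxrw-r-- (the group field carries the mask), removing the ACL
  // restores the group bits from the group:: entry, leaving rwxr-xr--.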
}
| 9,856 | 38.270916 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LogsPurgeable.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.Collection;
/**
* Interface used to abstract over classes which manage edit logs that may need
* to be purged.
*/
interface LogsPurgeable {
/**
* Remove all edit logs with transaction IDs lower than the given transaction
* ID.
*
* @param minTxIdToKeep the lowest transaction ID that should be retained
* @throws IOException in the event of error
*/
public void purgeLogsOlderThan(long minTxIdToKeep) throws IOException;
/**
* Get a list of edit log input streams. The list will start with the
   * stream that contains fromTxId, and continue until the end of the journal
   * being managed.
   *
   * @param streams collection to which the selected input streams are added
   * @param fromTxId the first transaction id we want to read
* @param inProgressOk whether or not in-progress streams should be returned
* @throws IOException if the underlying storage has an error or is otherwise
* inaccessible
*/
void selectInputStreams(Collection<EditLogInputStream> streams,
long fromTxId, boolean inProgressOk) throws IOException;
}
| 1,917 | 35.884615 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.security.AccessControlException;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.ListIterator;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
class FSDirXAttrOp {
private static final XAttr KEYID_XATTR =
XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, null);
private static final XAttr UNREADABLE_BY_SUPERUSER_XATTR =
XAttrHelper.buildXAttr(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER, null);
/**
* Set xattr for a file or directory.
*
* @param src
* - path on which it sets the xattr
* @param xAttr
* - xAttr details to set
* @param flag
* - xAttrs flags
* @throws IOException
*/
static HdfsFileStatus setXAttr(
FSDirectory fsd, String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
boolean logRetryCache)
throws IOException {
checkXAttrsConfigFlag(fsd);
checkXAttrSize(fsd, xAttr);
FSPermissionChecker pc = fsd.getPermissionChecker();
XAttrPermissionFilter.checkPermissionForApi(
pc, xAttr, FSDirectory.isReservedRawName(src));
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
xAttrs.add(xAttr);
INodesInPath iip;
fsd.writeLock();
try {
iip = fsd.getINodesInPath4Write(src);
checkXAttrChangeAccess(fsd, iip, xAttr, pc);
unprotectedSetXAttrs(fsd, src, xAttrs, flag);
} finally {
fsd.writeUnlock();
}
fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
return fsd.getAuditFileInfo(iip);
}
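  // Illustrative call sketch, not in the upstream source; the path, xattr name
  // and value below are hypothetical:
  //   XAttr xattr = XAttrHelper.buildXAttr("user.checksum-policy",
  //       "strict".getBytes(Charsets.UTF_8));
  //   FSDirXAttrOp.setXAttr(fsd, "/user/alice/data.txt", xattr,
  //       EnumSet.of(XAttrSetFlag.CREATE), false);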
static List<XAttr> getXAttrs(FSDirectory fsd, final String srcArg,
List<XAttr> xAttrs)
throws IOException {
String src = srcArg;
checkXAttrsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
final boolean isRawPath = FSDirectory.isReservedRawName(src);
boolean getAll = xAttrs == null || xAttrs.isEmpty();
if (!getAll) {
XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs, isRawPath);
}
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
final INodesInPath iip = fsd.getINodesInPath(src, true);
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.READ);
}
List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, src);
List<XAttr> filteredAll = XAttrPermissionFilter.
filterXAttrsForApi(pc, all, isRawPath);
if (getAll) {
return filteredAll;
}
if (filteredAll == null || filteredAll.isEmpty()) {
throw new IOException(
"At least one of the attributes provided was not found.");
}
List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
for (XAttr xAttr : xAttrs) {
boolean foundIt = false;
for (XAttr a : filteredAll) {
if (xAttr.getNameSpace() == a.getNameSpace() && xAttr.getName().equals(
a.getName())) {
toGet.add(a);
foundIt = true;
break;
}
}
if (!foundIt) {
throw new IOException(
"At least one of the attributes provided was not found.");
}
}
return toGet;
}
static List<XAttr> listXAttrs(
FSDirectory fsd, String src) throws IOException {
FSDirXAttrOp.checkXAttrsConfigFlag(fsd);
final FSPermissionChecker pc = fsd.getPermissionChecker();
final boolean isRawPath = FSDirectory.isReservedRawName(src);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
final INodesInPath iip = fsd.getINodesInPath(src, true);
if (fsd.isPermissionEnabled()) {
/* To access xattr names, you need EXECUTE in the owning directory. */
fsd.checkParentAccess(pc, iip, FsAction.EXECUTE);
}
final List<XAttr> all = FSDirXAttrOp.getXAttrs(fsd, src);
return XAttrPermissionFilter.
filterXAttrsForApi(pc, all, isRawPath);
}
/**
* Remove an xattr for a file or directory.
*
* @param src
* - path to remove the xattr from
* @param xAttr
* - xAttr to remove
* @throws IOException
*/
static HdfsFileStatus removeXAttr(
FSDirectory fsd, String src, XAttr xAttr, boolean logRetryCache)
throws IOException {
FSDirXAttrOp.checkXAttrsConfigFlag(fsd);
FSPermissionChecker pc = fsd.getPermissionChecker();
XAttrPermissionFilter.checkPermissionForApi(
pc, xAttr, FSDirectory.isReservedRawName(src));
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
src);
List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
xAttrs.add(xAttr);
INodesInPath iip;
fsd.writeLock();
try {
src = fsd.resolvePath(pc, src, pathComponents);
iip = fsd.getINodesInPath4Write(src);
checkXAttrChangeAccess(fsd, iip, xAttr, pc);
List<XAttr> removedXAttrs = unprotectedRemoveXAttrs(fsd, src, xAttrs);
if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
fsd.getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
} else {
throw new IOException(
"No matching attributes found for remove operation");
}
} finally {
fsd.writeUnlock();
}
return fsd.getAuditFileInfo(iip);
}
static List<XAttr> unprotectedRemoveXAttrs(
FSDirectory fsd, final String src, final List<XAttr> toRemove)
throws IOException {
assert fsd.hasWriteLock();
INodesInPath iip = fsd.getINodesInPath4Write(
FSDirectory.normalizePath(src), true);
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
List<XAttr> removedXAttrs = Lists.newArrayListWithCapacity(toRemove.size());
List<XAttr> newXAttrs = filterINodeXAttrs(existingXAttrs, toRemove,
removedXAttrs);
if (existingXAttrs.size() != newXAttrs.size()) {
XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
return removedXAttrs;
}
return null;
}
/**
* Filter XAttrs from a list of existing XAttrs. Removes matched XAttrs from
* toFilter and puts them into filtered. Upon completion,
   * toFilter contains the filter XAttrs that were not found, while
   * filtered contains the XAttrs that were found.
*
* @param existingXAttrs Existing XAttrs to be filtered
* @param toFilter XAttrs to filter from the existing XAttrs
* @param filtered Return parameter, XAttrs that were filtered
* @return List of XAttrs that does not contain filtered XAttrs
*/
@VisibleForTesting
static List<XAttr> filterINodeXAttrs(
final List<XAttr> existingXAttrs, final List<XAttr> toFilter,
final List<XAttr> filtered)
throws AccessControlException {
if (existingXAttrs == null || existingXAttrs.isEmpty() ||
toFilter == null || toFilter.isEmpty()) {
return existingXAttrs;
}
// Populate a new list with XAttrs that pass the filter
List<XAttr> newXAttrs =
Lists.newArrayListWithCapacity(existingXAttrs.size());
for (XAttr a : existingXAttrs) {
boolean add = true;
      for (ListIterator<XAttr> it = toFilter.listIterator(); it.hasNext();) {
XAttr filter = it.next();
Preconditions.checkArgument(
!KEYID_XATTR.equalsIgnoreValue(filter),
"The encryption zone xattr should never be deleted.");
if (UNREADABLE_BY_SUPERUSER_XATTR.equalsIgnoreValue(filter)) {
throw new AccessControlException("The xattr '" +
SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' can not be deleted.");
}
if (a.equalsIgnoreValue(filter)) {
add = false;
it.remove();
filtered.add(filter);
break;
}
}
if (add) {
newXAttrs.add(a);
}
}
return newXAttrs;
}
static INode unprotectedSetXAttrs(
FSDirectory fsd, final String src, final List<XAttr> xAttrs,
final EnumSet<XAttrSetFlag> flag)
throws IOException {
assert fsd.hasWriteLock();
INodesInPath iip = fsd.getINodesInPath4Write(FSDirectory.normalizePath(src),
true);
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
List<XAttr> newXAttrs = setINodeXAttrs(fsd, existingXAttrs, xAttrs, flag);
final boolean isFile = inode.isFile();
for (XAttr xattr : newXAttrs) {
final String xaName = XAttrHelper.getPrefixName(xattr);
/*
* If we're adding the encryption zone xattr, then add src to the list
* of encryption zones.
*/
if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) {
final HdfsProtos.ZoneEncryptionInfoProto ezProto =
HdfsProtos.ZoneEncryptionInfoProto.parseFrom(xattr.getValue());
fsd.ezManager.addEncryptionZone(inode.getId(),
PBHelper.convert(ezProto.getSuite()),
PBHelper.convert(
ezProto.getCryptoProtocolVersion()),
ezProto.getKeyName());
}
if (!isFile && SECURITY_XATTR_UNREADABLE_BY_SUPERUSER.equals(xaName)) {
throw new IOException("Can only set '" +
SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' on a file.");
}
}
XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
return inode;
}
static List<XAttr> setINodeXAttrs(
FSDirectory fsd, final List<XAttr> existingXAttrs,
final List<XAttr> toSet, final EnumSet<XAttrSetFlag> flag)
throws IOException {
// Check for duplicate XAttrs in toSet
// We need to use a custom comparator, so using a HashSet is not suitable
for (int i = 0; i < toSet.size(); i++) {
for (int j = i + 1; j < toSet.size(); j++) {
if (toSet.get(i).equalsIgnoreValue(toSet.get(j))) {
throw new IOException("Cannot specify the same XAttr to be set " +
"more than once");
}
}
}
// Count the current number of user-visible XAttrs for limit checking
int userVisibleXAttrsNum = 0; // Number of user visible xAttrs
// The XAttr list is copied to an exactly-sized array when it's stored,
// so there's no need to size it precisely here.
int newSize = (existingXAttrs != null) ? existingXAttrs.size() : 0;
newSize += toSet.size();
List<XAttr> xAttrs = Lists.newArrayListWithCapacity(newSize);
// Check if the XAttr already exists to validate with the provided flag
for (XAttr xAttr: toSet) {
boolean exist = false;
if (existingXAttrs != null) {
for (XAttr a : existingXAttrs) {
if (a.equalsIgnoreValue(xAttr)) {
exist = true;
break;
}
}
}
XAttrSetFlag.validate(xAttr.getName(), exist, flag);
// add the new XAttr since it passed validation
xAttrs.add(xAttr);
if (isUserVisible(xAttr)) {
userVisibleXAttrsNum++;
}
}
// Add the existing xattrs back in, if they weren't already set
if (existingXAttrs != null) {
for (XAttr existing : existingXAttrs) {
boolean alreadySet = false;
for (XAttr set : toSet) {
if (set.equalsIgnoreValue(existing)) {
alreadySet = true;
break;
}
}
if (!alreadySet) {
xAttrs.add(existing);
if (isUserVisible(existing)) {
userVisibleXAttrsNum++;
}
}
}
}
if (userVisibleXAttrsNum > fsd.getInodeXAttrsLimit()) {
throw new IOException("Cannot add additional XAttr to inode, "
+ "would exceed limit of " + fsd.getInodeXAttrsLimit());
}
return xAttrs;
}
static List<XAttr> getXAttrs(FSDirectory fsd, INode inode, int snapshotId)
throws IOException {
fsd.readLock();
try {
return XAttrStorage.readINodeXAttrs(inode, snapshotId);
} finally {
fsd.readUnlock();
}
}
static XAttr unprotectedGetXAttrByName(
INode inode, int snapshotId, String xAttrName)
throws IOException {
List<XAttr> xAttrs = XAttrStorage.readINodeXAttrs(inode, snapshotId);
if (xAttrs == null) {
return null;
}
for (XAttr x : xAttrs) {
if (XAttrHelper.getPrefixName(x)
.equals(xAttrName)) {
return x;
}
}
return null;
}
private static void checkXAttrChangeAccess(
FSDirectory fsd, INodesInPath iip, XAttr xAttr,
FSPermissionChecker pc)
throws AccessControlException {
if (fsd.isPermissionEnabled() && xAttr.getNameSpace() == XAttr.NameSpace
.USER) {
final INode inode = iip.getLastINode();
if (inode != null &&
inode.isDirectory() &&
inode.getFsPermission().getStickyBit()) {
if (!pc.isSuperUser()) {
fsd.checkOwner(pc, iip);
}
} else {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
}
}
/**
* Verifies that the combined size of the name and value of an xattr is within
* the configured limit. Setting a limit of zero disables this check.
*/
private static void checkXAttrSize(FSDirectory fsd, XAttr xAttr) {
if (fsd.getXattrMaxSize() == 0) {
return;
}
int size = xAttr.getName().getBytes(Charsets.UTF_8).length;
if (xAttr.getValue() != null) {
size += xAttr.getValue().length;
}
if (size > fsd.getXattrMaxSize()) {
throw new HadoopIllegalArgumentException(
"The XAttr is too big. The maximum combined size of the"
+ " name and value is " + fsd.getXattrMaxSize()
+ ", but the total size is " + size);
}
}
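  // Worked example, illustrative only: for the name "user.tag" (8 bytes in
  // UTF-8) and a 3-byte value, the combined size checked above is 11 bytes,
  // which must not exceed the limit returned by fsd.getXattrMaxSize().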
private static void checkXAttrsConfigFlag(FSDirectory fsd) throws
IOException {
if (!fsd.isXattrsEnabled()) {
throw new IOException(String.format(
"The XAttr operation has been rejected. "
+ "Support for XAttrs has been disabled by setting %s to false.",
DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY));
}
}
private static List<XAttr> getXAttrs(FSDirectory fsd,
String src) throws IOException {
String srcs = FSDirectory.normalizePath(src);
fsd.readLock();
try {
INodesInPath iip = fsd.getINodesInPath(srcs, true);
INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getPathSnapshotId();
return XAttrStorage.readINodeXAttrs(fsd.getAttributes(src,
inode.getLocalNameBytes(), inode, snapshotId));
} finally {
fsd.readUnlock();
}
}
private static boolean isUserVisible(XAttr xAttr) {
XAttr.NameSpace ns = xAttr.getNameSpace();
return ns == XAttr.NameSpace.USER || ns == XAttr.NameSpace.TRUSTED;
}
}
| 16,854 | 35.169528 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetDelegationTokenServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.DataOutputStream;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
/**
* Serve delegation tokens over http for use in hftp.
*/
@SuppressWarnings("serial")
public class GetDelegationTokenServlet extends DfsServlet {
private static final Log LOG = LogFactory.getLog(GetDelegationTokenServlet.class);
public static final String PATH_SPEC = "/getDelegationToken";
public static final String RENEWER = "renewer";
@Override
protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
throws ServletException, IOException {
final UserGroupInformation ugi;
final ServletContext context = getServletContext();
final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
try {
ugi = getUGI(req, conf);
} catch(IOException ioe) {
LOG.info("Request for token received with no authentication from "
+ req.getRemoteAddr(), ioe);
resp.sendError(HttpServletResponse.SC_FORBIDDEN,
"Unable to identify or authenticate user");
return;
}
LOG.info("Sending token: {" + ugi.getUserName() + "," + req.getRemoteAddr() +"}");
final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
String renewer = req.getParameter(RENEWER);
final String renewerFinal = (renewer == null) ?
req.getUserPrincipal().getName() : renewer;
DataOutputStream dos = null;
try {
dos = new DataOutputStream(resp.getOutputStream());
final DataOutputStream dosFinal = dos; // for doAs block
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException {
final Credentials ts = DelegationTokenSecretManager.createCredentials(
nn, ugi, renewerFinal);
ts.write(dosFinal);
return null;
}
});
} catch(Exception e) {
LOG.info("Exception while sending token. Re-throwing ", e);
resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
} finally {
if(dos != null) dos.close();
}
}
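  // Illustrative request sketch; the host, port and renewer are hypothetical:
  //   GET http://nn.example.com:50070/getDelegationToken?renewer=yarn
  // The response body is a Hadoop Credentials structure, written via
  // Credentials#write, containing the issued delegation token.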
}
| 3,449 | 38.655172 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature;
import com.google.common.base.Preconditions;
/**
* Helper class to perform append operation.
*/
final class FSDirAppendOp {
/**
* Private constructor for preventing FSDirAppendOp object creation.
* Static-only class.
*/
private FSDirAppendOp() {}
/**
* Append to an existing file.
* <p>
*
* The method returns the last block of the file if this is a partial block,
* which can still be used for writing more data. The client uses the
* returned block locations to form the data pipeline for this block.<br>
* The {@link LocatedBlock} will be null if the last block is full.
* The client then allocates a new block with the next call using
* {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#addBlock}.
* <p>
*
* For description of parameters and exceptions thrown see
* {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#append}
*
* @param fsn namespace
* @param srcArg path name
* @param pc permission checker to check fs permission
* @param holder client name
* @param clientMachine client machine info
* @param newBlock if the data is appended to a new block
* @param logRetryCache whether to record RPC ids in editlog for retry cache
* rebuilding
* @return the last block with status
*/
static LastBlockWithStatus appendFile(final FSNamesystem fsn,
final String srcArg, final FSPermissionChecker pc, final String holder,
final String clientMachine, final boolean newBlock,
final boolean logRetryCache) throws IOException {
assert fsn.hasWriteLock();
final byte[][] pathComponents = FSDirectory
.getPathComponentsForReservedPath(srcArg);
final LocatedBlock lb;
final FSDirectory fsd = fsn.getFSDirectory();
final String src;
fsd.writeLock();
try {
src = fsd.resolvePath(pc, srcArg, pathComponents);
final INodesInPath iip = fsd.getINodesInPath4Write(src);
// Verify that the destination does not exist as a directory already
final INode inode = iip.getLastINode();
final String path = iip.getPath();
if (inode != null && inode.isDirectory()) {
throw new FileAlreadyExistsException("Cannot append to directory "
+ path + "; already exists as a directory.");
}
if (fsd.isPermissionEnabled()) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
if (inode == null) {
throw new FileNotFoundException(
"Failed to append to non-existent file " + path + " for client "
+ clientMachine);
}
final INodeFile file = INodeFile.valueOf(inode, path, true);
BlockManager blockManager = fsd.getBlockManager();
final BlockStoragePolicy lpPolicy = blockManager
.getStoragePolicy("LAZY_PERSIST");
if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
throw new UnsupportedOperationException(
"Cannot append to lazy persist file " + path);
}
// Opening an existing file for append - may need to recover lease.
fsn.recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, path, holder,
clientMachine, false);
final BlockInfo lastBlock = file.getLastBlock();
// Check that the block has at least minimum replication.
if (lastBlock != null && lastBlock.isComplete()
&& !blockManager.isSufficientlyReplicated(lastBlock)) {
throw new IOException("append: lastBlock=" + lastBlock + " of src="
+ path + " is not sufficiently replicated yet.");
}
lb = prepareFileForAppend(fsn, iip, holder, clientMachine, newBlock,
true, logRetryCache);
} catch (IOException ie) {
NameNode.stateChangeLog
.warn("DIR* NameSystem.append: " + ie.getMessage());
throw ie;
} finally {
fsd.writeUnlock();
}
HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, src, false,
FSDirectory.isReservedRawName(srcArg), true);
if (lb != null) {
NameNode.stateChangeLog.debug(
"DIR* NameSystem.appendFile: file {} for {} at {} block {} block"
+ " size {}", srcArg, holder, clientMachine, lb.getBlock(), lb
.getBlock().getNumBytes());
}
return new LastBlockWithStatus(lb, stat);
}
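  // Illustrative client-side sketch, not in the upstream source; the path and
  // payload are hypothetical:
  //   FileSystem fs = FileSystem.get(conf);
  //   try (FSDataOutputStream out = fs.append(new Path("/user/alice/log"))) {
  //     out.writeBytes("more data\n");
  //   }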
/**
* Convert current node to under construction.
* Recreate in-memory lease record.
*
* @param fsn namespace
* @param iip inodes in the path containing the file
* @param leaseHolder identifier of the lease holder on this file
* @param clientMachine identifier of the client machine
* @param newBlock if the data is appended to a new block
* @param writeToEditLog whether to persist this change to the edit log
* @param logRetryCache whether to record RPC ids in editlog for retry cache
* rebuilding
* @return the last block locations if the block is partial or null otherwise
* @throws IOException
*/
static LocatedBlock prepareFileForAppend(final FSNamesystem fsn,
final INodesInPath iip, final String leaseHolder,
final String clientMachine, final boolean newBlock,
final boolean writeToEditLog, final boolean logRetryCache)
throws IOException {
assert fsn.hasWriteLock();
final INodeFile file = iip.getLastINode().asFile();
final QuotaCounts delta = verifyQuotaForUCBlock(fsn, file, iip);
file.recordModification(iip.getLatestSnapshotId());
file.toUnderConstruction(leaseHolder, clientMachine);
fsn.getLeaseManager().addLease(
file.getFileUnderConstructionFeature().getClientName(), file.getId());
LocatedBlock ret = null;
if (!newBlock) {
FSDirectory fsd = fsn.getFSDirectory();
ret = fsd.getBlockManager().convertLastBlockToUnderConstruction(file, 0);
if (ret != null && delta != null) {
Preconditions.checkState(delta.getStorageSpace() >= 0, "appending to"
+ " a block with size larger than the preferred block size");
fsd.writeLock();
try {
fsd.updateCountNoQuotaCheck(iip, iip.length() - 1, delta);
} finally {
fsd.writeUnlock();
}
}
} else {
BlockInfo lastBlock = file.getLastBlock();
if (lastBlock != null) {
ExtendedBlock blk = new ExtendedBlock(fsn.getBlockPoolId(), lastBlock);
ret = new LocatedBlock(blk, new DatanodeInfo[0]);
}
}
if (writeToEditLog) {
final String path = iip.getPath();
if (NameNodeLayoutVersion.supports(Feature.APPEND_NEW_BLOCK,
fsn.getEffectiveLayoutVersion())) {
fsn.getEditLog().logAppendFile(path, file, newBlock, logRetryCache);
} else {
fsn.getEditLog().logOpenFile(path, file, false, logRetryCache);
}
}
return ret;
}
/**
* Verify quota when using the preferred block size for UC block. This is
* usually used by append and truncate.
*
* @throws QuotaExceededException when violating the storage quota
* @return expected quota usage update. null means no change or no need to
* update quota usage later
*/
private static QuotaCounts verifyQuotaForUCBlock(FSNamesystem fsn,
INodeFile file, INodesInPath iip) throws QuotaExceededException {
FSDirectory fsd = fsn.getFSDirectory();
if (!fsn.isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
// Do not check quota if editlog is still being processed
return null;
}
if (file.getLastBlock() != null) {
final QuotaCounts delta = computeQuotaDeltaForUCBlock(fsn, file);
fsd.readLock();
try {
FSDirectory.verifyQuota(iip, iip.length() - 1, delta, null);
return delta;
} finally {
fsd.readUnlock();
}
}
return null;
}
/** Compute quota change for converting a complete block to a UC block. */
private static QuotaCounts computeQuotaDeltaForUCBlock(FSNamesystem fsn,
INodeFile file) {
final QuotaCounts delta = new QuotaCounts.Builder().build();
final BlockInfo lastBlock = file.getLastBlock();
if (lastBlock != null) {
final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
final short repl = file.getPreferredBlockReplication();
delta.addStorageSpace(diff * repl);
final BlockStoragePolicy policy = fsn.getFSDirectory()
.getBlockStoragePolicySuite().getPolicy(file.getStoragePolicyID());
List<StorageType> types = policy.chooseStorageTypes(repl);
for (StorageType t : types) {
if (t.supportTypeQuota()) {
delta.addTypeSpace(t, diff);
}
}
}
return delta;
}
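  // Worked example, illustrative only: with a preferred block size of 128 MB,
  // a 100 MB last block and replication 3, the delta computed above charges
  // (128 - 100) MB * 3 = 84 MB of storage space, plus 28 MB against each
  // storage type that enforces a type quota.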
}
| 10,431 | 38.816794 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystemLock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.common.annotations.VisibleForTesting;
/**
* Mimics a ReentrantReadWriteLock so more sophisticated locking capabilities
* are possible.
*/
class FSNamesystemLock implements ReadWriteLock {
@VisibleForTesting
protected ReentrantReadWriteLock coarseLock;
FSNamesystemLock(boolean fair) {
this.coarseLock = new ReentrantReadWriteLock(fair);
}
@Override
public Lock readLock() {
return coarseLock.readLock();
}
@Override
public Lock writeLock() {
return coarseLock.writeLock();
}
public int getReadHoldCount() {
return coarseLock.getReadHoldCount();
}
public int getWriteHoldCount() {
return coarseLock.getWriteHoldCount();
}
public boolean isWriteLockedByCurrentThread() {
return coarseLock.isWriteLockedByCurrentThread();
}
}
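  // Illustrative usage sketch, not in the upstream source:
  //   FSNamesystemLock lock = new FSNamesystemLock(true); // fair ordering
  //   lock.readLock().lock();
  //   try {
  //     // read namesystem state
  //   } finally {
  //     lock.readLock().unlock();
  //   }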
| 1,872 | 28.730159 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
import org.apache.hadoop.ipc.StandbyException;
@InterfaceAudience.Private
public class BackupState extends HAState {
public BackupState() {
super(HAServiceState.STANDBY);
}
@Override // HAState
public void checkOperation(HAContext context, OperationCategory op)
throws StandbyException {
context.checkOperation(op);
}
@Override // HAState
public boolean shouldPopulateReplQueues() {
return false;
}
@Override // HAState
public void enterState(HAContext context) throws ServiceFailedException {
try {
context.startActiveServices();
} catch (IOException e) {
throw new ServiceFailedException("Failed to start backup services", e);
}
}
@Override // HAState
public void exitState(HAContext context) throws ServiceFailedException {
try {
context.stopActiveServices();
} catch (IOException e) {
throw new ServiceFailedException("Failed to stop backup services", e);
}
}
@Override // HAState
public void prepareToExitState(HAContext context) throws ServiceFailedException {
context.prepareToStopStandbyServices();
}
}
| 2,356 | 32.197183 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.util.ChunkedArrayList;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
import static org.apache.hadoop.util.Time.now;
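/**
 * Static helpers used by {@link FSNamesystem} and {@link FSDirectory} for the
 * file-write path: creating files, allocating, persisting and abandoning
 * blocks, and completing files.
 */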
class FSDirWriteFileOp {
private FSDirWriteFileOp() {}
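  /**
   * Remove the last block of an under-construction file from the file and
   * from the blocks map, and release the space it reserved against the quota.
   * @return true if the block was removed; false if it could not be removed
   *         (e.g. it is not the file's last block).
   */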
static boolean unprotectedRemoveBlock(
FSDirectory fsd, String path, INodesInPath iip, INodeFile fileNode,
Block block) throws IOException {
// modify file-> block and blocksMap
// fileNode should be under construction
BlockInfoContiguousUnderConstruction uc = fileNode.removeLastBlock(block);
if (uc == null) {
return false;
}
fsd.getBlockManager().removeBlockFromMap(block);
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.removeBlock: "
+path+" with "+block
+" block is removed from the file system");
}
// update space consumed
fsd.updateCount(iip, 0, -fileNode.getPreferredBlockSize(),
fileNode.getPreferredBlockReplication(), true);
return true;
}
/**
* Persist the block list for the inode.
*/
static void persistBlocks(
FSDirectory fsd, String path, INodeFile file, boolean logRetryCache) {
assert fsd.getFSNamesystem().hasWriteLock();
Preconditions.checkArgument(file.isUnderConstruction());
fsd.getEditLog().logUpdateBlocks(path, file, logRetryCache);
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("persistBlocks: " + path
+ " with " + file.getBlocks().length + " blocks is persisted to" +
" the file system");
}
}
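  /**
   * Remove a block abandoned by a client from its under-construction file.
   * The file is resolved by inode id, or by path for older clients that did
   * not supply one, and the updated block list is logged to the edit log.
   */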
static void abandonBlock(
FSDirectory fsd, FSPermissionChecker pc, ExtendedBlock b, long fileId,
String src, String holder) throws IOException {
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
final INode inode;
final INodesInPath iip;
if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
// Older clients may not have given us an inode ID to work with.
// In this case, we have to try to resolve the path and hope it
// hasn't changed or been deleted since the file was opened for write.
iip = fsd.getINodesInPath(src, true);
inode = iip.getLastINode();
} else {
inode = fsd.getInode(fileId);
iip = INodesInPath.fromINode(inode);
if (inode != null) {
src = iip.getPath();
}
}
FSNamesystem fsn = fsd.getFSNamesystem();
final INodeFile file = fsn.checkLease(src, holder, inode, fileId);
Preconditions.checkState(file.isUnderConstruction());
Block localBlock = ExtendedBlock.getLocalBlock(b);
fsd.writeLock();
try {
// Remove the block from the pending creates list
if (!unprotectedRemoveBlock(fsd, src, iip, file, localBlock)) {
return;
}
} finally {
fsd.writeUnlock();
}
persistBlocks(fsd, src, file, false);
}
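  /**
   * Verify that the given block, if not null, belongs to this namesystem's
   * block pool.
   */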
static void checkBlock(FSNamesystem fsn, ExtendedBlock block)
throws IOException {
String bpId = fsn.getBlockPoolId();
if (block != null && !bpId.equals(block.getBlockPoolId())) {
throw new IOException("Unexpected BlockPoolId " + block.getBlockPoolId()
+ " - expected " + bpId);
}
}
/**
* Part I of getAdditionalBlock().
   * Analyze the state of the file under read lock to determine whether the
   * client can add a new block, detecting retries and lease mismatches and
   * verifying minimal replication of the penultimate block.
*
* Generate target DataNode locations for the new block,
* but do not create the new block yet.
*/
static ValidateAddBlockResult validateAddBlock(
FSNamesystem fsn, FSPermissionChecker pc,
String src, long fileId, String clientName,
ExtendedBlock previous, LocatedBlock[] onRetryBlock) throws IOException {
final long blockSize;
final int replication;
final byte storagePolicyID;
String clientMachine;
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsn.dir.resolvePath(pc, src, pathComponents);
FileState fileState = analyzeFileState(fsn, src, fileId, clientName,
previous, onRetryBlock);
final INodeFile pendingFile = fileState.inode;
// Check if the penultimate block is minimally replicated
if (!fsn.checkFileProgress(src, pendingFile, false)) {
throw new NotReplicatedYetException("Not replicated yet: " + src);
}
if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
// This is a retry. No need to generate new locations.
// Use the last block if it has locations.
return null;
}
if (pendingFile.getBlocks().length >= fsn.maxBlocksPerFile) {
throw new IOException("File has reached the limit on maximum number of"
+ " blocks (" + DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY
+ "): " + pendingFile.getBlocks().length + " >= "
+ fsn.maxBlocksPerFile);
}
blockSize = pendingFile.getPreferredBlockSize();
clientMachine = pendingFile.getFileUnderConstructionFeature()
.getClientMachine();
replication = pendingFile.getFileReplication();
storagePolicyID = pendingFile.getStoragePolicyID();
return new ValidateAddBlockResult(blockSize, replication, storagePolicyID,
clientMachine);
}
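  /**
   * Build a {@link LocatedBlock} for the given block and target storages at
   * the given file offset, and attach a WRITE access block token.
   */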
static LocatedBlock makeLocatedBlock(FSNamesystem fsn, Block blk,
DatanodeStorageInfo[] locs, long offset) throws IOException {
LocatedBlock lBlk = BlockManager.newLocatedBlock(fsn.getExtendedBlock(blk),
locs, offset, false);
fsn.getFSDirectory().getBlockManager()
.setBlockToken(lBlk, BlockTokenIdentifier.AccessMode.WRITE);
return lBlk;
}
/**
* Part II of getAdditionalBlock().
* Should repeat the same analysis of the file state as in Part 1,
* but under the write lock.
* If the conditions still hold, then allocate a new block with
* the new targets, add it to the INode and to the BlocksMap.
*/
static LocatedBlock storeAllocatedBlock(FSNamesystem fsn, String src,
long fileId, String clientName, ExtendedBlock previous,
DatanodeStorageInfo[] targets) throws IOException {
long offset;
// Run the full analysis again, since things could have changed
// while chooseTarget() was executing.
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
FileState fileState = analyzeFileState(fsn, src, fileId, clientName,
previous, onRetryBlock);
final INodeFile pendingFile = fileState.inode;
src = fileState.path;
if (onRetryBlock[0] != null) {
if (onRetryBlock[0].getLocations().length > 0) {
        // This is a retry. Just return the last block since it already has locations.
return onRetryBlock[0];
} else {
// add new chosen targets to already allocated block and return
BlockInfo lastBlockInFile = pendingFile.getLastBlock();
((BlockInfoContiguousUnderConstruction) lastBlockInFile)
.setExpectedLocations(targets);
offset = pendingFile.computeFileSize();
return makeLocatedBlock(fsn, lastBlockInFile, targets, offset);
}
}
// commit the last block and complete it if it has minimum replicas
fsn.commitOrCompleteLastBlock(pendingFile, fileState.iip,
ExtendedBlock.getLocalBlock(previous));
// allocate new block, record block locations in INode.
Block newBlock = fsn.createNewBlock();
INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile);
saveAllocatedBlock(fsn, src, inodesInPath, newBlock, targets);
persistNewBlock(fsn, src, pendingFile);
offset = pendingFile.computeFileSize();
// Return located block
return makeLocatedBlock(fsn, newBlock, targets, offset);
}
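  /**
   * Choose target storages for a new block using the replication factor,
   * block size and storage policy computed by
   * {@link #validateAddBlock}, honoring the client-supplied excluded and
   * favored nodes.
   */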
static DatanodeStorageInfo[] chooseTargetForNewBlock(
BlockManager bm, String src, DatanodeInfo[] excludedNodes, String[]
favoredNodes, ValidateAddBlockResult r) throws IOException {
Node clientNode = bm.getDatanodeManager()
.getDatanodeByHost(r.clientMachine);
if (clientNode == null) {
clientNode = getClientNode(bm, r.clientMachine);
}
Set<Node> excludedNodesSet = null;
if (excludedNodes != null) {
excludedNodesSet = new HashSet<>(excludedNodes.length);
Collections.addAll(excludedNodesSet, excludedNodes);
}
List<String> favoredNodesList = (favoredNodes == null) ? null
: Arrays.asList(favoredNodes);
// choose targets for the new block to be allocated.
return bm.chooseTarget4NewBlock(src, r.replication, clientNode,
excludedNodesSet, r.blockSize,
favoredNodesList, r.storagePolicyID);
}
/**
* Resolve clientmachine address to get a network location path
*/
static Node getClientNode(BlockManager bm, String clientMachine) {
List<String> hosts = new ArrayList<>(1);
hosts.add(clientMachine);
List<String> rName = bm.getDatanodeManager()
.resolveNetworkLocation(hosts);
Node clientNode = null;
if (rName != null) {
// Able to resolve clientMachine mapping.
      // Create a temp node to find out the rack-local nodes
clientNode = new NodeBase(rName.get(0) + NodeBase.PATH_SEPARATOR_STR
+ clientMachine);
}
return clientNode;
}
/**
* Create a new file or overwrite an existing file<br>
*
   * Once the file is created, the client allocates a new block with the next
* call using {@link ClientProtocol#addBlock}.
* <p>
* For description of parameters and exceptions thrown see
* {@link ClientProtocol#create}
*/
static HdfsFileStatus startFile(
FSNamesystem fsn, FSPermissionChecker pc, String src,
PermissionStatus permissions, String holder, String clientMachine,
EnumSet<CreateFlag> flag, boolean createParent,
short replication, long blockSize,
EncryptionKeyInfo ezInfo, INode.BlocksMapUpdateInfo toRemoveBlocks,
boolean logRetryEntry)
throws IOException {
assert fsn.hasWriteLock();
boolean create = flag.contains(CreateFlag.CREATE);
boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
boolean isLazyPersist = flag.contains(CreateFlag.LAZY_PERSIST);
CipherSuite suite = null;
CryptoProtocolVersion version = null;
KeyProviderCryptoExtension.EncryptedKeyVersion edek = null;
if (ezInfo != null) {
edek = ezInfo.edek;
suite = ezInfo.suite;
version = ezInfo.protocolVersion;
}
boolean isRawPath = FSDirectory.isReservedRawName(src);
FSDirectory fsd = fsn.getFSDirectory();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
INodesInPath iip = fsd.getINodesInPath4Write(src);
// Verify that the destination does not exist as a directory already.
final INode inode = iip.getLastINode();
if (inode != null && inode.isDirectory()) {
throw new FileAlreadyExistsException(src +
" already exists as a directory");
}
final INodeFile myFile = INodeFile.valueOf(inode, src, true);
if (fsd.isPermissionEnabled()) {
if (overwrite && myFile != null) {
fsd.checkPathAccess(pc, iip, FsAction.WRITE);
}
/*
* To overwrite existing file, need to check 'w' permission
       * of the parent (equal to the ancestor in this case)
*/
fsd.checkAncestorAccess(pc, iip, FsAction.WRITE);
}
if (!createParent) {
fsd.verifyParentDir(iip, src);
}
if (myFile == null && !create) {
throw new FileNotFoundException("Can't overwrite non-existent " +
src + " for client " + clientMachine);
}
FileEncryptionInfo feInfo = null;
final EncryptionZone zone = fsd.getEZForPath(iip);
if (zone != null) {
// The path is now within an EZ, but we're missing encryption parameters
if (suite == null || edek == null) {
throw new RetryStartFileException();
}
// Path is within an EZ and we have provided encryption parameters.
// Make sure that the generated EDEK matches the settings of the EZ.
final String ezKeyName = zone.getKeyName();
if (!ezKeyName.equals(edek.getEncryptionKeyName())) {
throw new RetryStartFileException();
}
feInfo = new FileEncryptionInfo(suite, version,
edek.getEncryptedKeyVersion().getMaterial(),
edek.getEncryptedKeyIv(),
ezKeyName, edek.getEncryptionKeyVersionName());
}
if (myFile != null) {
if (overwrite) {
List<INode> toRemoveINodes = new ChunkedArrayList<>();
List<Long> toRemoveUCFiles = new ChunkedArrayList<>();
long ret = FSDirDeleteOp.delete(fsd, iip, toRemoveBlocks,
toRemoveINodes, toRemoveUCFiles, now());
if (ret >= 0) {
iip = INodesInPath.replace(iip, iip.length() - 1, null);
FSDirDeleteOp.incrDeletedFileCount(ret);
fsn.removeLeasesAndINodes(toRemoveUCFiles, toRemoveINodes, true);
}
} else {
// If lease soft limit time is expired, recover the lease
fsn.recoverLeaseInternal(FSNamesystem.RecoverLeaseOp.CREATE_FILE, iip,
src, holder, clientMachine, false);
throw new FileAlreadyExistsException(src + " for client " +
clientMachine + " already exists");
}
}
fsn.checkFsObjectLimit();
INodeFile newNode = null;
Map.Entry<INodesInPath, String> parent = FSDirMkdirOp
.createAncestorDirectories(fsd, iip, permissions);
if (parent != null) {
iip = addFile(fsd, parent.getKey(), parent.getValue(), permissions,
replication, blockSize, holder, clientMachine);
newNode = iip != null ? iip.getLastINode().asFile() : null;
}
if (newNode == null) {
throw new IOException("Unable to add " + src + " to namespace");
}
fsn.leaseManager.addLease(
newNode.getFileUnderConstructionFeature().getClientName(),
newNode.getId());
if (feInfo != null) {
fsd.setFileEncryptionInfo(src, feInfo);
newNode = fsd.getInode(newNode.getId()).asFile();
}
setNewINodeStoragePolicy(fsd.getBlockManager(), newNode, iip,
isLazyPersist);
fsd.getEditLog().logOpenFile(src, newNode, overwrite, logRetryEntry);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: added " +
src + " inode " + newNode.getId() + " " + holder);
}
return FSDirStatAndListingOp.getFileInfo(fsd, src, false, isRawPath, true);
}
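  /**
   * If the path lies within an encryption zone, choose a crypto protocol
   * version supported by both the zone and the client and return the zone's
   * cipher suite and key name.
   * @return the encryption key info, or null if the path is not in an
   *         encryption zone.
   */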
static EncryptionKeyInfo getEncryptionKeyInfo(FSNamesystem fsn,
FSPermissionChecker pc, String src,
CryptoProtocolVersion[] supportedVersions)
throws IOException {
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
FSDirectory fsd = fsn.getFSDirectory();
src = fsd.resolvePath(pc, src, pathComponents);
INodesInPath iip = fsd.getINodesInPath4Write(src);
// Nothing to do if the path is not within an EZ
final EncryptionZone zone = fsd.getEZForPath(iip);
if (zone == null) {
return null;
}
CryptoProtocolVersion protocolVersion = fsn.chooseProtocolVersion(
zone, supportedVersions);
CipherSuite suite = zone.getSuite();
String ezKeyName = zone.getKeyName();
Preconditions.checkNotNull(protocolVersion);
Preconditions.checkNotNull(suite);
Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN),
"Chose an UNKNOWN CipherSuite!");
Preconditions.checkNotNull(ezKeyName);
return new EncryptionKeyInfo(protocolVersion, suite, ezKeyName);
}
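  /**
   * Re-create an INodeFile during edit log replay and add it to the
   * namespace, restoring its ACLs and XAttrs and, if it was under
   * construction, its client name and machine.
   * @return the new inode, or null if it could not be added.
   */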
static INodeFile addFileForEditLog(
FSDirectory fsd, long id, INodesInPath existing, byte[] localName,
PermissionStatus permissions, List<AclEntry> aclEntries,
List<XAttr> xAttrs, short replication, long modificationTime, long atime,
long preferredBlockSize, boolean underConstruction, String clientName,
String clientMachine, byte storagePolicyId) {
final INodeFile newNode;
assert fsd.hasWriteLock();
if (underConstruction) {
newNode = newINodeFile(id, permissions, modificationTime,
modificationTime, replication,
preferredBlockSize,
storagePolicyId);
newNode.toUnderConstruction(clientName, clientMachine);
} else {
newNode = newINodeFile(id, permissions, modificationTime,
atime, replication,
preferredBlockSize,
storagePolicyId);
}
newNode.setLocalName(localName);
try {
INodesInPath iip = fsd.addINode(existing, newNode);
if (iip != null) {
if (aclEntries != null) {
AclStorage.updateINodeAcl(newNode, aclEntries, CURRENT_STATE_ID);
}
if (xAttrs != null) {
XAttrStorage.updateINodeXAttrs(newNode, xAttrs, CURRENT_STATE_ID);
}
return newNode;
}
} catch (IOException e) {
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug(
"DIR* FSDirectory.unprotectedAddFile: exception when add "
+ existing.getPath() + " to the file system", e);
}
}
return null;
}
/**
* Add a block to the file. Returns a reference to the added block.
*/
private static BlockInfo addBlock(
FSDirectory fsd, String path, INodesInPath inodesInPath, Block block,
DatanodeStorageInfo[] targets) throws IOException {
fsd.writeLock();
try {
final INodeFile fileINode = inodesInPath.getLastINode().asFile();
Preconditions.checkState(fileINode.isUnderConstruction());
      // check quota limits and update space consumed
fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
fileINode.getPreferredBlockReplication(), true);
// associate new last block for the file
BlockInfoContiguousUnderConstruction blockInfo =
new BlockInfoContiguousUnderConstruction(
block,
fileINode.getFileReplication(),
HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
targets);
fsd.getBlockManager().addBlockCollection(blockInfo, fileINode);
fileINode.addBlock(blockInfo);
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.addBlock: "
+ path + " with " + block
+ " block is added to the in-memory "
+ "file system");
}
return blockInfo;
} finally {
fsd.writeUnlock();
}
}
/**
* Add the given filename to the fs.
* @return the new INodesInPath instance that contains the new INode
*/
private static INodesInPath addFile(
FSDirectory fsd, INodesInPath existing, String localName,
PermissionStatus permissions, short replication, long preferredBlockSize,
String clientName, String clientMachine)
throws IOException {
long modTime = now();
INodeFile newNode = newINodeFile(fsd.allocateNewInodeId(), permissions,
modTime, modTime, replication, preferredBlockSize);
newNode.setLocalName(localName.getBytes(Charsets.UTF_8));
newNode.toUnderConstruction(clientName, clientMachine);
INodesInPath newiip;
fsd.writeLock();
try {
newiip = fsd.addINode(existing, newNode);
} finally {
fsd.writeUnlock();
}
if (newiip == null) {
NameNode.stateChangeLog.info("DIR* addFile: failed to add " +
existing.getPath() + "/" + localName);
return null;
}
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* addFile: " + localName + " is added");
}
return newiip;
}
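  /**
   * Verify that the client still holds the lease on the file and compare the
   * client-reported previous block with the file's actual last block, so that
   * legitimate retries of getAdditionalBlock() are distinguished from invalid
   * requests. On a detected retry the existing last block is returned through
   * {@code onRetryBlock}.
   */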
private static FileState analyzeFileState(
FSNamesystem fsn, String src, long fileId, String clientName,
ExtendedBlock previous, LocatedBlock[] onRetryBlock)
throws IOException {
assert fsn.hasReadLock();
checkBlock(fsn, previous);
onRetryBlock[0] = null;
fsn.checkNameNodeSafeMode("Cannot add block to " + src);
// have we exceeded the configured limit of fs objects.
fsn.checkFsObjectLimit();
Block previousBlock = ExtendedBlock.getLocalBlock(previous);
final INode inode;
final INodesInPath iip;
if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
// Older clients may not have given us an inode ID to work with.
// In this case, we have to try to resolve the path and hope it
// hasn't changed or been deleted since the file was opened for write.
iip = fsn.dir.getINodesInPath4Write(src);
inode = iip.getLastINode();
} else {
// Newer clients pass the inode ID, so we can just get the inode
// directly.
inode = fsn.dir.getInode(fileId);
iip = INodesInPath.fromINode(inode);
if (inode != null) {
src = iip.getPath();
}
}
final INodeFile file = fsn.checkLease(src, clientName, inode, fileId);
BlockInfo lastBlockInFile = file.getLastBlock();
if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
// The block that the client claims is the current last block
// doesn't match up with what we think is the last block. There are
// four possibilities:
// 1) This is the first block allocation of an append() pipeline
// which started appending exactly at or exceeding the block boundary.
// In this case, the client isn't passed the previous block,
// so it makes the allocateBlock() call with previous=null.
// We can distinguish this since the last block of the file
// will be exactly a full block.
// 2) This is a retry from a client that missed the response of a
// prior getAdditionalBlock() call, perhaps because of a network
// timeout, or because of an HA failover. In that case, we know
// by the fact that the client is re-issuing the RPC that it
      //    never began to write to the old block. Hence it is safe to
      //    return the existing block.
// 3) This is an entirely bogus request/bug -- we should error out
// rather than potentially appending a new block with an empty
// one in the middle, etc
// 4) This is a retry from a client that timed out while
// the prior getAdditionalBlock() is still being processed,
// currently working on chooseTarget().
// There are no means to distinguish between the first and
// the second attempts in Part I, because the first one hasn't
// changed the namesystem state yet.
// We run this analysis again in Part II where case 4 is impossible.
BlockInfo penultimateBlock = file.getPenultimateBlock();
if (previous == null &&
lastBlockInFile != null &&
lastBlockInFile.getNumBytes() >= file.getPreferredBlockSize() &&
lastBlockInFile.isComplete()) {
// Case 1
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.allocateBlock: handling block allocation" +
" writing to a file with a complete previous block: src=" +
src + " lastBlock=" + lastBlockInFile);
}
} else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
if (lastBlockInFile.getNumBytes() != 0) {
throw new IOException(
"Request looked like a retry to allocate block " +
lastBlockInFile + " but it already contains " +
lastBlockInFile.getNumBytes() + " bytes");
}
// Case 2
// Return the last block.
NameNode.stateChangeLog.info("BLOCK* allocateBlock: caught retry for " +
"allocation of a new block in " + src + ". Returning previously" +
" allocated block " + lastBlockInFile);
long offset = file.computeFileSize();
BlockInfoContiguousUnderConstruction lastBlockUC =
(BlockInfoContiguousUnderConstruction) lastBlockInFile;
onRetryBlock[0] = makeLocatedBlock(fsn, lastBlockInFile,
lastBlockUC.getExpectedStorageLocations(), offset);
return new FileState(file, src, iip);
} else {
// Case 3
throw new IOException("Cannot allocate block in " + src + ": " +
"passed 'previous' block " + previous + " does not match actual " +
"last block in file " + lastBlockInFile);
}
}
return new FileState(file, src, iip);
}
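  /**
   * Complete a file that is being written: commit its last block and, once
   * all blocks are minimally replicated, finalize the file and release its
   * lease.
   * @return true if the file was closed; false if its blocks are not yet
   *         minimally replicated and the client should retry.
   */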
static boolean completeFile(FSNamesystem fsn, FSPermissionChecker pc,
final String srcArg, String holder, ExtendedBlock last, long fileId)
throws IOException {
String src = srcArg;
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.completeFile: " +
src + " for " + holder);
}
checkBlock(fsn, last);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsn.dir.resolvePath(pc, src, pathComponents);
boolean success = completeFileInternal(fsn, src, holder,
ExtendedBlock.getLocalBlock(last),
fileId);
if (success) {
NameNode.stateChangeLog.info("DIR* completeFile: " + srcArg
+ " is closed by " + holder);
}
return success;
}
private static boolean completeFileInternal(
FSNamesystem fsn, String src, String holder, Block last, long fileId)
throws IOException {
assert fsn.hasWriteLock();
final INodeFile pendingFile;
final INodesInPath iip;
INode inode = null;
try {
if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
// Older clients may not have given us an inode ID to work with.
// In this case, we have to try to resolve the path and hope it
// hasn't changed or been deleted since the file was opened for write.
iip = fsn.dir.getINodesInPath(src, true);
inode = iip.getLastINode();
} else {
inode = fsn.dir.getInode(fileId);
iip = INodesInPath.fromINode(inode);
if (inode != null) {
src = iip.getPath();
}
}
pendingFile = fsn.checkLease(src, holder, inode, fileId);
} catch (LeaseExpiredException lee) {
if (inode != null && inode.isFile() &&
!inode.asFile().isUnderConstruction()) {
// This could be a retry RPC - i.e the client tried to close
// the file, but missed the RPC response. Thus, it is trying
// again to close the file. If the file still exists and
// the client's view of the last block matches the actual
// last block, then we'll treat it as a successful close.
// See HDFS-3031.
final Block realLastBlock = inode.asFile().getLastBlock();
if (Block.matchingIdAndGenStamp(last, realLastBlock)) {
NameNode.stateChangeLog.info("DIR* completeFile: " +
"request from " + holder + " to complete inode " + fileId +
"(" + src + ") which is already closed. But, it appears to be " +
"an RPC retry. Returning success");
return true;
}
}
throw lee;
}
// Check the state of the penultimate block. It should be completed
// before attempting to complete the last one.
if (!fsn.checkFileProgress(src, pendingFile, false)) {
return false;
}
// commit the last block and complete it if it has minimum replicas
fsn.commitOrCompleteLastBlock(pendingFile, iip, last);
if (!fsn.checkFileProgress(src, pendingFile, true)) {
return false;
}
fsn.finalizeINodeFileUnderConstruction(src, pendingFile,
Snapshot.CURRENT_STATE_ID);
return true;
}
private static INodeFile newINodeFile(
long id, PermissionStatus permissions, long mtime, long atime,
short replication, long preferredBlockSize, byte storagePolicyId) {
return new INodeFile(id, null, permissions, mtime, atime,
BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize,
storagePolicyId);
}
private static INodeFile newINodeFile(long id, PermissionStatus permissions,
long mtime, long atime, short replication, long preferredBlockSize) {
return newINodeFile(id, permissions, mtime, atime, replication,
preferredBlockSize, (byte)0);
}
/**
* Persist the new block (the last block of the given file).
*/
private static void persistNewBlock(
FSNamesystem fsn, String path, INodeFile file) {
Preconditions.checkArgument(file.isUnderConstruction());
fsn.getEditLog().logAddBlock(path, file);
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("persistNewBlock: "
+ path + " with new block " + file.getLastBlock().toString()
+ ", current total block count is " + file.getBlocks().length);
}
}
/**
* Save allocated block at the given pending filename
*
* @param fsn FSNamesystem
* @param src path to the file
* @param inodesInPath representing each of the components of src.
* The last INode is the INode for {@code src} file.
   * @param newBlock newly allocated block to be saved
   * @param targets target datanodes where replicas of the new block are placed
* @throws QuotaExceededException If addition of block exceeds space quota
*/
private static void saveAllocatedBlock(
FSNamesystem fsn, String src, INodesInPath inodesInPath, Block newBlock,
DatanodeStorageInfo[] targets)
throws IOException {
assert fsn.hasWriteLock();
BlockInfo b = addBlock(fsn.dir, src, inodesInPath, newBlock,
targets);
NameNode.stateChangeLog.info("BLOCK* allocate " + b + " for " + src);
DatanodeStorageInfo.incrementBlocksScheduled(targets);
}
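  /**
   * Set the storage policy of a newly created file: LAZY_PERSIST when the
   * create request asked for it (failing if that policy is disabled),
   * otherwise inherit the effective policy of the ancestor directory when
   * that policy is marked copy-on-create.
   */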
private static void setNewINodeStoragePolicy(BlockManager bm, INodeFile
inode, INodesInPath iip, boolean isLazyPersist)
throws IOException {
if (isLazyPersist) {
BlockStoragePolicy lpPolicy =
bm.getStoragePolicy("LAZY_PERSIST");
// Set LAZY_PERSIST storage policy if the flag was passed to
// CreateFile.
if (lpPolicy == null) {
throw new HadoopIllegalArgumentException(
"The LAZY_PERSIST storage policy has been disabled " +
"by the administrator.");
}
inode.setStoragePolicyID(lpPolicy.getId(),
iip.getLatestSnapshotId());
} else {
BlockStoragePolicy effectivePolicy =
bm.getStoragePolicy(inode.getStoragePolicyID());
if (effectivePolicy != null &&
effectivePolicy.isCopyOnCreateFile()) {
// Copy effective policy from ancestor directory to current file.
inode.setStoragePolicyID(effectivePolicy.getId(),
iip.getLatestSnapshotId());
}
}
}
private static class FileState {
final INodeFile inode;
final String path;
final INodesInPath iip;
FileState(INodeFile inode, String fullPath, INodesInPath iip) {
this.inode = inode;
this.path = fullPath;
this.iip = iip;
}
}
static class ValidateAddBlockResult {
final long blockSize;
final int replication;
final byte storagePolicyID;
final String clientMachine;
ValidateAddBlockResult(
long blockSize, int replication, byte storagePolicyID,
String clientMachine) {
this.blockSize = blockSize;
this.replication = replication;
this.storagePolicyID = storagePolicyID;
this.clientMachine = clientMachine;
}
}
static class EncryptionKeyInfo {
final CryptoProtocolVersion protocolVersion;
final CipherSuite suite;
final String ezKeyName;
KeyProviderCryptoExtension.EncryptedKeyVersion edek;
EncryptionKeyInfo(
CryptoProtocolVersion protocolVersion, CipherSuite suite,
String ezKeyName) {
this.protocolVersion = protocolVersion;
this.suite = suite;
this.ezKeyName = ezKeyName;
}
}
}
| 35,766 | 39.460407 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.DataInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
/**
* An implementation of the abstract class {@link EditLogInputStream},
 * which is used to update HDFS meta-data state on a backup node.
*
* @see org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol#journal
* (org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration,
* int, int, byte[])
*/
class EditLogBackupInputStream extends EditLogInputStream {
final String address; // sender address
private final ByteBufferInputStream inner;
private DataInputStream in;
private FSEditLogOp.Reader reader = null;
private FSEditLogLoader.PositionTrackingInputStream tracker = null;
private int version = 0;
/**
 * A ByteArrayInputStream that allows replacing the underlying byte array.
*/
private static class ByteBufferInputStream extends ByteArrayInputStream {
ByteBufferInputStream() {
super(new byte[0]);
}
void setData(byte[] newBytes) {
super.buf = newBytes;
super.count = newBytes == null ? 0 : newBytes.length;
super.mark = 0;
reset();
}
/**
     * Number of valid bytes in the underlying buffer.
*/
int length() {
return count;
}
}
EditLogBackupInputStream(String name) throws IOException {
address = name;
inner = new ByteBufferInputStream();
in = null;
reader = null;
}
@Override
public String getName() {
return address;
}
@Override
protected FSEditLogOp nextOp() throws IOException {
Preconditions.checkState(reader != null,
"Must call setBytes() before readOp()");
return reader.readOp(false);
}
@Override
protected FSEditLogOp nextValidOp() {
try {
return reader.readOp(true);
} catch (IOException e) {
throw new RuntimeException("got unexpected IOException " + e, e);
}
}
@Override
public int getVersion(boolean verifyVersion) throws IOException {
return this.version;
}
@Override
public long getPosition() {
return tracker.getPos();
}
@Override
public void close() throws IOException {
in.close();
}
@Override
public long length() throws IOException {
    // size of the in-memory edits buffer
return inner.length();
}
void setBytes(byte[] newBytes, int version) throws IOException {
inner.setData(newBytes);
tracker = new FSEditLogLoader.PositionTrackingInputStream(inner);
in = new DataInputStream(tracker);
this.version = version;
reader = new FSEditLogOp.Reader(in, tracker, version);
}
void clear() throws IOException {
setBytes(null, 0);
reader = null;
this.version = 0;
}
@Override
public long getFirstTxId() {
return HdfsServerConstants.INVALID_TXID;
}
@Override
public long getLastTxId() {
return HdfsServerConstants.INVALID_TXID;
}
@Override
public boolean isInProgress() {
return true;
}
@Override
public void setMaxOpSize(int maxOpSize) {
reader.setMaxOpSize(maxOpSize);
}
@Override
public boolean isLocalLog() {
return true;
}
}
| 4,048 | 24.955128 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import com.google.common.collect.ComparisonChain;
/**
* A unique signature intended to identify checkpoint transactions.
*/
@InterfaceAudience.Private
public class CheckpointSignature extends StorageInfo
implements Comparable<CheckpointSignature> {
private static final String FIELD_SEPARATOR = ":";
private static final int NUM_FIELDS = 7;
String blockpoolID = "";
long mostRecentCheckpointTxId;
long curSegmentTxId;
CheckpointSignature(FSImage fsImage) {
super(fsImage.getStorage());
blockpoolID = fsImage.getBlockPoolID();
mostRecentCheckpointTxId = fsImage.getStorage().getMostRecentCheckpointTxId();
curSegmentTxId = fsImage.getEditLog().getCurSegmentTxId();
}
CheckpointSignature(String str) {
super(NodeType.NAME_NODE);
String[] fields = str.split(FIELD_SEPARATOR);
assert fields.length == NUM_FIELDS :
"Must be " + NUM_FIELDS + " fields in CheckpointSignature";
int i = 0;
layoutVersion = Integer.parseInt(fields[i++]);
namespaceID = Integer.parseInt(fields[i++]);
cTime = Long.parseLong(fields[i++]);
mostRecentCheckpointTxId = Long.parseLong(fields[i++]);
curSegmentTxId = Long.parseLong(fields[i++]);
clusterID = fields[i++];
blockpoolID = fields[i];
}
public CheckpointSignature(StorageInfo info, String blockpoolID,
long mostRecentCheckpointTxId, long curSegmentTxId) {
super(info);
this.blockpoolID = blockpoolID;
this.mostRecentCheckpointTxId = mostRecentCheckpointTxId;
this.curSegmentTxId = curSegmentTxId;
}
/**
* Get the cluster id from CheckpointSignature
* @return the cluster id
*/
@Override
public String getClusterID() {
return clusterID;
}
/**
* Get the block pool id from CheckpointSignature
* @return the block pool id
*/
public String getBlockpoolID() {
return blockpoolID;
}
public long getMostRecentCheckpointTxId() {
return mostRecentCheckpointTxId;
}
public long getCurSegmentTxId() {
return curSegmentTxId;
}
/**
* Set the block pool id of CheckpointSignature.
*
* @param blockpoolID the new blockpool id
*/
public void setBlockpoolID(String blockpoolID) {
this.blockpoolID = blockpoolID;
}
@Override
public String toString() {
return String.valueOf(layoutVersion) + FIELD_SEPARATOR
+ String.valueOf(namespaceID) + FIELD_SEPARATOR
+ String.valueOf(cTime) + FIELD_SEPARATOR
+ String.valueOf(mostRecentCheckpointTxId) + FIELD_SEPARATOR
+ String.valueOf(curSegmentTxId) + FIELD_SEPARATOR
+ clusterID + FIELD_SEPARATOR
+ blockpoolID ;
}
boolean storageVersionMatches(StorageInfo si) throws IOException {
return (layoutVersion == si.layoutVersion) && (cTime == si.cTime);
}
boolean isSameCluster(FSImage si) {
return namespaceID == si.getStorage().namespaceID &&
clusterID.equals(si.getClusterID()) &&
blockpoolID.equals(si.getBlockPoolID());
}
boolean namespaceIdMatches(FSImage si) {
return namespaceID == si.getStorage().namespaceID;
}
void validateStorageInfo(FSImage si) throws IOException {
if (!isSameCluster(si)
|| !storageVersionMatches(si.getStorage())) {
throw new IOException("Inconsistent checkpoint fields.\n"
+ "LV = " + layoutVersion + " namespaceID = " + namespaceID
+ " cTime = " + cTime
+ " ; clusterId = " + clusterID
+ " ; blockpoolId = " + blockpoolID
+ ".\nExpecting respectively: "
+ si.getStorage().layoutVersion + "; "
+ si.getStorage().namespaceID + "; " + si.getStorage().cTime
+ "; " + si.getClusterID() + "; "
+ si.getBlockPoolID() + ".");
}
}
//
// Comparable interface
//
@Override
public int compareTo(CheckpointSignature o) {
return ComparisonChain.start()
.compare(layoutVersion, o.layoutVersion)
.compare(namespaceID, o.namespaceID)
.compare(cTime, o.cTime)
.compare(mostRecentCheckpointTxId, o.mostRecentCheckpointTxId)
.compare(curSegmentTxId, o.curSegmentTxId)
.compare(clusterID, o.clusterID)
.compare(blockpoolID, o.blockpoolID)
.result();
}
@Override
public boolean equals(Object o) {
if (!(o instanceof CheckpointSignature)) {
return false;
}
return compareTo((CheckpointSignature)o) == 0;
}
@Override
public int hashCode() {
return layoutVersion ^ namespaceID ^
(int)(cTime ^ mostRecentCheckpointTxId ^ curSegmentTxId)
^ clusterID.hashCode() ^ blockpoolID.hashCode();
}
}
| 5,683 | 30.932584 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashMap;
import java.util.Map;
import javax.servlet.ServletContext;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
/**
* Encapsulates the HTTP server started by the NameNode.
*/
@InterfaceAudience.Private
public class NameNodeHttpServer {
private HttpServer2 httpServer;
private final Configuration conf;
private final NameNode nn;
private InetSocketAddress httpAddress;
private InetSocketAddress httpsAddress;
private final InetSocketAddress bindAddress;
public static final String NAMENODE_ADDRESS_ATTRIBUTE_KEY = "name.node.address";
public static final String FSIMAGE_ATTRIBUTE_KEY = "name.system.image";
protected static final String NAMENODE_ATTRIBUTE_KEY = "name.node";
public static final String STARTUP_PROGRESS_ATTRIBUTE_KEY = "startup.progress";
NameNodeHttpServer(Configuration conf, NameNode nn,
InetSocketAddress bindAddress) {
this.conf = conf;
this.nn = nn;
this.bindAddress = bindAddress;
}
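  /**
   * Enable WebHDFS on the HTTP server if it is configured: set the user name
   * pattern, install the configured authentication filter on the WebHDFS
   * path and register the WebHDFS Jersey resource packages.
   */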
private void initWebHdfs(Configuration conf) throws IOException {
if (WebHdfsFileSystem.isEnabled(conf)) {
// set user pattern based on configuration file
UserParam.setUserPattern(conf.get(
DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
// add authentication filter for webhdfs
final String className = conf.get(
DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
final String name = className;
final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
Map<String, String> params = getAuthFilterParams(conf);
HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
params, new String[] { pathSpec });
HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
+ ")");
// add webhdfs packages
httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
.getPackage().getName() + ";" + Param.class.getPackage().getName(),
pathSpec);
}
}
/**
* @see DFSUtil#getHttpPolicy(org.apache.hadoop.conf.Configuration)
   * for information on the different configuration options and how the
   * HTTP policy is decided.
*/
void start() throws IOException {
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
final String infoHost = bindAddress.getHostName();
final InetSocketAddress httpAddr = bindAddress;
final String httpsAddrString = conf.getTrimmed(
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
if (httpsAddr != null) {
// If DFS_NAMENODE_HTTPS_BIND_HOST_KEY exists then it overrides the
// host name portion of DFS_NAMENODE_HTTPS_ADDRESS_KEY.
final String bindHost =
conf.getTrimmed(DFSConfigKeys.DFS_NAMENODE_HTTPS_BIND_HOST_KEY);
if (bindHost != null && !bindHost.isEmpty()) {
httpsAddr = new InetSocketAddress(bindHost, httpsAddr.getPort());
}
}
HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
httpAddr, httpsAddr, "hdfs",
DFSConfigKeys.DFS_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
httpServer = builder.build();
if (policy.isHttpsEnabled()) {
// assume same ssl port for all datanodes
InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.getTrimmed(
DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":"
+ DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
datanodeSslPort.getPort());
}
initWebHdfs(conf);
httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
setupServlets(httpServer, conf);
httpServer.start();
int connIdx = 0;
if (policy.isHttpEnabled()) {
httpAddress = httpServer.getConnectorAddress(connIdx++);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
NetUtils.getHostPortString(httpAddress));
}
if (policy.isHttpsEnabled()) {
httpsAddress = httpServer.getConnectorAddress(connIdx);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
NetUtils.getHostPortString(httpsAddress));
}
}
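  /**
   * Build the parameter map for the WebHDFS authentication filter from the
   * configuration: the SPNEGO principal resolved against the bind address,
   * the keytab file, and whether anonymous access is allowed.
   */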
private Map<String, String> getAuthFilterParams(Configuration conf)
throws IOException {
Map<String, String> params = new HashMap<String, String>();
String principalInConf = conf
.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
if (principalInConf != null && !principalInConf.isEmpty()) {
params
.put(
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
SecurityUtil.getServerPrincipal(principalInConf,
bindAddress.getHostName()));
} else if (UserGroupInformation.isSecurityEnabled()) {
HttpServer2.LOG.error(
"WebHDFS and security are enabled, but configuration property '" +
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
"' is not set.");
}
String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf,
DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
if (httpKeytab != null && !httpKeytab.isEmpty()) {
params.put(
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
httpKeytab);
} else if (UserGroupInformation.isSecurityEnabled()) {
HttpServer2.LOG.error(
"WebHDFS and security are enabled, but configuration property '" +
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
"' is not set.");
}
String anonymousAllowed = conf
.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED);
if (anonymousAllowed != null && !anonymousAllowed.isEmpty()) {
params.put(
DFSConfigKeys.DFS_WEB_AUTHENTICATION_SIMPLE_ANONYMOUS_ALLOWED,
anonymousAllowed);
}
return params;
}
void stop() throws Exception {
if (httpServer != null) {
httpServer.stop();
}
}
InetSocketAddress getHttpAddress() {
return httpAddress;
}
InetSocketAddress getHttpsAddress() {
return httpsAddress;
}
/**
* Sets fsimage for use by servlets.
*
* @param fsImage FSImage to set
*/
void setFSImage(FSImage fsImage) {
httpServer.setAttribute(FSIMAGE_ATTRIBUTE_KEY, fsImage);
}
/**
* Sets address of namenode for use by servlets.
*
* @param nameNodeAddress InetSocketAddress to set
*/
void setNameNodeAddress(InetSocketAddress nameNodeAddress) {
httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
NetUtils.getConnectAddress(nameNodeAddress));
}
/**
* Sets startup progress of namenode for use by servlets.
*
* @param prog StartupProgress to set
*/
void setStartupProgress(StartupProgress prog) {
httpServer.setAttribute(STARTUP_PROGRESS_ATTRIBUTE_KEY, prog);
}
private static void setupServlets(HttpServer2 httpServer, Configuration conf) {
httpServer.addInternalServlet("startupProgress",
StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class);
httpServer.addInternalServlet("getDelegationToken",
GetDelegationTokenServlet.PATH_SPEC,
GetDelegationTokenServlet.class, true);
httpServer.addInternalServlet("renewDelegationToken",
RenewDelegationTokenServlet.PATH_SPEC,
RenewDelegationTokenServlet.class, true);
httpServer.addInternalServlet("cancelDelegationToken",
CancelDelegationTokenServlet.PATH_SPEC,
CancelDelegationTokenServlet.class, true);
httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class,
true);
httpServer.addInternalServlet("imagetransfer", ImageServlet.PATH_SPEC,
ImageServlet.class, true);
httpServer.addInternalServlet("listPaths", "/listPaths/*",
ListPathsServlet.class, false);
httpServer.addInternalServlet("data", "/data/*",
FileDataServlet.class, false);
httpServer.addInternalServlet("checksum", "/fileChecksum/*",
FileChecksumServlets.RedirectServlet.class, false);
httpServer.addInternalServlet("contentSummary", "/contentSummary/*",
ContentSummaryServlet.class, false);
}
static FSImage getFsImageFromContext(ServletContext context) {
return (FSImage)context.getAttribute(FSIMAGE_ATTRIBUTE_KEY);
}
public static NameNode getNameNodeFromContext(ServletContext context) {
return (NameNode)context.getAttribute(NAMENODE_ATTRIBUTE_KEY);
}
static Configuration getConfFromContext(ServletContext context) {
return (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
}
public static InetSocketAddress getNameNodeAddressFromContext(
ServletContext context) {
return (InetSocketAddress)context.getAttribute(
NAMENODE_ADDRESS_ATTRIBUTE_KEY);
}
/**
* Returns StartupProgress associated with ServletContext.
*
* @param context ServletContext to get
* @return StartupProgress associated with context
*/
static StartupProgress getStartupProgressFromContext(
ServletContext context) {
return (StartupProgress)context.getAttribute(STARTUP_PROGRESS_ATTRIBUTE_KEY);
}
}
| 11,235 | 37.088136 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.base.Preconditions;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.List;
import static org.apache.hadoop.util.Time.now;
/**
* Restrictions for a concat operation:
* <pre>
* 1. the src file and the target file are in the same dir
* 2. all the source files are not in snapshot
 * 3. no source file can be the same as the target file
 * 4. source files cannot be under construction or empty
 * 5. a source file's preferred block size cannot be greater than the target file's
* </pre>
*/
class FSDirConcatOp {
static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
boolean logRetryCache) throws IOException {
Preconditions.checkArgument(!target.isEmpty(), "Target file name is empty");
Preconditions.checkArgument(srcs != null && srcs.length > 0,
"No sources given");
assert srcs != null;
if (FSDirectory.LOG.isDebugEnabled()) {
FSDirectory.LOG.debug("concat {} to {}", Arrays.toString(srcs), target);
}
final INodesInPath targetIIP = fsd.getINodesInPath4Write(target);
// write permission for the target
FSPermissionChecker pc = null;
if (fsd.isPermissionEnabled()) {
pc = fsd.getPermissionChecker();
fsd.checkPathAccess(pc, targetIIP, FsAction.WRITE);
}
// check the target
verifyTargetFile(fsd, target, targetIIP);
// check the srcs
INodeFile[] srcFiles = verifySrcFiles(fsd, srcs, targetIIP, pc);
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.concat: " +
Arrays.toString(srcs) + " to " + target);
}
long timestamp = now();
fsd.writeLock();
try {
unprotectedConcat(fsd, targetIIP, srcFiles, timestamp);
} finally {
fsd.writeUnlock();
}
fsd.getEditLog().logConcat(target, srcs, timestamp, logRetryCache);
return fsd.getAuditFileInfo(targetIIP);
}
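  /**
   * Validate the concat target: it must not be inside an encryption zone and
   * must not be under construction.
   */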
private static void verifyTargetFile(FSDirectory fsd, final String target,
final INodesInPath targetIIP) throws IOException {
// check the target
if (fsd.getEZForPath(targetIIP) != null) {
throw new HadoopIllegalArgumentException(
"concat can not be called for files in an encryption zone.");
}
final INodeFile targetINode = INodeFile.valueOf(targetIIP.getLastINode(),
target);
if(targetINode.isUnderConstruction()) {
throw new HadoopIllegalArgumentException("concat: target file "
+ target + " is under construction");
}
}
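  /**
   * Validate the source files against the restrictions listed in the class
   * javadoc (same directory as the target, not in a snapshot, not under
   * construction or empty, compatible preferred block size) and check the
   * caller's permissions on them.
   * @return the distinct source inodes.
   */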
private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
INodesInPath targetIIP, FSPermissionChecker pc) throws IOException {
// to make sure no two files are the same
Set<INodeFile> si = new HashSet<>();
final INodeFile targetINode = targetIIP.getLastINode().asFile();
final INodeDirectory targetParent = targetINode.getParent();
// now check the srcs
for(String src : srcs) {
final INodesInPath iip = fsd.getINodesInPath4Write(src);
// permission check for srcs
if (pc != null) {
fsd.checkPathAccess(pc, iip, FsAction.READ); // read the file
fsd.checkParentAccess(pc, iip, FsAction.WRITE); // for delete
}
final INode srcINode = iip.getLastINode();
final INodeFile srcINodeFile = INodeFile.valueOf(srcINode, src);
// make sure the src file and the target file are in the same dir
if (srcINodeFile.getParent() != targetParent) {
throw new HadoopIllegalArgumentException("Source file " + src
+ " is not in the same directory with the target "
+ targetIIP.getPath());
}
// make sure all the source files are not in snapshot
if (srcINode.isInLatestSnapshot(iip.getLatestSnapshotId())) {
throw new SnapshotException("Concat: the source file " + src
+ " is in snapshot");
}
// check if the file has other references.
if (srcINode.isReference() && ((INodeReference.WithCount)
srcINode.asReference().getReferredINode()).getReferenceCount() > 1) {
throw new SnapshotException("Concat: the source file " + src
+ " is referred by some other reference in some snapshot.");
}
// source file cannot be the same with the target file
if (srcINode == targetINode) {
throw new HadoopIllegalArgumentException("concat: the src file " + src
+ " is the same with the target file " + targetIIP.getPath());
}
// source file cannot be under construction or empty
if(srcINodeFile.isUnderConstruction() || srcINodeFile.numBlocks() == 0) {
throw new HadoopIllegalArgumentException("concat: source file " + src
+ " is invalid or empty or underConstruction");
}
// source file's preferred block size cannot be greater than the target
// file
if (srcINodeFile.getPreferredBlockSize() >
targetINode.getPreferredBlockSize()) {
throw new HadoopIllegalArgumentException("concat: source file " + src
+ " has preferred block size " + srcINodeFile.getPreferredBlockSize()
+ " which is greater than the target file's preferred block size "
+ targetINode.getPreferredBlockSize());
}
si.add(srcINodeFile);
}
// make sure no two files are the same
if(si.size() < srcs.length) {
// it means at least two files are the same
throw new HadoopIllegalArgumentException(
"concat: at least two of the source files are the same");
}
return si.toArray(new INodeFile[si.size()]);
}
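  /**
   * Compute the storage-space and storage-type quota deltas caused by the
   * source blocks taking on the target file's replication factor.
   */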
private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
INodeFile target, INodeFile[] srcList) {
QuotaCounts deltas = new QuotaCounts.Builder().build();
final short targetRepl = target.getPreferredBlockReplication();
for (INodeFile src : srcList) {
short srcRepl = src.getPreferredBlockReplication();
long fileSize = src.computeFileSize();
if (targetRepl != srcRepl) {
deltas.addStorageSpace(fileSize * (targetRepl - srcRepl));
BlockStoragePolicy bsp =
fsd.getBlockStoragePolicySuite().getPolicy(src.getStoragePolicyID());
if (bsp != null) {
List<StorageType> srcTypeChosen = bsp.chooseStorageTypes(srcRepl);
for (StorageType t : srcTypeChosen) {
if (t.supportTypeQuota()) {
deltas.addTypeSpace(t, -fileSize);
}
}
List<StorageType> targetTypeChosen = bsp.chooseStorageTypes(targetRepl);
for (StorageType t : targetTypeChosen) {
if (t.supportTypeQuota()) {
deltas.addTypeSpace(t, fileSize);
}
}
}
}
}
return deltas;
}
private static void verifyQuota(FSDirectory fsd, INodesInPath targetIIP,
QuotaCounts deltas) throws QuotaExceededException {
if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
// Do not check quota if editlog is still being processed
return;
}
FSDirectory.verifyQuota(targetIIP, targetIIP.length() - 1, deltas, null);
}
/**
* Concat all the blocks from srcs to trg and delete the srcs files
* @param fsd FSDirectory
*/
static void unprotectedConcat(FSDirectory fsd, INodesInPath targetIIP,
INodeFile[] srcList, long timestamp) throws IOException {
assert fsd.hasWriteLock();
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSNamesystem.concat to "
+ targetIIP.getPath());
}
final INodeFile trgInode = targetIIP.getLastINode().asFile();
QuotaCounts deltas = computeQuotaDeltas(fsd, trgInode, srcList);
verifyQuota(fsd, targetIIP, deltas);
// the target file can be included in a snapshot
trgInode.recordModification(targetIIP.getLatestSnapshotId());
INodeDirectory trgParent = targetIIP.getINode(-2).asDirectory();
trgInode.concatBlocks(srcList);
// since we are in the same dir - we can use same parent to remove files
int count = 0;
for (INodeFile nodeToRemove : srcList) {
if(nodeToRemove != null) {
nodeToRemove.clearBlocks();
nodeToRemove.getParent().removeChild(nodeToRemove);
fsd.getINodeMap().remove(nodeToRemove);
count++;
}
}
trgInode.setModificationTime(timestamp, targetIIP.getLatestSnapshotId());
trgParent.updateModificationTime(timestamp, targetIIP.getLatestSnapshotId());
// update quota on the parent directory with deltas
FSDirectory.unprotectedUpdateCount(targetIIP, targetIIP.length() - 1, deltas);
}
}
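/**
 * A minimal arithmetic sketch (illustrative only, not part of the Hadoop
 * source): it mirrors the storage-space portion of computeQuotaDeltas above
 * with plain numbers, assuming a target file replicated 3x and two source
 * files replicated 2x. Every figure below is a made-up example value; the
 * real method also adjusts per-storage-type quota via the block storage
 * policy, which is omitted here.
 */
class ConcatQuotaDeltaSketch {
  public static void main(String[] args) {
    final short targetRepl = 3;
    final short[] srcRepl = {2, 2};
    final long[] srcFileSize = {128L * 1024 * 1024, 64L * 1024 * 1024};
    long storageSpaceDelta = 0;
    for (int i = 0; i < srcRepl.length; i++) {
      if (srcRepl[i] != targetRepl) {
        // each source now carries the target's replication factor
        storageSpaceDelta += srcFileSize[i] * (targetRepl - srcRepl[i]);
      }
    }
    // 192 MB of additional storage-space quota is charged in this example
    System.out.println("storage-space delta (bytes) = " + storageSpaceDelta);
  }
}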
| 9,925 | 39.680328 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.net.InetAddress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.security.UserGroupInformation;
/**
* Extension of {@link AuditLogger}.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class HdfsAuditLogger implements AuditLogger {
@Override
public void logAuditEvent(boolean succeeded, String userName,
InetAddress addr, String cmd, String src, String dst,
FileStatus status) {
logAuditEvent(succeeded, userName, addr, cmd, src, dst, status, null,
null);
}
/**
* Same as
* {@link #logAuditEvent(boolean, String, InetAddress, String, String, String, FileStatus)}
* with additional parameters related to logging delegation token tracking
* IDs.
*
* @param succeeded Whether authorization succeeded.
* @param userName Name of the user executing the request.
* @param addr Remote address of the request.
* @param cmd The requested command.
* @param src Path of affected source file.
* @param dst Path of affected destination file (if any).
* @param stat File information for operations that change the file's metadata
* (permissions, owner, times, etc).
* @param ugi UserGroupInformation of the current user, or null if not logging
* token tracking information
* @param dtSecretManager The token secret manager, or null if not logging
* token tracking information
*/
public abstract void logAuditEvent(boolean succeeded, String userName,
InetAddress addr, String cmd, String src, String dst,
FileStatus stat, UserGroupInformation ugi,
DelegationTokenSecretManager dtSecretManager);
}
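/**
 * A minimal sketch of a concrete {@link HdfsAuditLogger} (illustrative only,
 * not part of the Hadoop source). It prints to stdout and ignores the token
 * tracking arguments; a real implementation would write to its own sink. The
 * class name is invented for this example.
 */
class StdoutHdfsAuditLogger extends HdfsAuditLogger {
  @Override
  public void initialize(org.apache.hadoop.conf.Configuration conf) {
    // no configuration is needed for this sketch
  }
  @Override
  public void logAuditEvent(boolean succeeded, String userName,
      InetAddress addr, String cmd, String src, String dst, FileStatus stat,
      UserGroupInformation ugi, DelegationTokenSecretManager dtSecretManager) {
    System.out.println("allowed=" + succeeded + "\tugi=" + userName
        + "\tip=" + addr + "\tcmd=" + cmd + "\tsrc=" + src + "\tdst=" + dst);
  }
}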
| 2,736 | 39.850746 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.top;
import java.net.InetAddress;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
/**
* An {@link AuditLogger} that sends logged data directly to the metrics
 * system. It is used when the top service is used directly by the NameNode.
*/
@InterfaceAudience.Private
public class TopAuditLogger implements AuditLogger {
public static final Logger LOG = LoggerFactory.getLogger(TopAuditLogger.class);
private final TopMetrics topMetrics;
public TopAuditLogger(TopMetrics topMetrics) {
Preconditions.checkNotNull(topMetrics, "Cannot init with a null " +
"TopMetrics");
this.topMetrics = topMetrics;
}
@Override
public void initialize(Configuration conf) {
}
@Override
public void logAuditEvent(boolean succeeded, String userName,
InetAddress addr, String cmd, String src, String dst, FileStatus status) {
try {
topMetrics.report(succeeded, userName, addr, cmd, src, dst, status);
} catch (Throwable t) {
      LOG.error("An error occurred while reflecting the event in top service, "
          + "event: (cmd={},userName={})", cmd, userName, t);
}
if (LOG.isDebugEnabled()) {
final StringBuilder sb = new StringBuilder();
sb.append("allowed=").append(succeeded).append("\t");
sb.append("ugi=").append(userName).append("\t");
sb.append("ip=").append(addr).append("\t");
sb.append("cmd=").append(cmd).append("\t");
sb.append("src=").append(src).append("\t");
sb.append("dst=").append(dst).append("\t");
if (null == status) {
sb.append("perm=null");
} else {
sb.append("perm=");
sb.append(status.getOwner()).append(":");
sb.append(status.getGroup()).append(":");
sb.append(status.getPermission());
}
LOG.debug("------------------- logged event for top service: " + sb);
}
}
}
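/**
 * A minimal wiring sketch (illustrative only, not part of the Hadoop source):
 * builds a TopMetrics over a single assumed 1-minute window, wraps it in a
 * TopAuditLogger, and reports one event the way the NameNode audit path would.
 * The user, address and command values are made up for the example.
 */
class TopAuditLoggerExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // one 60-second reporting window (assumes the default bucket count divides it evenly)
    TopMetrics metrics = new TopMetrics(conf, new int[] {60 * 1000});
    TopAuditLogger logger = new TopAuditLogger(metrics);
    logger.initialize(conf);
    logger.logAuditEvent(true, "alice", InetAddress.getByName("127.0.0.1"),
        "listStatus", "/tmp", null, null);
  }
}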
| 3,034 | 35.566265 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.top;
import java.util.concurrent.TimeUnit;
import com.google.common.primitives.Ints;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import com.google.common.base.Preconditions;
/**
* This class is a common place for NNTop configuration.
*/
@InterfaceAudience.Private
public final class TopConf {
/**
* Whether TopMetrics are enabled
*/
public final boolean isEnabled;
/**
* A meta command representing the total number of calls to all commands
*/
public static final String ALL_CMDS = "*";
/**
* nntop reporting periods in milliseconds
*/
public final int[] nntopReportingPeriodsMs;
public TopConf(Configuration conf) {
isEnabled = conf.getBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY,
DFSConfigKeys.NNTOP_ENABLED_DEFAULT);
String[] periodsStr = conf.getTrimmedStrings(
DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY,
DFSConfigKeys.NNTOP_WINDOWS_MINUTES_DEFAULT);
nntopReportingPeriodsMs = new int[periodsStr.length];
for (int i = 0; i < periodsStr.length; i++) {
nntopReportingPeriodsMs[i] = Ints.checkedCast(
TimeUnit.MINUTES.toMillis(Integer.parseInt(periodsStr[i])));
}
for (int aPeriodMs: nntopReportingPeriodsMs) {
Preconditions.checkArgument(aPeriodMs >= TimeUnit.MINUTES.toMillis(1),
"minimum reporting period is 1 min!");
}
}
}
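/**
 * A minimal usage sketch (illustrative only, not part of the Hadoop source):
 * shows how TopConf turns the configured window lengths in minutes into
 * millisecond reporting periods. The "1,5,25" setting is an assumed example.
 */
class TopConfExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY, "1,5,25");
    TopConf topConf = new TopConf(conf);
    // prints 60000, 300000 and 1500000
    for (int periodMs : topConf.nntopReportingPeriodsMs) {
      System.out.println("reporting period (ms) = " + periodMs);
    }
  }
}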
| 2,307 | 34.507692 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/metrics/TopMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.top.metrics;
import java.net.InetAddress;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import com.google.common.collect.Lists;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
/**
* The interface to the top metrics.
* <p/>
 * Metrics are collected by a custom audit logger,
 * {@link org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger}, which
 * calls TopMetrics to increment per-operation, per-user counts on every audit
 * log call. These counts are used to show the top users by NameNode operation
 * as well as across all operations.
 * <p/>
 * TopMetrics maintains these counts for a configurable number of time
 * intervals, e.g. 1min, 5min, 25min. Each interval is tracked by a
 * RollingWindowManager.
 * <p/>
 * These metrics are published as a JSON string via
 * {@link org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean#getTopWindows}.
 * This is done by calling
 * {@link org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager#snapshot}
 * on each RollingWindowManager.
* <p/>
* Thread-safe: relies on thread-safety of RollingWindowManager
*/
@InterfaceAudience.Private
public class TopMetrics {
public static final Logger LOG = LoggerFactory.getLogger(TopMetrics.class);
private static void logConf(Configuration conf) {
LOG.info("NNTop conf: " + DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY +
" = " + conf.get(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY));
LOG.info("NNTop conf: " + DFSConfigKeys.NNTOP_NUM_USERS_KEY +
" = " + conf.get(DFSConfigKeys.NNTOP_NUM_USERS_KEY));
LOG.info("NNTop conf: " + DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY +
" = " + conf.get(DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY));
}
/**
* A map from reporting periods to WindowManager. Thread-safety is provided by
* the fact that the mapping is not changed after construction.
*/
final Map<Integer, RollingWindowManager> rollingWindowManagers =
new HashMap<Integer, RollingWindowManager>();
public TopMetrics(Configuration conf, int[] reportingPeriods) {
logConf(conf);
for (int i = 0; i < reportingPeriods.length; i++) {
rollingWindowManagers.put(reportingPeriods[i], new RollingWindowManager(
conf, reportingPeriods[i]));
}
}
/**
* Get a list of the current TopWindow statistics, one TopWindow per tracked
* time interval.
*/
public List<TopWindow> getTopWindows() {
long monoTime = Time.monotonicNow();
List<TopWindow> windows = Lists.newArrayListWithCapacity
(rollingWindowManagers.size());
for (Entry<Integer, RollingWindowManager> entry : rollingWindowManagers
.entrySet()) {
TopWindow window = entry.getValue().snapshot(monoTime);
windows.add(window);
}
return windows;
}
/**
* Pick the same information that DefaultAuditLogger does before writing to a
 * log file. This keeps the behavior consistent when {@link TopMetrics} is fed
 * with data read back from log files instead of being invoked directly by the
 * FSNamesystem.
*/
public void report(boolean succeeded, String userName, InetAddress addr,
String cmd, String src, String dst, FileStatus status) {
// currently nntop only makes use of the username and the command
report(userName, cmd);
}
public void report(String userName, String cmd) {
long currTime = Time.monotonicNow();
report(currTime, userName, cmd);
}
public void report(long currTime, String userName, String cmd) {
LOG.debug("a metric is reported: cmd: {} user: {}", cmd, userName);
userName = UserGroupInformation.trimLoginMethod(userName);
for (RollingWindowManager rollingWindowManager : rollingWindowManagers
.values()) {
rollingWindowManager.recordMetric(currTime, cmd, userName, 1);
rollingWindowManager.recordMetric(currTime,
TopConf.ALL_CMDS, userName, 1);
}
}
}
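/**
 * A minimal usage sketch (illustrative only, not part of the Hadoop source):
 * reports a few assumed operations and then reads the per-window snapshots
 * that the NameNode would expose through FSNamesystemMBean#getTopWindows.
 * One 1-minute window is assumed; user and command names are made up.
 */
class TopMetricsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    TopMetrics metrics = new TopMetrics(conf, new int[] {60 * 1000});
    metrics.report("alice", "create");
    metrics.report("alice", "create");
    metrics.report("bob", "listStatus");
    // each snapshot carries one Op per reported command plus the "*" total
    for (TopWindow window : metrics.getTopWindows()) {
      System.out.println("window=" + window.getWindowLenMs() + "ms ops="
          + window.getOps().size());
    }
  }
}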
| 5,315 | 39.272727 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.top.window;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Set;
import java.util.Stack;
import java.util.concurrent.ConcurrentHashMap;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.primitives.Ints;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A class to manage the set of {@link RollingWindow}s. This class is the
* interface of metrics system to the {@link RollingWindow}s to retrieve the
* current top metrics.
* <p/>
* Thread-safety is provided by each {@link RollingWindow} being thread-safe as
* well as {@link ConcurrentHashMap} for the collection of them.
*/
@InterfaceAudience.Private
public class RollingWindowManager {
public static final Logger LOG = LoggerFactory.getLogger(
RollingWindowManager.class);
private final int windowLenMs;
private final int bucketsPerWindow; // e.g., 10 buckets per minute
private final int topUsersCnt; // e.g., report top 10 metrics
static private class RollingWindowMap extends
ConcurrentHashMap<String, RollingWindow> {
private static final long serialVersionUID = -6785807073237052051L;
}
/**
* Represents a snapshot of the rolling window. It contains one Op per
* operation in the window, with ranked users for each Op.
*/
public static class TopWindow {
private final int windowMillis;
private final List<Op> top;
public TopWindow(int windowMillis) {
this.windowMillis = windowMillis;
this.top = Lists.newArrayList();
}
public void addOp(Op op) {
top.add(op);
}
public int getWindowLenMs() {
return windowMillis;
}
public List<Op> getOps() {
return top;
}
}
/**
* Represents an operation within a TopWindow. It contains a ranked
* set of the top users for the operation.
*/
public static class Op {
private final String opType;
private final List<User> topUsers;
private final long totalCount;
public Op(String opType, long totalCount) {
this.opType = opType;
this.topUsers = Lists.newArrayList();
this.totalCount = totalCount;
}
public void addUser(User u) {
topUsers.add(u);
}
public String getOpType() {
return opType;
}
public List<User> getTopUsers() {
return topUsers;
}
public long getTotalCount() {
return totalCount;
}
}
/**
* Represents a user who called an Op within a TopWindow. Specifies the
* user and the number of times the user called the operation.
*/
public static class User {
private final String user;
private final long count;
public User(String user, long count) {
this.user = user;
this.count = count;
}
public String getUser() {
return user;
}
public long getCount() {
return count;
}
}
/**
* A mapping from each reported metric to its {@link RollingWindowMap} that
* maintains the set of {@link RollingWindow}s for the users that have
* operated on that metric.
*/
public ConcurrentHashMap<String, RollingWindowMap> metricMap =
new ConcurrentHashMap<String, RollingWindowMap>();
public RollingWindowManager(Configuration conf, int reportingPeriodMs) {
windowLenMs = reportingPeriodMs;
bucketsPerWindow =
conf.getInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY,
DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_DEFAULT);
Preconditions.checkArgument(bucketsPerWindow > 0,
"a window should have at least one bucket");
Preconditions.checkArgument(bucketsPerWindow <= windowLenMs,
"the minimum size of a bucket is 1 ms");
//same-size buckets
Preconditions.checkArgument(windowLenMs % bucketsPerWindow == 0,
"window size must be a multiplication of number of buckets");
topUsersCnt =
conf.getInt(DFSConfigKeys.NNTOP_NUM_USERS_KEY,
DFSConfigKeys.NNTOP_NUM_USERS_DEFAULT);
Preconditions.checkArgument(topUsersCnt > 0,
"the number of requested top users must be at least 1");
}
/**
   * Called when the metric identified by "command" is changed by "delta" units
   * at time "time" by user "user"
*
* @param time the time of the event
* @param command the metric that is updated, e.g., the operation name
* @param user the user that updated the metric
* @param delta the amount of change in the metric, e.g., +1
*/
public void recordMetric(long time, String command, String user, long delta) {
RollingWindow window = getRollingWindow(command, user);
window.incAt(time, delta);
}
/**
* Take a snapshot of current top users in the past period.
*
* @param time the current time
* @return a TopWindow describing the top users for each metric in the
* window.
*/
public TopWindow snapshot(long time) {
TopWindow window = new TopWindow(windowLenMs);
Set<String> metricNames = metricMap.keySet();
LOG.debug("iterating in reported metrics, size={} values={}",
metricNames.size(), metricNames);
for (Map.Entry<String, RollingWindowMap> entry : metricMap.entrySet()) {
String metricName = entry.getKey();
RollingWindowMap rollingWindows = entry.getValue();
TopN topN = getTopUsersForMetric(time, metricName, rollingWindows);
final int size = topN.size();
if (size == 0) {
continue;
}
Op op = new Op(metricName, topN.getTotal());
window.addOp(op);
// Reverse the users from the TopUsers using a stack,
// since we'd like them sorted in descending rather than ascending order
Stack<NameValuePair> reverse = new Stack<NameValuePair>();
for (int i = 0; i < size; i++) {
reverse.push(topN.poll());
}
for (int i = 0; i < size; i++) {
NameValuePair userEntry = reverse.pop();
User user = new User(userEntry.name, Long.valueOf(userEntry.value));
op.addUser(user);
}
}
return window;
}
/**
* Calculates the top N users over a time interval.
*
* @param time the current time
   * @param metricName Name of metric
   * @param rollingWindows the per-user rolling windows recorded for the metric
   * @return the top users of the metric in the window, ranked by their counts
*/
private TopN getTopUsersForMetric(long time, String metricName,
RollingWindowMap rollingWindows) {
TopN topN = new TopN(topUsersCnt);
Iterator<Map.Entry<String, RollingWindow>> iterator =
rollingWindows.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, RollingWindow> entry = iterator.next();
String userName = entry.getKey();
RollingWindow aWindow = entry.getValue();
long windowSum = aWindow.getSum(time);
      // garbage-collect rolling windows that no longer contribute to the sum
if (windowSum == 0) {
LOG.debug("gc window of metric: {} userName: {}",
metricName, userName);
iterator.remove();
continue;
}
LOG.debug("offer window of metric: {} userName: {} sum: {}",
metricName, userName, windowSum);
topN.offer(new NameValuePair(userName, windowSum));
}
LOG.debug("topN users size for command {} is: {}", metricName, topN.size());
return topN;
}
/**
* Get the rolling window specified by metric and user.
*
* @param metric the updated metric
* @param user the user that updated the metric
* @return the rolling window
*/
private RollingWindow getRollingWindow(String metric, String user) {
RollingWindowMap rwMap = metricMap.get(metric);
if (rwMap == null) {
rwMap = new RollingWindowMap();
RollingWindowMap prevRwMap = metricMap.putIfAbsent(metric, rwMap);
if (prevRwMap != null) {
rwMap = prevRwMap;
}
}
RollingWindow window = rwMap.get(user);
if (window != null) {
return window;
}
window = new RollingWindow(windowLenMs, bucketsPerWindow);
RollingWindow prevWindow = rwMap.putIfAbsent(user, window);
if (prevWindow != null) {
window = prevWindow;
}
return window;
}
/**
   * A pair of a name and its corresponding value. Implements a natural
   * ordering by value so the TopN PriorityQueue sorts based on the count.
*/
static private class NameValuePair implements Comparable<NameValuePair> {
String name;
long value;
public NameValuePair(String metricName, long value) {
this.name = metricName;
this.value = value;
}
@Override
public int compareTo(NameValuePair other) {
      return Long.compare(value, other.value);
}
@Override
public boolean equals(Object other) {
if (other instanceof NameValuePair) {
return compareTo((NameValuePair)other) == 0;
}
return false;
}
@Override
public int hashCode() {
return Long.valueOf(value).hashCode();
}
}
/**
* A fixed-size priority queue, used to retrieve top-n of offered entries.
*/
static private class TopN extends PriorityQueue<NameValuePair> {
private static final long serialVersionUID = 5134028249611535803L;
int n; // > 0
private long total = 0;
TopN(int n) {
super(n);
this.n = n;
}
@Override
public boolean offer(NameValuePair entry) {
updateTotal(entry.value);
if (size() == n) {
NameValuePair smallest = peek();
if (smallest.value >= entry.value) {
return false;
}
poll(); // remove smallest
}
return super.offer(entry);
}
private void updateTotal(long value) {
total += value;
}
public long getTotal() {
return total;
}
}
}
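/**
 * A minimal usage sketch (illustrative only, not part of the Hadoop source):
 * records a few assumed events against a single 1-minute manager and prints
 * the ranked users that snapshot() reports for each operation. The default
 * bucket and top-user settings from the Configuration are assumed to apply.
 */
class RollingWindowManagerExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    RollingWindowManager manager = new RollingWindowManager(conf, 60 * 1000);
    long now = System.currentTimeMillis();
    manager.recordMetric(now, "open", "alice", 3);
    manager.recordMetric(now, "open", "bob", 1);
    RollingWindowManager.TopWindow window = manager.snapshot(now);
    // expected ranking for "open": alice=3 first, then bob=1
    for (RollingWindowManager.Op op : window.getOps()) {
      for (RollingWindowManager.User user : op.getTopUsers()) {
        System.out.println(op.getOpType() + ": " + user.getUser() + "="
            + user.getCount());
      }
    }
  }
}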
| 10,565 | 29.626087 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindow.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.top.window;
import java.util.Date;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.classification.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A class for exposing a rolling window view on the events that occur over time.
* Events are reported based on occurrence time. The total number of events in
* the last period covered by the rolling window can be retrieved by the
* {@link #getSum(long)} method.
* <p/>
*
* Assumptions:
* <p/>
*
 * (1) Concurrent invocations of the {@link #incAt} method are possible
* <p/>
*
 * (2) The time parameters of two consecutive invocations of {@link #incAt} could
* be in any given order
* <p/>
*
* (3) The buffering delays are not more than the window length, i.e., after two
 * consecutive invocations {@link #incAt(long time1, long)} and
 * {@link #incAt(long time2, long)}, time1 < time2 || time1 - time2 < windowLenMs.
 * This assumption helps avoid unnecessary synchronization.
* <p/>
*
 * Thread-safety is built into the {@link RollingWindow.Bucket}
*/
@InterfaceAudience.Private
public class RollingWindow {
private static final Logger LOG = LoggerFactory.getLogger(RollingWindow.class);
/**
* Each window is composed of buckets, which offer a trade-off between
* accuracy and space complexity: the lower the number of buckets, the less
* memory is required by the rolling window but more inaccuracy is possible in
* reading window total values.
*/
Bucket[] buckets;
final int windowLenMs;
final int bucketSize;
/**
* @param windowLenMs The period that is covered by the window. This period must
* be more than the buffering delays.
* @param numBuckets number of buckets in the window
*/
RollingWindow(int windowLenMs, int numBuckets) {
buckets = new Bucket[numBuckets];
for (int i = 0; i < numBuckets; i++) {
buckets[i] = new Bucket();
}
this.windowLenMs = windowLenMs;
this.bucketSize = windowLenMs / numBuckets;
    // reject window lengths that do not divide evenly into the buckets
    if (windowLenMs % numBuckets != 0) {
throw new IllegalArgumentException(
"The bucket size in the rolling window is not integer: windowLenMs= "
+ windowLenMs + " numBuckets= " + numBuckets);
}
}
/**
* When an event occurs at the specified time, this method reflects that in
* the rolling window.
* <p/>
*
* @param time the time at which the event occurred
* @param delta the delta that will be added to the window
*/
public void incAt(long time, long delta) {
int bi = computeBucketIndex(time);
Bucket bucket = buckets[bi];
// If the last time the bucket was updated is out of the scope of the
// rolling window, reset the bucket.
if (bucket.isStaleNow(time)) {
bucket.safeReset(time);
}
bucket.inc(delta);
}
private int computeBucketIndex(long time) {
int positionOnWindow = (int) (time % windowLenMs);
int bucketIndex = positionOnWindow * buckets.length / windowLenMs;
return bucketIndex;
}
/**
* Thread-safety is provided by synchronization when resetting the update time
* as well as atomic fields.
*/
private class Bucket {
AtomicLong value = new AtomicLong(0);
AtomicLong updateTime = new AtomicLong(0);
/**
* Check whether the last time that the bucket was updated is no longer
     * covered by the rolling window.
*
* @param time the current time
* @return true if the bucket state is stale
*/
boolean isStaleNow(long time) {
long utime = updateTime.get();
return time - utime >= windowLenMs;
}
/**
* Safely reset the bucket state considering concurrent updates (inc) and
* resets.
*
* @param time the current time
*/
void safeReset(long time) {
// At any point in time, only one thread is allowed to reset the
// bucket
synchronized (this) {
if (isStaleNow(time)) {
// reset the value before setting the time, it allows other
// threads to safely assume that the value is updated if the
// time is not stale
value.set(0);
updateTime.set(time);
}
// else a concurrent thread has already reset it: do nothing
}
}
/**
* Increment the bucket. It assumes that staleness check is already
* performed. We do not need to update the {@link #updateTime} because as
* long as the {@link #updateTime} belongs to the current view of the
* rolling window, the algorithm works fine.
*/
void inc(long delta) {
value.addAndGet(delta);
}
}
/**
* Get value represented by this window at the specified time
* <p/>
*
* If time lags behind the latest update time, the new updates are still
* included in the sum
*
   * @param time the time at which to evaluate the window
* @return number of events occurred in the past period
*/
public long getSum(long time) {
long sum = 0;
for (Bucket bucket : buckets) {
boolean stale = bucket.isStaleNow(time);
if (!stale) {
sum += bucket.value.get();
}
if (LOG.isDebugEnabled()) {
long bucketTime = bucket.updateTime.get();
String timeStr = new Date(bucketTime).toString();
LOG.debug("Sum: + " + sum + " Bucket: updateTime: " + timeStr + " ("
+ bucketTime + ") isStale " + stale + " at " + time);
}
}
return sum;
}
}
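/**
 * A minimal usage sketch (illustrative only, not part of the Hadoop source):
 * a 60-second window split into 6 buckets of 10 seconds each (assumed sizes).
 * Events land in buckets by occurrence time, and getSum() only counts buckets
 * whose last update is still covered by the window.
 */
class RollingWindowExample {
  public static void main(String[] args) {
    RollingWindow window = new RollingWindow(60 * 1000, 6);
    long start = System.currentTimeMillis();
    window.incAt(start, 1);          // lands in one bucket
    window.incAt(start + 15000, 2);  // lands in a different bucket
    // both buckets are still fresh, so the sum is 3
    System.out.println(window.getSum(start + 20000));
    // the first bucket is now stale (older than 60 s), so the sum drops to 2
    System.out.println(window.getSum(start + 70000));
  }
}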
| 6,263 | 31.968421 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.web.resources;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.StreamingOutput;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.hdfs.web.ParamFilter;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.*;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.sun.jersey.spi.container.ResourceFilters;
/** Web-hdfs NameNode implementation. */
@Path("")
@ResourceFilters(ParamFilter.class)
public class NamenodeWebHdfsMethods {
public static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
private static final UriFsPathParam ROOT = new UriFsPathParam("");
private static final ThreadLocal<String> REMOTE_ADDRESS = new ThreadLocal<String>();
/** @return the remote client address. */
public static String getRemoteAddress() {
return REMOTE_ADDRESS.get();
}
public static InetAddress getRemoteIp() {
try {
return InetAddress.getByName(getRemoteAddress());
} catch (Exception e) {
return null;
}
}
/**
* Returns true if a WebHdfs request is in progress. Akin to
* {@link Server#isRpcInvocation()}.
*/
public static boolean isWebHdfsInvocation() {
return getRemoteAddress() != null;
}
private @Context ServletContext context;
private @Context HttpServletRequest request;
private @Context HttpServletResponse response;
private void init(final UserGroupInformation ugi,
final DelegationParam delegation,
final UserParam username, final DoAsParam doAsUser,
final UriFsPathParam path, final HttpOpParam<?> op,
final Param<?, ?>... parameters) {
if (LOG.isTraceEnabled()) {
LOG.trace("HTTP " + op.getValue().getType() + ": " + op + ", " + path
+ ", ugi=" + ugi + ", " + username + ", " + doAsUser
+ Param.toSortedString(", ", parameters));
}
//clear content type
response.setContentType(null);
    // set the remote address; if coming in via a trusted proxy server then
    // the address will be that of the proxied client
REMOTE_ADDRESS.set(JspHelper.getRemoteAddr(request));
}
private void reset() {
REMOTE_ADDRESS.set(null);
}
private static NamenodeProtocols getRPCServer(NameNode namenode)
throws IOException {
final NamenodeProtocols np = namenode.getRpcServer();
if (np == null) {
throw new RetriableException("Namenode is in startup mode");
}
return np;
}
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
final String path, final HttpOpParam.Op op, final long openOffset,
final long blocksize, final String excludeDatanodes) throws IOException {
FSNamesystem fsn = namenode.getNamesystem();
if (fsn == null) {
throw new IOException("Namesystem has not been intialized yet.");
}
final BlockManager bm = fsn.getBlockManager();
HashSet<Node> excludes = new HashSet<Node>();
if (excludeDatanodes != null) {
for (String host : StringUtils
.getTrimmedStringCollection(excludeDatanodes)) {
int idx = host.indexOf(":");
if (idx != -1) {
excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
} else {
excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
}
}
}
if (op == PutOpParam.Op.CREATE) {
      // choose a datanode near the client
final DatanodeDescriptor clientNode = bm.getDatanodeManager(
).getDatanodeByHost(getRemoteAddress());
if (clientNode != null) {
final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(
path, clientNode, excludes, blocksize);
if (storages.length > 0) {
return storages[0].getDatanodeDescriptor();
}
}
} else if (op == GetOpParam.Op.OPEN
|| op == GetOpParam.Op.GETFILECHECKSUM
|| op == PostOpParam.Op.APPEND) {
//choose a datanode containing a replica
final NamenodeProtocols np = getRPCServer(namenode);
final HdfsFileStatus status = np.getFileInfo(path);
if (status == null) {
throw new FileNotFoundException("File " + path + " not found.");
}
final long len = status.getLen();
if (op == GetOpParam.Op.OPEN) {
if (openOffset < 0L || (openOffset >= len && len > 0)) {
throw new IOException("Offset=" + openOffset
+ " out of the range [0, " + len + "); " + op + ", path=" + path);
}
}
if (len > 0) {
final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
final int count = locations.locatedBlockCount();
if (count > 0) {
return bestNode(locations.get(0).getLocations(), excludes);
}
}
}
return (DatanodeDescriptor)bm.getDatanodeManager().getNetworkTopology(
).chooseRandom(NodeBase.ROOT);
}
/**
   * Choose the datanode to redirect the request to. Note that the nodes have
   * been sorted based on availability and network distances, thus it is
   * sufficient to return the first eligible element of the list here.
*/
private static DatanodeInfo bestNode(DatanodeInfo[] nodes,
HashSet<Node> excludes) throws IOException {
for (DatanodeInfo dn: nodes) {
      if (!dn.isDecommissioned() && !excludes.contains(dn)) {
return dn;
}
}
throw new IOException("No active nodes contain this block");
}
private Token<? extends TokenIdentifier> generateDelegationToken(
final NameNode namenode, final UserGroupInformation ugi,
final String renewer) throws IOException {
final Credentials c = DelegationTokenSecretManager.createCredentials(
namenode, ugi, renewer != null? renewer: ugi.getShortUserName());
if (c == null) {
return null;
}
final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
Text kind = request.getScheme().equals("http") ? WebHdfsConstants.WEBHDFS_TOKEN_KIND
: WebHdfsConstants.SWEBHDFS_TOKEN_KIND;
t.setKind(kind);
return t;
}
private URI redirectURI(final NameNode namenode,
final UserGroupInformation ugi, final DelegationParam delegation,
final UserParam username, final DoAsParam doAsUser,
final String path, final HttpOpParam.Op op, final long openOffset,
final long blocksize, final String excludeDatanodes,
final Param<?, ?>... parameters) throws URISyntaxException, IOException {
final DatanodeInfo dn;
try {
dn = chooseDatanode(namenode, path, op, openOffset, blocksize,
excludeDatanodes);
} catch (InvalidTopologyException ite) {
throw new IOException("Failed to find datanode, suggest to check cluster health.", ite);
}
final String delegationQuery;
if (!UserGroupInformation.isSecurityEnabled()) {
//security disabled
delegationQuery = Param.toSortedString("&", doAsUser, username);
} else if (delegation.getValue() != null) {
//client has provided a token
delegationQuery = "&" + delegation;
} else {
//generate a token
final Token<? extends TokenIdentifier> t = generateDelegationToken(
namenode, ugi, request.getUserPrincipal().getName());
delegationQuery = "&" + new DelegationParam(t.encodeToUrlString());
}
final String query = op.toQueryString() + delegationQuery
+ "&" + new NamenodeAddressParam(namenode)
+ Param.toSortedString("&", parameters);
final String uripath = WebHdfsFileSystem.PATH_PREFIX + path;
final String scheme = request.getScheme();
int port = "http".equals(scheme) ? dn.getInfoPort() : dn
.getInfoSecurePort();
final URI uri = new URI(scheme, null, dn.getHostName(), port, uripath,
query, null);
if (LOG.isTraceEnabled()) {
LOG.trace("redirectURI=" + uri);
}
return uri;
}
/** Handle HTTP PUT request for the root. */
@PUT
@Path("/")
@Consumes({"*/*"})
@Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response putRoot(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
final DelegationParam delegation,
@QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT)
final UserParam username,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT)
final DoAsParam doAsUser,
@QueryParam(PutOpParam.NAME) @DefaultValue(PutOpParam.DEFAULT)
final PutOpParam op,
@QueryParam(DestinationParam.NAME) @DefaultValue(DestinationParam.DEFAULT)
final DestinationParam destination,
@QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT)
final OwnerParam owner,
@QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT)
final GroupParam group,
@QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
final PermissionParam permission,
@QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT)
final OverwriteParam overwrite,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize,
@QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
final ReplicationParam replication,
@QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT)
final BlockSizeParam blockSize,
@QueryParam(ModificationTimeParam.NAME) @DefaultValue(ModificationTimeParam.DEFAULT)
final ModificationTimeParam modificationTime,
@QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT)
final AccessTimeParam accessTime,
@QueryParam(RenameOptionSetParam.NAME) @DefaultValue(RenameOptionSetParam.DEFAULT)
final RenameOptionSetParam renameOptions,
@QueryParam(CreateParentParam.NAME) @DefaultValue(CreateParentParam.DEFAULT)
final CreateParentParam createParent,
@QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
final TokenArgumentParam delegationTokenArgument,
@QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT)
final AclPermissionParam aclPermission,
@QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
final XAttrNameParam xattrName,
@QueryParam(XAttrValueParam.NAME) @DefaultValue(XAttrValueParam.DEFAULT)
final XAttrValueParam xattrValue,
@QueryParam(XAttrSetFlagParam.NAME) @DefaultValue(XAttrSetFlagParam.DEFAULT)
final XAttrSetFlagParam xattrSetFlag,
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
final SnapshotNameParam snapshotName,
@QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)
final OldSnapshotNameParam oldSnapshotName,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
owner, group, permission, overwrite, bufferSize, replication,
blockSize, modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument, aclPermission, xattrName, xattrValue,
xattrSetFlag, snapshotName, oldSnapshotName, excludeDatanodes);
}
/** Handle HTTP PUT request. */
@PUT
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Consumes({"*/*"})
@Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response put(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
final DelegationParam delegation,
@QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT)
final UserParam username,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT)
final DoAsParam doAsUser,
@PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
@QueryParam(PutOpParam.NAME) @DefaultValue(PutOpParam.DEFAULT)
final PutOpParam op,
@QueryParam(DestinationParam.NAME) @DefaultValue(DestinationParam.DEFAULT)
final DestinationParam destination,
@QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT)
final OwnerParam owner,
@QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT)
final GroupParam group,
@QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
final PermissionParam permission,
@QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT)
final OverwriteParam overwrite,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize,
@QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
final ReplicationParam replication,
@QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT)
final BlockSizeParam blockSize,
@QueryParam(ModificationTimeParam.NAME) @DefaultValue(ModificationTimeParam.DEFAULT)
final ModificationTimeParam modificationTime,
@QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT)
final AccessTimeParam accessTime,
@QueryParam(RenameOptionSetParam.NAME) @DefaultValue(RenameOptionSetParam.DEFAULT)
final RenameOptionSetParam renameOptions,
@QueryParam(CreateParentParam.NAME) @DefaultValue(CreateParentParam.DEFAULT)
final CreateParentParam createParent,
@QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
final TokenArgumentParam delegationTokenArgument,
@QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT)
final AclPermissionParam aclPermission,
@QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
final XAttrNameParam xattrName,
@QueryParam(XAttrValueParam.NAME) @DefaultValue(XAttrValueParam.DEFAULT)
final XAttrValueParam xattrValue,
@QueryParam(XAttrSetFlagParam.NAME) @DefaultValue(XAttrSetFlagParam.DEFAULT)
final XAttrSetFlagParam xattrSetFlag,
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
final SnapshotNameParam snapshotName,
@QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)
final OldSnapshotNameParam oldSnapshotName,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, destination, owner,
group, permission, overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions, delegationTokenArgument,
aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName,
oldSnapshotName, excludeDatanodes);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
try {
return put(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, destination, owner, group,
permission, overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument, aclPermission, xattrName, xattrValue,
xattrSetFlag, snapshotName, oldSnapshotName, excludeDatanodes);
} finally {
reset();
}
}
});
}
private Response put(
final UserGroupInformation ugi,
final DelegationParam delegation,
final UserParam username,
final DoAsParam doAsUser,
final String fullpath,
final PutOpParam op,
final DestinationParam destination,
final OwnerParam owner,
final GroupParam group,
final PermissionParam permission,
final OverwriteParam overwrite,
final BufferSizeParam bufferSize,
final ReplicationParam replication,
final BlockSizeParam blockSize,
final ModificationTimeParam modificationTime,
final AccessTimeParam accessTime,
final RenameOptionSetParam renameOptions,
final CreateParentParam createParent,
final TokenArgumentParam delegationTokenArgument,
final AclPermissionParam aclPermission,
final XAttrNameParam xattrName,
final XAttrValueParam xattrValue,
final XAttrSetFlagParam xattrSetFlag,
final SnapshotNameParam snapshotName,
final OldSnapshotNameParam oldSnapshotName,
final ExcludeDatanodesParam exclDatanodes
) throws IOException, URISyntaxException {
final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final NamenodeProtocols np = getRPCServer(namenode);
switch(op.getValue()) {
case CREATE:
{
final URI uri = redirectURI(namenode, ugi, delegation, username,
doAsUser, fullpath, op.getValue(), -1L, blockSize.getValue(conf),
exclDatanodes.getValue(), permission, overwrite, bufferSize,
replication, blockSize);
return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case MKDIRS:
{
final boolean b = np.mkdirs(fullpath, permission.getFsPermission(), true);
final String js = JsonUtil.toJsonString("boolean", b);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case CREATESYMLINK:
{
np.createSymlink(destination.getValue(), fullpath,
PermissionParam.getDefaultFsPermission(), createParent.getValue());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case RENAME:
{
final EnumSet<Options.Rename> s = renameOptions.getValue();
if (s.isEmpty()) {
final boolean b = np.rename(fullpath, destination.getValue());
final String js = JsonUtil.toJsonString("boolean", b);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
} else {
np.rename2(fullpath, destination.getValue(),
s.toArray(new Options.Rename[s.size()]));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
}
case SETREPLICATION:
{
final boolean b = np.setReplication(fullpath, replication.getValue(conf));
final String js = JsonUtil.toJsonString("boolean", b);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case SETOWNER:
{
if (owner.getValue() == null && group.getValue() == null) {
throw new IllegalArgumentException("Both owner and group are empty.");
}
np.setOwner(fullpath, owner.getValue(), group.getValue());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case SETPERMISSION:
{
np.setPermission(fullpath, permission.getFsPermission());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case SETTIMES:
{
np.setTimes(fullpath, modificationTime.getValue(), accessTime.getValue());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case RENEWDELEGATIONTOKEN:
{
final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
token.decodeFromUrlString(delegationTokenArgument.getValue());
final long expiryTime = np.renewDelegationToken(token);
final String js = JsonUtil.toJsonString("long", expiryTime);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case CANCELDELEGATIONTOKEN:
{
final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
token.decodeFromUrlString(delegationTokenArgument.getValue());
np.cancelDelegationToken(token);
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case MODIFYACLENTRIES: {
np.modifyAclEntries(fullpath, aclPermission.getAclPermission(true));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case REMOVEACLENTRIES: {
np.removeAclEntries(fullpath, aclPermission.getAclPermission(false));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case REMOVEDEFAULTACL: {
np.removeDefaultAcl(fullpath);
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case REMOVEACL: {
np.removeAcl(fullpath);
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case SETACL: {
np.setAcl(fullpath, aclPermission.getAclPermission(true));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case SETXATTR: {
np.setXAttr(
fullpath,
XAttrHelper.buildXAttr(xattrName.getXAttrName(),
xattrValue.getXAttrValue()), xattrSetFlag.getFlag());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case REMOVEXATTR: {
np.removeXAttr(fullpath, XAttrHelper.buildXAttr(xattrName.getXAttrName()));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case CREATESNAPSHOT: {
String snapshotPath = np.createSnapshot(fullpath, snapshotName.getValue());
final String js = JsonUtil.toJsonString(
org.apache.hadoop.fs.Path.class.getSimpleName(), snapshotPath);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case RENAMESNAPSHOT: {
np.renameSnapshot(fullpath, oldSnapshotName.getValue(),
snapshotName.getValue());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}
}
/** Handle HTTP POST request for the root. */
@POST
@Path("/")
@Consumes({"*/*"})
@Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response postRoot(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
final DelegationParam delegation,
@QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT)
final UserParam username,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT)
final DoAsParam doAsUser,
@QueryParam(PostOpParam.NAME) @DefaultValue(PostOpParam.DEFAULT)
final PostOpParam op,
@QueryParam(ConcatSourcesParam.NAME) @DefaultValue(ConcatSourcesParam.DEFAULT)
final ConcatSourcesParam concatSrcs,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes,
@QueryParam(NewLengthParam.NAME) @DefaultValue(NewLengthParam.DEFAULT)
final NewLengthParam newLength
) throws IOException, InterruptedException {
return post(ugi, delegation, username, doAsUser, ROOT, op, concatSrcs,
bufferSize, excludeDatanodes, newLength);
}
/** Handle HTTP POST request. */
@POST
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Consumes({"*/*"})
@Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response post(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
final DelegationParam delegation,
@QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT)
final UserParam username,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT)
final DoAsParam doAsUser,
@PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
@QueryParam(PostOpParam.NAME) @DefaultValue(PostOpParam.DEFAULT)
final PostOpParam op,
@QueryParam(ConcatSourcesParam.NAME) @DefaultValue(ConcatSourcesParam.DEFAULT)
final ConcatSourcesParam concatSrcs,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes,
@QueryParam(NewLengthParam.NAME) @DefaultValue(NewLengthParam.DEFAULT)
final NewLengthParam newLength
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, concatSrcs, bufferSize,
excludeDatanodes, newLength);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
try {
return post(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, concatSrcs, bufferSize,
excludeDatanodes, newLength);
} finally {
reset();
}
}
});
}
private Response post(
final UserGroupInformation ugi,
final DelegationParam delegation,
final UserParam username,
final DoAsParam doAsUser,
final String fullpath,
final PostOpParam op,
final ConcatSourcesParam concatSrcs,
final BufferSizeParam bufferSize,
final ExcludeDatanodesParam excludeDatanodes,
final NewLengthParam newLength
) throws IOException, URISyntaxException {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final NamenodeProtocols np = getRPCServer(namenode);
switch(op.getValue()) {
case APPEND:
{
final URI uri = redirectURI(namenode, ugi, delegation, username,
doAsUser, fullpath, op.getValue(), -1L, -1L,
excludeDatanodes.getValue(), bufferSize);
return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case CONCAT:
{
np.concat(fullpath, concatSrcs.getAbsolutePaths());
return Response.ok().build();
}
case TRUNCATE:
{
      // We treat each REST request as a separate client.
final boolean b = np.truncate(fullpath, newLength.getValue(),
"DFSClient_" + DFSUtil.getSecureRandom().nextLong());
final String js = JsonUtil.toJsonString("boolean", b);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}
}
/** Handle HTTP GET request for the root. */
@GET
@Path("/")
@Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response getRoot(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
final DelegationParam delegation,
@QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT)
final UserParam username,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT)
final DoAsParam doAsUser,
@QueryParam(GetOpParam.NAME) @DefaultValue(GetOpParam.DEFAULT)
final GetOpParam op,
@QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT)
final OffsetParam offset,
@QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
final LengthParam length,
@QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
final RenewerParam renewer,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize,
@QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
final List<XAttrNameParam> xattrNames,
@QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
final XAttrEncodingParam xattrEncoding,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes,
@QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
final FsActionParam fsAction,
@QueryParam(TokenKindParam.NAME) @DefaultValue(TokenKindParam.DEFAULT)
final TokenKindParam tokenKind,
@QueryParam(TokenServiceParam.NAME) @DefaultValue(TokenServiceParam.DEFAULT)
final TokenServiceParam tokenService
) throws IOException, InterruptedException {
return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes, fsAction,
tokenKind, tokenService);
}
/** Handle HTTP GET request. */
@GET
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response get(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
final DelegationParam delegation,
@QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT)
final UserParam username,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT)
final DoAsParam doAsUser,
@PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
@QueryParam(GetOpParam.NAME) @DefaultValue(GetOpParam.DEFAULT)
final GetOpParam op,
@QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT)
final OffsetParam offset,
@QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
final LengthParam length,
@QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
final RenewerParam renewer,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize,
@QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
final List<XAttrNameParam> xattrNames,
@QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
final XAttrEncodingParam xattrEncoding,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes,
@QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
final FsActionParam fsAction,
@QueryParam(TokenKindParam.NAME) @DefaultValue(TokenKindParam.DEFAULT)
final TokenKindParam tokenKind,
@QueryParam(TokenServiceParam.NAME) @DefaultValue(TokenServiceParam.DEFAULT)
final TokenServiceParam tokenService
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, offset, length,
renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction,
tokenKind, tokenService);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
try {
return get(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
xattrNames, xattrEncoding, excludeDatanodes, fsAction, tokenKind,
tokenService);
} finally {
reset();
}
}
});
}
private Response get(
final UserGroupInformation ugi,
final DelegationParam delegation,
final UserParam username,
final DoAsParam doAsUser,
final String fullpath,
final GetOpParam op,
final OffsetParam offset,
final LengthParam length,
final RenewerParam renewer,
final BufferSizeParam bufferSize,
final List<XAttrNameParam> xattrNames,
final XAttrEncodingParam xattrEncoding,
final ExcludeDatanodesParam excludeDatanodes,
final FsActionParam fsAction,
final TokenKindParam tokenKind,
final TokenServiceParam tokenService
) throws IOException, URISyntaxException {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final Configuration conf = (Configuration) context
.getAttribute(JspHelper.CURRENT_CONF);
final NamenodeProtocols np = getRPCServer(namenode);
switch(op.getValue()) {
case OPEN:
{
final URI uri = redirectURI(namenode, ugi, delegation, username,
doAsUser, fullpath, op.getValue(), offset.getValue(), -1L,
excludeDatanodes.getValue(), offset, length, bufferSize);
return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case GET_BLOCK_LOCATIONS:
{
final long offsetValue = offset.getValue();
final Long lengthValue = length.getValue();
final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
offsetValue, lengthValue != null? lengthValue: Long.MAX_VALUE);
final String js = JsonUtil.toJsonString(locatedblocks);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case GETFILESTATUS:
{
final HdfsFileStatus status = np.getFileInfo(fullpath);
if (status == null) {
throw new FileNotFoundException("File does not exist: " + fullpath);
}
final String js = JsonUtil.toJsonString(status, true);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case LISTSTATUS:
{
final StreamingOutput streaming = getListingStream(np, fullpath);
return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
}
case GETCONTENTSUMMARY:
{
final ContentSummary contentsummary = np.getContentSummary(fullpath);
final String js = JsonUtil.toJsonString(contentsummary);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case GETFILECHECKSUM:
{
final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
fullpath, op.getValue(), -1L, -1L, null);
return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case GETDELEGATIONTOKEN:
{
if (delegation.getValue() != null) {
throw new IllegalArgumentException(delegation.getName()
+ " parameter is not null.");
}
final Token<? extends TokenIdentifier> token = generateDelegationToken(
namenode, ugi, renewer.getValue());
final String setServiceName = tokenService.getValue();
final String setKind = tokenKind.getValue();
if (setServiceName != null) {
token.setService(new Text(setServiceName));
}
if (setKind != null) {
token.setKind(new Text(setKind));
}
final String js = JsonUtil.toJsonString(token);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case GETHOMEDIRECTORY: {
final String js = JsonUtil.toJsonString("Path",
FileSystem.get(conf != null ? conf : new Configuration())
.getHomeDirectory().toUri().getPath());
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case GETACLSTATUS: {
AclStatus status = np.getAclStatus(fullpath);
if (status == null) {
throw new FileNotFoundException("File does not exist: " + fullpath);
}
final String js = JsonUtil.toJsonString(status);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case GETXATTRS: {
List<String> names = null;
if (xattrNames != null) {
names = Lists.newArrayListWithCapacity(xattrNames.size());
for (XAttrNameParam xattrName : xattrNames) {
if (xattrName.getXAttrName() != null) {
names.add(xattrName.getXAttrName());
}
}
}
List<XAttr> xAttrs = np.getXAttrs(fullpath, (names != null &&
!names.isEmpty()) ? XAttrHelper.buildXAttrs(names) : null);
final String js = JsonUtil.toJsonString(xAttrs,
xattrEncoding.getEncoding());
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case LISTXATTRS: {
final List<XAttr> xAttrs = np.listXAttrs(fullpath);
final String js = JsonUtil.toJsonString(xAttrs);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case CHECKACCESS: {
np.checkAccess(fullpath, FsAction.getFsAction(fsAction.getValue()));
return Response.ok().build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}
}
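  // For illustration only (a hedged sketch): typical GET request URIs dispatched to
  // the switch above, using the parameter names from the handler signature. Host,
  // port, user names, and paths are assumptions for the example.
  //   GET http://nn:50070/webhdfs/v1/user/alice/data?op=OPEN&offset=0&length=1024
  //   GET http://nn:50070/webhdfs/v1/user/alice?op=GETFILESTATUS
  //   GET http://nn:50070/webhdfs/v1/?op=GETDELEGATIONTOKEN&renewer=alice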
private static DirectoryListing getDirectoryListing(final NamenodeProtocols np,
final String p, byte[] startAfter) throws IOException {
final DirectoryListing listing = np.getListing(p, startAfter, false);
if (listing == null) { // the directory does not exist
throw new FileNotFoundException("File " + p + " does not exist.");
}
return listing;
}
private static StreamingOutput getListingStream(final NamenodeProtocols np,
final String p) throws IOException {
    // Fetch the first listing eagerly so that exceptions such as
    // FileNotFoundException or AccessControlException fail the request outright,
    // rather than surfacing after an HTTP 200 has been sent; we can't (currently)
    // return error responses in the middle of a streaming operation.
final DirectoryListing firstDirList = getDirectoryListing(np, p,
HdfsFileStatus.EMPTY_NAME);
// must save ugi because the streaming object will be executed outside
// the remote user's ugi
final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
return new StreamingOutput() {
@Override
public void write(final OutputStream outstream) throws IOException {
final PrintWriter out = new PrintWriter(new OutputStreamWriter(
outstream, Charsets.UTF_8));
out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\""
+ FileStatus.class.getSimpleName() + "\":[");
try {
// restore remote user's ugi
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException {
long n = 0;
for (DirectoryListing dirList = firstDirList; ;
dirList = getDirectoryListing(np, p, dirList.getLastName())
) {
// send each segment of the directory listing
for (HdfsFileStatus s : dirList.getPartialListing()) {
if (n++ > 0) {
out.println(',');
}
out.print(JsonUtil.toJsonString(s, false));
}
// stop if last segment
if (!dirList.hasMore()) {
break;
}
}
return null;
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
out.println();
out.println("]}}");
out.flush();
}
};
}
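  // For reference (hedged, derived from the stream written above): the LISTSTATUS
  // response is emitted incrementally with the shape
  //   {"FileStatuses":{"FileStatus":[ {...}, {...} ]}}
  // where the outer key comes from FileStatus.class.getSimpleName() + "es" and each
  // array element is one JsonUtil.toJsonString(status, false) entry.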
/** Handle HTTP DELETE request for the root. */
@DELETE
@Path("/")
@Produces(MediaType.APPLICATION_JSON)
public Response deleteRoot(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
final DelegationParam delegation,
@QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT)
final UserParam username,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT)
final DoAsParam doAsUser,
@QueryParam(DeleteOpParam.NAME) @DefaultValue(DeleteOpParam.DEFAULT)
final DeleteOpParam op,
@QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT)
final RecursiveParam recursive,
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
final SnapshotNameParam snapshotName
) throws IOException, InterruptedException {
return delete(ugi, delegation, username, doAsUser, ROOT, op, recursive,
snapshotName);
}
/** Handle HTTP DELETE request. */
@DELETE
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Produces(MediaType.APPLICATION_JSON)
public Response delete(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
final DelegationParam delegation,
@QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT)
final UserParam username,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT)
final DoAsParam doAsUser,
@PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
@QueryParam(DeleteOpParam.NAME) @DefaultValue(DeleteOpParam.DEFAULT)
final DeleteOpParam op,
@QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT)
final RecursiveParam recursive,
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
final SnapshotNameParam snapshotName
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, recursive, snapshotName);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException {
try {
return delete(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, recursive, snapshotName);
} finally {
reset();
}
}
});
}
private Response delete(
final UserGroupInformation ugi,
final DelegationParam delegation,
final UserParam username,
final DoAsParam doAsUser,
final String fullpath,
final DeleteOpParam op,
final RecursiveParam recursive,
final SnapshotNameParam snapshotName
) throws IOException {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final NamenodeProtocols np = getRPCServer(namenode);
switch(op.getValue()) {
case DELETE: {
final boolean b = np.delete(fullpath, recursive.getValue());
final String js = JsonUtil.toJsonString("boolean", b);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case DELETESNAPSHOT: {
np.deleteSnapshot(fullpath, snapshotName.getValue());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}
}
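  // For illustration only (a hedged sketch): request forms for the two DELETE
  // operations above. Host, port, paths, and the snapshot name are assumptions.
  //   DELETE http://nn:50070/webhdfs/v1/tmp/old?op=DELETE&recursive=true
  //   DELETE http://nn:50070/webhdfs/v1/snapdir?op=DELETESNAPSHOT&snapshotname=s1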
}
| 46,641 | 41.673376 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.metrics;
import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName;
import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.metrics2.source.JvmMetrics;
/**
* This class is for maintaining the various NameNode activity statistics
* and publishing them through the metrics interfaces.
*/
@Metrics(name="NameNodeActivity", about="NameNode metrics", context="dfs")
public class NameNodeMetrics {
final MetricsRegistry registry = new MetricsRegistry("namenode");
@Metric MutableCounterLong createFileOps;
@Metric MutableCounterLong filesCreated;
@Metric MutableCounterLong filesAppended;
@Metric MutableCounterLong getBlockLocations;
@Metric MutableCounterLong filesRenamed;
@Metric MutableCounterLong filesTruncated;
@Metric MutableCounterLong getListingOps;
@Metric MutableCounterLong deleteFileOps;
@Metric("Number of files/dirs deleted by delete or rename operations")
MutableCounterLong filesDeleted;
@Metric MutableCounterLong fileInfoOps;
@Metric MutableCounterLong addBlockOps;
@Metric MutableCounterLong getAdditionalDatanodeOps;
@Metric MutableCounterLong createSymlinkOps;
@Metric MutableCounterLong getLinkTargetOps;
@Metric MutableCounterLong filesInGetListingOps;
@Metric("Number of allowSnapshot operations")
MutableCounterLong allowSnapshotOps;
@Metric("Number of disallowSnapshot operations")
MutableCounterLong disallowSnapshotOps;
@Metric("Number of createSnapshot operations")
MutableCounterLong createSnapshotOps;
@Metric("Number of deleteSnapshot operations")
MutableCounterLong deleteSnapshotOps;
@Metric("Number of renameSnapshot operations")
MutableCounterLong renameSnapshotOps;
@Metric("Number of listSnapshottableDirectory operations")
MutableCounterLong listSnapshottableDirOps;
@Metric("Number of snapshotDiffReport operations")
MutableCounterLong snapshotDiffReportOps;
@Metric("Number of blockReceivedAndDeleted calls")
MutableCounterLong blockReceivedAndDeletedOps;
@Metric("Number of blockReports from individual storages")
MutableCounterLong storageBlockReportOps;
@Metric("Number of file system operations")
public long totalFileOps(){
return
getBlockLocations.value() +
createFileOps.value() +
filesAppended.value() +
addBlockOps.value() +
getAdditionalDatanodeOps.value() +
filesRenamed.value() +
filesTruncated.value() +
deleteFileOps.value() +
getListingOps.value() +
fileInfoOps.value() +
getLinkTargetOps.value() +
createSnapshotOps.value() +
deleteSnapshotOps.value() +
allowSnapshotOps.value() +
disallowSnapshotOps.value() +
renameSnapshotOps.value() +
listSnapshottableDirOps.value() +
createSymlinkOps.value() +
snapshotDiffReportOps.value();
}
@Metric("Journal transactions") MutableRate transactions;
@Metric("Journal syncs") MutableRate syncs;
final MutableQuantiles[] syncsQuantiles;
@Metric("Journal transactions batched in sync")
MutableCounterLong transactionsBatchedInSync;
@Metric("Block report") MutableRate blockReport;
final MutableQuantiles[] blockReportQuantiles;
@Metric("Cache report") MutableRate cacheReport;
final MutableQuantiles[] cacheReportQuantiles;
@Metric("Duration in SafeMode at startup in msec")
MutableGaugeInt safeModeTime;
@Metric("Time loading FS Image at startup in msec")
MutableGaugeInt fsImageLoadTime;
@Metric("GetImageServlet getEdit")
MutableRate getEdit;
@Metric("GetImageServlet getImage")
MutableRate getImage;
@Metric("GetImageServlet putImage")
MutableRate putImage;
JvmMetrics jvmMetrics = null;
NameNodeMetrics(String processName, String sessionId, int[] intervals,
final JvmMetrics jvmMetrics) {
this.jvmMetrics = jvmMetrics;
registry.tag(ProcessName, processName).tag(SessionId, sessionId);
final int len = intervals.length;
syncsQuantiles = new MutableQuantiles[len];
blockReportQuantiles = new MutableQuantiles[len];
cacheReportQuantiles = new MutableQuantiles[len];
for (int i = 0; i < len; i++) {
int interval = intervals[i];
syncsQuantiles[i] = registry.newQuantiles(
"syncs" + interval + "s",
"Journal syncs", "ops", "latency", interval);
blockReportQuantiles[i] = registry.newQuantiles(
"blockReport" + interval + "s",
"Block report", "ops", "latency", interval);
cacheReportQuantiles[i] = registry.newQuantiles(
"cacheReport" + interval + "s",
"Cache report", "ops", "latency", interval);
}
}
public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
String processName = r.toString();
MetricsSystem ms = DefaultMetricsSystem.instance();
JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms);
    // Percentile measurement is off by default; no intervals are watched unless
    // they are explicitly configured.
int[] intervals =
conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
return ms.register(new NameNodeMetrics(processName, sessionId,
intervals, jm));
}
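  // Configuration sketch (hedged, not part of the original class): the quantile
  // metrics registered in the constructor only appear when one or more percentile
  // intervals are configured, for example
  //   conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "60,300");
  // With no value set, conf.getInts(...) returns an empty array and create() builds
  // no MutableQuantiles, matching the "off by default" comment above.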
public JvmMetrics getJvmMetrics() {
return jvmMetrics;
}
public void shutdown() {
DefaultMetricsSystem.shutdown();
}
public void incrGetBlockLocations() {
getBlockLocations.incr();
}
public void incrFilesCreated() {
filesCreated.incr();
}
public void incrCreateFileOps() {
createFileOps.incr();
}
public void incrFilesAppended() {
filesAppended.incr();
}
public void incrAddBlockOps() {
addBlockOps.incr();
}
public void incrGetAdditionalDatanodeOps() {
getAdditionalDatanodeOps.incr();
}
public void incrFilesRenamed() {
filesRenamed.incr();
}
public void incrFilesTruncated() {
filesTruncated.incr();
}
public void incrFilesDeleted(long delta) {
filesDeleted.incr(delta);
}
public void incrDeleteFileOps() {
deleteFileOps.incr();
}
public void incrGetListingOps() {
getListingOps.incr();
}
public void incrFilesInGetListingOps(int delta) {
filesInGetListingOps.incr(delta);
}
public void incrFileInfoOps() {
fileInfoOps.incr();
}
public void incrCreateSymlinkOps() {
createSymlinkOps.incr();
}
public void incrGetLinkTargetOps() {
getLinkTargetOps.incr();
}
public void incrAllowSnapshotOps() {
allowSnapshotOps.incr();
}
public void incrDisAllowSnapshotOps() {
disallowSnapshotOps.incr();
}
public void incrCreateSnapshotOps() {
createSnapshotOps.incr();
}
public void incrDeleteSnapshotOps() {
deleteSnapshotOps.incr();
}
public void incrRenameSnapshotOps() {
renameSnapshotOps.incr();
}
public void incrListSnapshottableDirOps() {
listSnapshottableDirOps.incr();
}
public void incrSnapshotDiffReportOps() {
snapshotDiffReportOps.incr();
}
public void incrBlockReceivedAndDeletedOps() {
blockReceivedAndDeletedOps.incr();
}
public void incrStorageBlockReportOps() {
storageBlockReportOps.incr();
}
public void addTransaction(long latency) {
transactions.add(latency);
}
public void incrTransactionsBatchedInSync() {
transactionsBatchedInSync.incr();
}
public void addSync(long elapsed) {
syncs.add(elapsed);
for (MutableQuantiles q : syncsQuantiles) {
q.add(elapsed);
}
}
public void setFsImageLoadTime(long elapsed) {
fsImageLoadTime.set((int) elapsed);
}
public void addBlockReport(long latency) {
blockReport.add(latency);
for (MutableQuantiles q : blockReportQuantiles) {
q.add(latency);
}
}
public void addCacheBlockReport(long latency) {
cacheReport.add(latency);
for (MutableQuantiles q : cacheReportQuantiles) {
q.add(latency);
}
}
public void setSafeModeTime(long elapsed) {
safeModeTime.set((int) elapsed);
}
public void addGetEdit(long latency) {
getEdit.add(latency);
}
public void addGetImage(long latency) {
getImage.add(latency);
}
public void addPutImage(long latency) {
putImage.add(latency);
}
}
| 9,725 | 29.489028 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.metrics;
import org.apache.hadoop.classification.InterfaceAudience;
/**
*
 * This interface defines the methods to get the status of the FSNamesystem of
 * a name node.
 * It is also used for publishing via JMX (hence we follow the JMX naming
 * convention).
*
* Note we have not used the MetricsDynamicMBeanBase to implement this
* because the interface for the NameNodeStateMBean is stable and should
* be published as an interface.
*
* <p>
 * NameNode runtime activity statistics are reported in
* @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics
*
*/
@InterfaceAudience.Private
public interface FSNamesystemMBean {
/**
* The state of the file system: Safemode or Operational
* @return the state
*/
public String getFSState();
/**
* Number of allocated blocks in the system
* @return - number of allocated blocks
*/
public long getBlocksTotal();
/**
* Total storage capacity
* @return - total capacity in bytes
*/
public long getCapacityTotal();
/**
* Free (unused) storage capacity
* @return - free capacity in bytes
*/
public long getCapacityRemaining();
/**
* Used storage capacity
* @return - used capacity in bytes
*/
public long getCapacityUsed();
/**
* Total number of files and directories
* @return - num of files and directories
*/
public long getFilesTotal();
/**
* Blocks pending to be replicated
* @return - num of blocks to be replicated
*/
public long getPendingReplicationBlocks();
/**
* Blocks under replicated
* @return - num of blocks under replicated
*/
public long getUnderReplicatedBlocks();
/**
* Blocks scheduled for replication
* @return - num of blocks scheduled for replication
*/
public long getScheduledReplicationBlocks();
/**
* Total Load on the FSNamesystem
* @return - total load of FSNamesystem
*/
public int getTotalLoad();
/**
* Number of Live data nodes
* @return number of live data nodes
*/
public int getNumLiveDataNodes();
/**
* Number of dead data nodes
* @return number of dead data nodes
*/
public int getNumDeadDataNodes();
/**
* Number of stale data nodes
* @return number of stale data nodes
*/
public int getNumStaleDataNodes();
/**
* Number of decommissioned Live data nodes
* @return number of decommissioned live data nodes
*/
public int getNumDecomLiveDataNodes();
/**
* Number of decommissioned dead data nodes
* @return number of decommissioned dead data nodes
*/
public int getNumDecomDeadDataNodes();
/**
* Number of failed data volumes across all live data nodes.
* @return number of failed data volumes across all live data nodes
*/
int getVolumeFailuresTotal();
/**
* Returns an estimate of total capacity lost due to volume failures in bytes
* across all live data nodes.
* @return estimate of total capacity lost in bytes
*/
long getEstimatedCapacityLostTotal();
/**
* Number of data nodes that are in the decommissioning state
*/
public int getNumDecommissioningDataNodes();
/**
* The statistics of snapshots
*/
public String getSnapshotStats();
/**
* Return the maximum number of inodes in the file system
*/
public long getMaxObjects();
/**
* Number of blocks pending deletion
* @return number of blocks pending deletion
*/
long getPendingDeletionBlocks();
/**
* Time when block deletions will begin
* @return time when block deletions will begin
*/
long getBlockDeletionStartTime();
/**
* Number of content stale storages.
* @return number of content stale storages
*/
public int getNumStaleStorages();
/**
* Returns a nested JSON object listing the top users for different RPC
* operations over tracked time windows.
*
* @return JSON string
*/
public String getTopUserOpCounts();
/**
* Return the number of encryption zones in the system.
*/
int getNumEncryptionZones();
}
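// A hedged sketch (not part of this interface) of reading a few of these attributes
// over JMX. The ObjectName "Hadoop:service=NameNode,name=FSNamesystemState" follows
// the usual Hadoop convention but is an assumption here, as are the attribute names
// derived from the getters above.
//   MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
//   ObjectName name = new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
//   String state = (String) mbs.getAttribute(name, "FSState");
//   long blocksTotal = (Long) mbs.getAttribute(name, "BlocksTotal");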
| 4,912 | 24.455959 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/package-info.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package provides a mechanism for tracking {@link NameNode} startup
* progress. The package models NameNode startup as a series of {@link Phase}s,
* with each phase further sub-divided into multiple {@link Step}s. All phases
* are coarse-grained and typically known in advance, implied by the structure of
* the NameNode codebase (example: loading fsimage). Steps are more granular and
* typically only known at runtime after startup begins (example: loading a
* specific fsimage file with a known length from a particular location).
*
* {@link StartupProgress} provides a thread-safe data structure for
* recording status information and counters. Various parts of the NameNode
* codebase use this to describe the NameNode's activities during startup.
*
* {@link StartupProgressView} provides an immutable, consistent view of the
* current state of NameNode startup progress. This can be used to present the
* data to a user.
*
* {@link StartupProgressMetrics} exposes startup progress information via JMX
* through the standard metrics system.
*/
@InterfaceAudience.Private
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
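// A hedged lifecycle sketch of the mechanism described above; the phase and step
// values are illustrative, and getStartupProgress() is assumed to be the NameNode
// accessor rather than code quoted from it.
//   StartupProgress prog = NameNode.getStartupProgress();
//   prog.beginPhase(Phase.LOADING_FSIMAGE);
//   Step step = new Step(StepType.INODES);
//   prog.beginStep(Phase.LOADING_FSIMAGE, step);
//   // ... do the work for this step ...
//   prog.endStep(Phase.LOADING_FSIMAGE, step);
//   prog.endPhase(Phase.LOADING_FSIMAGE);
//   StartupProgressView view = prog.createView();  // immutable, consistent snapshot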
| 2,101 | 46.772727 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgress.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import static org.apache.hadoop.util.Time.monotonicNow;
import java.util.EnumSet;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* StartupProgress is used in various parts of the namenode codebase to indicate
* startup progress. Its methods provide ways to indicate begin and end of a
* {@link Phase} or {@link Step} within a phase. Additional methods provide ways
* to associate a step or phase with optional information, such as a file name or
* file size. It also provides counters, which can be incremented by the caller
* to indicate progress through a long-running task.
*
* This class is thread-safe. Any number of threads may call any methods, even
* for the same phase or step, without risk of corrupting internal state. For
* all begin/end methods and set methods, the last one in wins, overwriting any
* prior writes. Instances of {@link Counter} provide an atomic increment
* operation to prevent lost updates.
*
* After startup completes, the tracked data is frozen. Any subsequent updates
* or counter increments are no-ops.
*
* For read access, call {@link #createView()} to create a consistent view with
* a clone of the data.
*/
@InterfaceAudience.Private
public class StartupProgress {
// package-private for access by StartupProgressView
final Map<Phase, PhaseTracking> phases =
new ConcurrentHashMap<Phase, PhaseTracking>();
/**
* Allows a caller to increment a counter for tracking progress.
*/
public static interface Counter {
/**
* Atomically increments this counter, adding 1 to the current value.
*/
void increment();
}
/**
* Creates a new StartupProgress by initializing internal data structure for
* tracking progress of all defined phases.
*/
public StartupProgress() {
for (Phase phase: EnumSet.allOf(Phase.class)) {
phases.put(phase, new PhaseTracking());
}
}
/**
* Begins execution of the specified phase.
*
* @param phase Phase to begin
*/
public void beginPhase(Phase phase) {
if (!isComplete()) {
phases.get(phase).beginTime = monotonicNow();
}
}
/**
* Begins execution of the specified step within the specified phase.
*
* @param phase Phase to begin
* @param step Step to begin
*/
public void beginStep(Phase phase, Step step) {
if (!isComplete()) {
lazyInitStep(phase, step).beginTime = monotonicNow();
}
}
/**
* Ends execution of the specified phase.
*
* @param phase Phase to end
*/
public void endPhase(Phase phase) {
if (!isComplete()) {
phases.get(phase).endTime = monotonicNow();
}
}
/**
* Ends execution of the specified step within the specified phase.
*
* @param phase Phase to end
* @param step Step to end
*/
public void endStep(Phase phase, Step step) {
if (!isComplete()) {
lazyInitStep(phase, step).endTime = monotonicNow();
}
}
/**
* Returns the current run status of the specified phase.
*
* @param phase Phase to get
* @return Status run status of phase
*/
public Status getStatus(Phase phase) {
PhaseTracking tracking = phases.get(phase);
if (tracking.beginTime == Long.MIN_VALUE) {
return Status.PENDING;
} else if (tracking.endTime == Long.MIN_VALUE) {
return Status.RUNNING;
} else {
return Status.COMPLETE;
}
}
/**
* Returns a counter associated with the specified phase and step. Typical
* usage is to increment a counter within a tight loop. Callers may use this
* method to obtain a counter once and then increment that instance repeatedly
* within a loop. This prevents redundant lookup operations and object
* creation within the tight loop. Incrementing the counter is an atomic
* operation, so there is no risk of lost updates even if multiple threads
* increment the same counter.
*
* @param phase Phase to get
* @param step Step to get
* @return Counter associated with phase and step
*/
public Counter getCounter(Phase phase, Step step) {
if (!isComplete()) {
final StepTracking tracking = lazyInitStep(phase, step);
return new Counter() {
@Override
public void increment() {
tracking.count.incrementAndGet();
}
};
} else {
return new Counter() {
@Override
public void increment() {
// no-op, because startup has completed
}
};
}
}
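  // Usage sketch (illustrative names and counts, not taken from a caller): obtain the
  // counter once outside the loop, as recommended above, then increment per iteration.
  //   Counter counter = getCounter(Phase.LOADING_FSIMAGE, new Step(StepType.INODES));
  //   for (long i = 0; i < numInodes; i++) {
  //     loadOneInode();        // illustrative work
  //     counter.increment();
  //   }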
/**
* Sets counter to the specified value.
*
* @param phase Phase to set
* @param step Step to set
* @param count long to set
*/
public void setCount(Phase phase, Step step, long count) {
lazyInitStep(phase, step).count.set(count);
}
/**
* Sets the optional file name associated with the specified phase. For
* example, this can be used while loading fsimage to indicate the full path to
* the fsimage file.
*
* @param phase Phase to set
* @param file String file name to set
*/
public void setFile(Phase phase, String file) {
if (!isComplete()) {
phases.get(phase).file = file;
}
}
/**
* Sets the optional size in bytes associated with the specified phase. For
* example, this can be used while loading fsimage to indicate the size of the
* fsimage file.
*
* @param phase Phase to set
* @param size long to set
*/
public void setSize(Phase phase, long size) {
if (!isComplete()) {
phases.get(phase).size = size;
}
}
/**
* Sets the total associated with the specified phase and step. For example,
* this can be used while loading edits to indicate the number of operations to
* be applied.
*
* @param phase Phase to set
* @param step Step to set
* @param total long to set
*/
public void setTotal(Phase phase, Step step, long total) {
if (!isComplete()) {
lazyInitStep(phase, step).total = total;
}
}
/**
* Creates a {@link StartupProgressView} containing data cloned from this
* StartupProgress. Subsequent updates to this StartupProgress will not be
* shown in the view. This gives a consistent, unchanging view for callers
* that need to perform multiple related read operations. Calculations that
* require aggregation, such as overall percent complete, will not be impacted
* by mutations performed in other threads mid-way through the calculation.
*
* @return StartupProgressView containing cloned data
*/
public StartupProgressView createView() {
return new StartupProgressView(this);
}
/**
* Returns true if the entire startup process has completed, determined by
* checking if each phase is complete.
*
* @return boolean true if the entire startup process has completed
*/
private boolean isComplete() {
for (Phase phase: EnumSet.allOf(Phase.class)) {
if (getStatus(phase) != Status.COMPLETE) {
return false;
}
}
return true;
}
/**
* Lazily initializes the internal data structure for tracking the specified
* phase and step. Returns either the newly initialized data structure or the
* existing one. Initialization is atomic, so there is no risk of lost updates
* even if multiple threads attempt to initialize the same step simultaneously.
*
* @param phase Phase to initialize
* @param step Step to initialize
* @return StepTracking newly initialized, or existing if found
*/
private StepTracking lazyInitStep(Phase phase, Step step) {
ConcurrentMap<Step, StepTracking> steps = phases.get(phase).steps;
if (!steps.containsKey(step)) {
steps.putIfAbsent(step, new StepTracking());
}
return steps.get(step);
}
}
| 8,707 | 31.132841 | 81 |
java
|