repo (string, lengths 1-191, ⌀ allowed) | file (string, lengths 23-351) | code (string, lengths 0-5.32M) | file_length (int64, 0-5.32M) | avg_line_length (float64, 0-2.9k) | max_line_length (int64, 0-288k) | extension_type (string, 1 class)
---|---|---|---|---|---|---|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/InconsistentFSStateException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.common;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
/**
* Thrown when the file system state is inconsistent
* and not recoverable.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class InconsistentFSStateException extends IOException {
private static final long serialVersionUID = 1L;
public InconsistentFSStateException(File dir, String descr) {
super("Directory " + getFilePath(dir)
+ " is in an inconsistent state: " + descr);
}
public InconsistentFSStateException(File dir, String descr, Throwable ex) {
this(dir, descr + "\n" + StringUtils.stringifyException(ex));
}
private static String getFilePath(File dir) {
try {
return dir.getCanonicalPath();
} catch(IOException e) {}
return dir.getPath();
}
}
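// ---------------------------------------------------------------------------
// Editor's hedged usage sketch (not part of the original Hadoop source): shows
// how the exception message is assembled from a directory and a description.
// The path, description and wrapped cause below are hypothetical.
class InconsistentFSStateExceptionExample {
  public static void main(String[] args) {
    File dir = new File("/tmp/dfs/name");                      // hypothetical storage dir
    IOException cause = new IOException("VERSION not found");  // hypothetical cause
    InconsistentFSStateException e =
        new InconsistentFSStateException(dir, "cannot load the VERSION file", cause);
    // Prints something like:
    // "Directory /tmp/dfs/name is in an inconsistent state: cannot load the VERSION file"
    // followed by the stringified cause.
    System.out.println(e.getMessage());
  }
}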
| 1,820 | 33.358491 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.common;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Map;
import java.util.Properties;
import java.util.SortedSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import com.google.common.base.Joiner;
/**
* Common class for storage information.
*
* TODO namespaceID should be long and computed as hash(address + port)
*/
@InterfaceAudience.Private
public class StorageInfo {
public int layoutVersion; // layout version of the storage data
public int namespaceID; // id of the file system
public String clusterID; // id of the cluster
public long cTime; // creation time of the file system state
protected final NodeType storageType; // Type of the node using this storage
protected static final String STORAGE_FILE_VERSION = "VERSION";
public StorageInfo(NodeType type) {
this(0, 0, "", 0L, type);
}
public StorageInfo(int layoutV, int nsID, String cid, long cT, NodeType type) {
layoutVersion = layoutV;
clusterID = cid;
namespaceID = nsID;
cTime = cT;
storageType = type;
}
public StorageInfo(StorageInfo from) {
this(from.layoutVersion, from.namespaceID, from.clusterID, from.cTime,
from.storageType);
}
/**
* Layout version of the storage data.
*/
public int getLayoutVersion(){ return layoutVersion; }
/**
* Namespace id of the file system.<p>
* Assigned to the file system at formatting and never changes after that.
* Shared by all file system components.
*/
public int getNamespaceID() { return namespaceID; }
/**
* cluster id of the file system.<p>
*/
public String getClusterID() { return clusterID; }
/**
* Creation time of the file system state.<p>
* Modified during upgrades.
*/
public long getCTime() { return cTime; }
public void setStorageInfo(StorageInfo from) {
layoutVersion = from.layoutVersion;
clusterID = from.clusterID;
namespaceID = from.namespaceID;
cTime = from.cTime;
}
public boolean versionSupportsFederation(
Map<Integer, SortedSet<LayoutFeature>> map) {
return LayoutVersion.supports(map, LayoutVersion.Feature.FEDERATION,
layoutVersion);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("lv=").append(layoutVersion).append(";cid=").append(clusterID)
.append(";nsid=").append(namespaceID).append(";c=").append(cTime);
return sb.toString();
}
public String toColonSeparatedString() {
return Joiner.on(":").join(
layoutVersion, namespaceID, cTime, clusterID);
}
/**
* Set the common storage fields from the given properties.
* Should be overridden if additional fields need to be read.
*
* @param props properties
* @param sd storage directory
* @throws IOException on error
*/
protected void setFieldsFromProperties(
Properties props, StorageDirectory sd) throws IOException {
setLayoutVersion(props, sd);
setNamespaceID(props, sd);
setcTime(props, sd);
setClusterId(props, layoutVersion, sd);
checkStorageType(props, sd);
}
/** Validate and set storage type from {@link Properties}*/
protected void checkStorageType(Properties props, StorageDirectory sd)
throws InconsistentFSStateException {
if (storageType == null) { //don't care about storage type
return;
}
NodeType type = NodeType.valueOf(getProperty(props, sd, "storageType"));
if (!storageType.equals(type)) {
throw new InconsistentFSStateException(sd.root,
"Incompatible node types: storageType=" + storageType
+ " but StorageDirectory type=" + type);
}
}
/** Validate and set ctime from {@link Properties}*/
protected void setcTime(Properties props, StorageDirectory sd)
throws InconsistentFSStateException {
cTime = Long.parseLong(getProperty(props, sd, "cTime"));
}
/** Validate and set clusterId from {@link Properties}*/
protected void setClusterId(Properties props, int layoutVersion,
StorageDirectory sd) throws InconsistentFSStateException {
// Set cluster ID in version that supports federation
if (LayoutVersion.supports(getServiceLayoutFeatureMap(),
Feature.FEDERATION, layoutVersion)) {
String cid = getProperty(props, sd, "clusterID");
if (!(clusterID.equals("") || cid.equals("") || clusterID.equals(cid))) {
throw new InconsistentFSStateException(sd.getRoot(),
"cluster Id is incompatible with others.");
}
clusterID = cid;
}
}
/** Validate and set layout version from {@link Properties}*/
protected void setLayoutVersion(Properties props, StorageDirectory sd)
throws IncorrectVersionException, InconsistentFSStateException {
int lv = Integer.parseInt(getProperty(props, sd, "layoutVersion"));
if (lv < getServiceLayoutVersion()) { // future version
throw new IncorrectVersionException(getServiceLayoutVersion(), lv,
"storage directory " + sd.root.getAbsolutePath());
}
layoutVersion = lv;
}
/** Validate and set namespaceID from {@link Properties}*/
protected void setNamespaceID(Properties props, StorageDirectory sd)
throws InconsistentFSStateException {
int nsId = Integer.parseInt(getProperty(props, sd, "namespaceID"));
if (namespaceID != 0 && nsId != 0 && namespaceID != nsId) {
throw new InconsistentFSStateException(sd.root,
"namespaceID is incompatible with others.");
}
namespaceID = nsId;
}
public void setServiceLayoutVersion(int lv) {
this.layoutVersion = lv;
}
public int getServiceLayoutVersion() {
return storageType == NodeType.DATA_NODE ? HdfsServerConstants.DATANODE_LAYOUT_VERSION
: HdfsServerConstants.NAMENODE_LAYOUT_VERSION;
}
public Map<Integer, SortedSet<LayoutFeature>> getServiceLayoutFeatureMap() {
return storageType == NodeType.DATA_NODE? DataNodeLayoutVersion.FEATURES
: NameNodeLayoutVersion.FEATURES;
}
protected static String getProperty(Properties props, StorageDirectory sd,
String name) throws InconsistentFSStateException {
String property = props.getProperty(name);
if (property == null) {
throw new InconsistentFSStateException(sd.root, "file "
+ STORAGE_FILE_VERSION + " has " + name + " missing.");
}
return property;
}
public static int getNsIdFromColonSeparatedString(String in) {
return Integer.parseInt(in.split(":")[1]);
}
public static String getClusterIdFromColonSeparatedString(String in) {
return in.split(":")[3];
}
/**
* Read properties from the VERSION file in the given storage directory.
*/
public void readProperties(StorageDirectory sd) throws IOException {
Properties props = readPropertiesFile(sd.getVersionFile());
setFieldsFromProperties(props, sd);
}
/**
* Read properties from the previous/VERSION file in the given storage directory.
*/
public void readPreviousVersionProperties(StorageDirectory sd)
throws IOException {
Properties props = readPropertiesFile(sd.getPreviousVersionFile());
setFieldsFromProperties(props, sd);
}
public static Properties readPropertiesFile(File from) throws IOException {
RandomAccessFile file = new RandomAccessFile(from, "rws");
FileInputStream in = null;
Properties props = new Properties();
try {
in = new FileInputStream(file.getFD());
file.seek(0);
props.load(in);
} finally {
if (in != null) {
in.close();
}
file.close();
}
return props;
}
}
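// ---------------------------------------------------------------------------
// Editor's hedged usage sketch (not part of the original Hadoop source): round-trips
// the colon-separated form produced above. The layout version, namespace ID, cTime
// and cluster ID values are made up.
class StorageInfoExample {
  public static void main(String[] args) {
    StorageInfo info = new StorageInfo(-60, 42, "CID-example", 0L, NodeType.NAME_NODE);
    String s = info.toColonSeparatedString();
    System.out.println(s);                                                    // -60:42:0:CID-example
    System.out.println(StorageInfo.getNsIdFromColonSeparatedString(s));       // 42
    System.out.println(StorageInfo.getClusterIdFromColonSeparatedString(s));  // CID-example
  }
}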
| 8,970 | 33.503846 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.common;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.util.StringUtils;
/************************************
* Some handy internal HDFS constants
*
************************************/
@InterfaceAudience.Private
public interface HdfsServerConstants {
int MIN_BLOCKS_FOR_WRITE = 1;
/**
* For an HDFS client to write to a file, a lease is granted; during the lease
* period, no other client can write to the file. The writing client can
* periodically renew the lease. When the file is closed, the lease is
* revoked. The lease duration is bound by this soft limit and a
* {@link HdfsServerConstants#LEASE_HARDLIMIT_PERIOD hard limit}. Until the
* soft limit expires, the writer has sole write access to the file. If the
* soft limit expires and the client fails to close the file or renew the
* lease, another client can preempt the lease.
*/
long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
/**
* For an HDFS client to write to a file, a lease is granted; during the lease
* period, no other client can write to the file. The writing client can
* periodically renew the lease. When the file is closed, the lease is
* revoked. The lease duration is bound by a
* {@link HdfsServerConstants#LEASE_SOFTLIMIT_PERIOD soft limit} and this hard
* limit. If the hard limit expires and the client has failed to renew
* the lease, HDFS assumes that the client has quit and will automatically
* close the file on behalf of the writer, and recover the lease.
*/
long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
// We need to limit the length and depth of a path in the filesystem.
// HADOOP-438
// Currently we set the maximum length to 8k characters and the maximum depth
// to 1k.
int MAX_PATH_LENGTH = 8000;
int MAX_PATH_DEPTH = 1000;
// An invalid transaction ID that will never be seen in a real namesystem.
long INVALID_TXID = -12345;
// Number of generation stamps reserved for legacy blocks.
long RESERVED_GENERATION_STAMPS_V1 =
1024L * 1024 * 1024 * 1024;
/**
* Current layout version for NameNode.
* Please see {@link NameNodeLayoutVersion.Feature} when adding a new layout version.
*/
int NAMENODE_LAYOUT_VERSION
= NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
/**
* Current layout version for DataNode.
* Please see {@link DataNodeLayoutVersion.Feature} when adding a new layout version.
*/
int DATANODE_LAYOUT_VERSION
= DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
/**
* Path components that are reserved in HDFS.
* <p>
* .reserved is only reserved under root ("/").
*/
String[] RESERVED_PATH_COMPONENTS = new String[] {
HdfsConstants.DOT_SNAPSHOT_DIR,
FSDirectory.DOT_RESERVED_STRING
};
byte[] DOT_SNAPSHOT_DIR_BYTES
= DFSUtil.string2Bytes(HdfsConstants.DOT_SNAPSHOT_DIR);
byte MEMORY_STORAGE_POLICY_ID = 15;
byte ALLSSD_STORAGE_POLICY_ID = 12;
byte ONESSD_STORAGE_POLICY_ID = 10;
byte HOT_STORAGE_POLICY_ID = 7;
byte WARM_STORAGE_POLICY_ID = 5;
byte COLD_STORAGE_POLICY_ID = 2;
/**
* Type of the node
*/
enum NodeType {
NAME_NODE,
DATA_NODE,
JOURNAL_NODE
}
/** Startup options for rolling upgrade. */
public static enum RollingUpgradeStartupOption{
ROLLBACK, DOWNGRADE, STARTED;
public String getOptionString() {
return StartupOption.ROLLINGUPGRADE.getName() + " "
+ StringUtils.toLowerCase(name());
}
public boolean matches(StartupOption option) {
return option == StartupOption.ROLLINGUPGRADE
&& option.getRollingUpgradeStartupOption() == this;
}
private static final RollingUpgradeStartupOption[] VALUES = values();
static RollingUpgradeStartupOption fromString(String s) {
for(RollingUpgradeStartupOption opt : VALUES) {
if (opt.name().equalsIgnoreCase(s)) {
return opt;
}
}
throw new IllegalArgumentException("Failed to convert \"" + s
+ "\" to " + RollingUpgradeStartupOption.class.getSimpleName());
}
public static String getAllOptionString() {
final StringBuilder b = new StringBuilder("<");
for(RollingUpgradeStartupOption opt : VALUES) {
b.append(StringUtils.toLowerCase(opt.name())).append("|");
}
b.setCharAt(b.length() - 1, '>');
return b.toString();
}
}
/** Startup options */
enum StartupOption{
FORMAT ("-format"),
CLUSTERID ("-clusterid"),
GENCLUSTERID ("-genclusterid"),
REGULAR ("-regular"),
BACKUP ("-backup"),
CHECKPOINT("-checkpoint"),
UPGRADE ("-upgrade"),
ROLLBACK("-rollback"),
FINALIZE("-finalize"),
ROLLINGUPGRADE("-rollingUpgrade"),
IMPORT ("-importCheckpoint"),
BOOTSTRAPSTANDBY("-bootstrapStandby"),
INITIALIZESHAREDEDITS("-initializeSharedEdits"),
RECOVER ("-recover"),
FORCE("-force"),
NONINTERACTIVE("-nonInteractive"),
RENAMERESERVED("-renameReserved"),
METADATAVERSION("-metadataVersion"),
UPGRADEONLY("-upgradeOnly"),
// The -hotswap constant should not be used as a startup option, it is
// only used for StorageDirectory.analyzeStorage() in hot swap drive scenario.
// TODO refactor StorageDirectory.analyzeStorage() so that we can do away with
// this in StartupOption.
HOTSWAP("-hotswap");
private static final Pattern ENUM_WITH_ROLLING_UPGRADE_OPTION = Pattern.compile(
"(\\w+)\\((\\w+)\\)");
private final String name;
// Used only with format and upgrade options
private String clusterId = null;
// Used only by rolling upgrade
private RollingUpgradeStartupOption rollingUpgradeStartupOption;
// Used only with format option
private boolean isForceFormat = false;
private boolean isInteractiveFormat = true;
// Used only with recovery option
private int force = 0;
StartupOption(String arg) {this.name = arg;}
public String getName() {return name;}
public NamenodeRole toNodeRole() {
switch(this) {
case BACKUP:
return NamenodeRole.BACKUP;
case CHECKPOINT:
return NamenodeRole.CHECKPOINT;
default:
return NamenodeRole.NAMENODE;
}
}
public void setClusterId(String cid) {
clusterId = cid;
}
public String getClusterId() {
return clusterId;
}
public void setRollingUpgradeStartupOption(String opt) {
Preconditions.checkState(this == ROLLINGUPGRADE);
rollingUpgradeStartupOption = RollingUpgradeStartupOption.fromString(opt);
}
public RollingUpgradeStartupOption getRollingUpgradeStartupOption() {
Preconditions.checkState(this == ROLLINGUPGRADE);
return rollingUpgradeStartupOption;
}
public MetaRecoveryContext createRecoveryContext() {
if (!name.equals(RECOVER.name))
return null;
return new MetaRecoveryContext(force);
}
public void setForce(int force) {
this.force = force;
}
public int getForce() {
return this.force;
}
public boolean getForceFormat() {
return isForceFormat;
}
public void setForceFormat(boolean force) {
isForceFormat = force;
}
public boolean getInteractiveFormat() {
return isInteractiveFormat;
}
public void setInteractiveFormat(boolean interactive) {
isInteractiveFormat = interactive;
}
@Override
public String toString() {
if (this == ROLLINGUPGRADE) {
return new StringBuilder(super.toString())
.append("(").append(getRollingUpgradeStartupOption()).append(")")
.toString();
}
return super.toString();
}
static public StartupOption getEnum(String value) {
Matcher matcher = ENUM_WITH_ROLLING_UPGRADE_OPTION.matcher(value);
if (matcher.matches()) {
StartupOption option = StartupOption.valueOf(matcher.group(1));
option.setRollingUpgradeStartupOption(matcher.group(2));
return option;
} else {
return StartupOption.valueOf(value);
}
}
}
// Timeouts for communicating with DataNode for streaming writes/reads
int READ_TIMEOUT = 60 * 1000;
int READ_TIMEOUT_EXTENSION = 5 * 1000;
int WRITE_TIMEOUT = 8 * 60 * 1000;
int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline
/**
* Defines the NameNode role.
*/
enum NamenodeRole {
NAMENODE ("NameNode"),
BACKUP ("Backup Node"),
CHECKPOINT("Checkpoint Node");
private String description = null;
NamenodeRole(String arg) {this.description = arg;}
@Override
public String toString() {
return description;
}
}
/**
* States a block replica can go through while it is being constructed.
*/
enum ReplicaState {
/** Replica is finalized. The state when replica is not modified. */
FINALIZED(0),
/** Replica is being written to. */
RBW(1),
/** Replica is waiting to be recovered. */
RWR(2),
/** Replica is under recovery. */
RUR(3),
/** Temporary replica: created for replication and relocation only. */
TEMPORARY(4);
private final int value;
ReplicaState(int v) {
value = v;
}
public int getValue() {
return value;
}
public static ReplicaState getState(int v) {
return ReplicaState.values()[v];
}
/** Read from in */
public static ReplicaState read(DataInput in) throws IOException {
return values()[in.readByte()];
}
/** Write to out */
public void write(DataOutput out) throws IOException {
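// Relies on the declaration order above: ordinal() equals the numeric value of each state.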
out.writeByte(ordinal());
}
}
/**
* States a block can go through while it is under construction.
*/
enum BlockUCState {
/**
* Block construction completed.<br>
* The block has at least the configured minimal replication number
* of {@link ReplicaState#FINALIZED} replica(s), and is not going to be
* modified.
* NOTE: in some special cases, a block may be forced into the COMPLETE state
* even if it does not have the required minimal number of replicas.
*/
COMPLETE,
/**
* The block is under construction.<br>
* It has been recently allocated for write or append.
*/
UNDER_CONSTRUCTION,
/**
* The block is under recovery.<br>
* When a file lease expires its last block may not be {@link #COMPLETE}
* and needs to go through a recovery procedure,
* which synchronizes the existing replicas contents.
*/
UNDER_RECOVERY,
/**
* The block is committed.<br>
* The client reported that all bytes are written to data-nodes
* with the given generation stamp and block length, but no
* {@link ReplicaState#FINALIZED}
* replicas have yet been reported by the data-nodes themselves.
*/
COMMITTED
}
String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;
String CRYPTO_XATTR_ENCRYPTION_ZONE =
"raw.hdfs.crypto.encryption.zone";
String CRYPTO_XATTR_FILE_ENCRYPTION_INFO =
"raw.hdfs.crypto.file.encryption.info";
String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
"security.hdfs.unreadable.by.superuser";
}
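// ---------------------------------------------------------------------------
// Editor's hedged usage sketch (not part of the original Hadoop source): exercises
// the StartupOption parsing and the ReplicaState serialization defined above. The
// option strings are just the enum names; nothing outside this file is assumed.
class HdfsServerConstantsExample {
  public static void main(String[] args) throws IOException {
    // Plain option: resolved by enum name.
    HdfsServerConstants.StartupOption opt =
        HdfsServerConstants.StartupOption.getEnum("UPGRADE");
    System.out.println(opt.getName());                          // -upgrade

    // "NAME(SUBOPTION)" form, matched by the regex in getEnum().
    HdfsServerConstants.StartupOption ru =
        HdfsServerConstants.StartupOption.getEnum("ROLLINGUPGRADE(ROLLBACK)");
    System.out.println(ru);                                      // ROLLINGUPGRADE(ROLLBACK)
    System.out.println(
        HdfsServerConstants.RollingUpgradeStartupOption.getAllOptionString());
    // prints <rollback|downgrade|started>

    // ReplicaState round trip through a single byte (write uses the ordinal).
    java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
    HdfsServerConstants.ReplicaState.RBW.write(new java.io.DataOutputStream(bytes));
    HdfsServerConstants.ReplicaState state = HdfsServerConstants.ReplicaState.read(
        new java.io.DataInputStream(new java.io.ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(state);                                   // RBW
  }
}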
| 12,697 | 31.558974 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.common;
import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.concurrent.ThreadLocalRandom;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.hdfs.web.resources.DoAsParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.authorize.ProxyServers;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
@InterfaceAudience.Private
public class JspHelper {
public static final String CURRENT_CONF = "current.conf";
public static final String DELEGATION_PARAMETER_NAME = DelegationParam.NAME;
public static final String NAMENODE_ADDRESS = "nnaddr";
static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME +
"=";
private static final Log LOG = LogFactory.getLog(JspHelper.class);
/** Private constructor to prevent creation of JspHelper objects. */
private JspHelper() {}
// data structure to count number of blocks on datanodes.
private static class NodeRecord extends DatanodeInfo {
int frequency;
public NodeRecord(DatanodeInfo info, int count) {
super(info);
this.frequency = count;
}
@Override
public boolean equals(Object obj) {
// Sufficient to use super equality as datanodes are uniquely identified
// by DatanodeID
return (this == obj) || super.equals(obj);
}
@Override
public int hashCode() {
// Super implementation is sufficient
return super.hashCode();
}
}
// compare two records based on their frequency
private static class NodeRecordComparator implements Comparator<NodeRecord> {
@Override
public int compare(NodeRecord o1, NodeRecord o2) {
if (o1.frequency < o2.frequency) {
return -1;
} else if (o1.frequency > o2.frequency) {
return 1;
}
return 0;
}
}
public static DatanodeInfo bestNode(LocatedBlocks blks, Configuration conf)
throws IOException {
HashMap<DatanodeInfo, NodeRecord> map =
new HashMap<DatanodeInfo, NodeRecord>();
for (LocatedBlock block : blks.getLocatedBlocks()) {
DatanodeInfo[] nodes = block.getLocations();
for (DatanodeInfo node : nodes) {
NodeRecord record = map.get(node);
if (record == null) {
map.put(node, new NodeRecord(node, 1));
} else {
record.frequency++;
}
}
}
NodeRecord[] nodes = map.values().toArray(new NodeRecord[map.size()]);
Arrays.sort(nodes, new NodeRecordComparator());
return bestNode(nodes, false);
}
private static DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom)
throws IOException {
if (nodes == null || nodes.length == 0) {
throw new IOException("No nodes contain this block");
}
int l = 0;
while (l < nodes.length && !nodes[l].isDecommissioned()) {
++l;
}
if (l == 0) {
throw new IOException("No active nodes contain this block");
}
int index = doRandom ? ThreadLocalRandom.current().nextInt(l) : 0;
return nodes[index];
}
/**
* Validate filename.
* @return null if the filename is invalid.
* Otherwise, return the validated filename.
*/
public static String validatePath(String p) {
return p == null || p.length() == 0?
null: new Path(p).toUri().getPath();
}
/**
* Validate a long value.
* @return null if the value is invalid.
* Otherwise, return the validated Long object.
*/
public static Long validateLong(String value) {
return value == null? null: Long.parseLong(value);
}
public static String getDefaultWebUserName(Configuration conf) throws IOException {
String user = conf.get(
HADOOP_HTTP_STATIC_USER, DEFAULT_HADOOP_HTTP_STATIC_USER);
if (user == null || user.length() == 0) {
throw new IOException("Cannot determine UGI from request or conf");
}
return user;
}
private static InetSocketAddress getNNServiceAddress(ServletContext context,
HttpServletRequest request) {
String namenodeAddressInUrl = request.getParameter(NAMENODE_ADDRESS);
InetSocketAddress namenodeAddress = null;
if (namenodeAddressInUrl != null) {
namenodeAddress = NetUtils.createSocketAddr(namenodeAddressInUrl);
} else if (context != null) {
namenodeAddress = NameNodeHttpServer.getNameNodeAddressFromContext(
context);
}
if (namenodeAddress != null) {
return namenodeAddress;
}
return null;
}
/** Same as getUGI(null, request, conf). */
public static UserGroupInformation getUGI(HttpServletRequest request,
Configuration conf) throws IOException {
return getUGI(null, request, conf);
}
/** Same as getUGI(context, request, conf, KERBEROS_SSL, true). */
public static UserGroupInformation getUGI(ServletContext context,
HttpServletRequest request, Configuration conf) throws IOException {
return getUGI(context, request, conf, AuthenticationMethod.KERBEROS_SSL, true);
}
/**
* Get {@link UserGroupInformation} and possibly the delegation token out of
* the request.
* @param context the ServletContext that is serving this request.
* @param request the http request
* @param conf configuration
* @param secureAuthMethod the AuthenticationMethod used in secure mode.
* @param tryUgiParameter Should it try the ugi parameter?
* @return a new user from the request
* @throws AccessControlException if the request has no token
*/
public static UserGroupInformation getUGI(ServletContext context,
HttpServletRequest request, Configuration conf,
final AuthenticationMethod secureAuthMethod,
final boolean tryUgiParameter) throws IOException {
UserGroupInformation ugi = null;
final String usernameFromQuery = getUsernameFromQuery(request, tryUgiParameter);
final String doAsUserFromQuery = request.getParameter(DoAsParam.NAME);
final String remoteUser;
if (UserGroupInformation.isSecurityEnabled()) {
remoteUser = request.getRemoteUser();
final String tokenString = request.getParameter(DELEGATION_PARAMETER_NAME);
if (tokenString != null) {
// Token-based connections need only verify the effective user, and
// disallow proxying to different user. Proxy authorization checks
// are not required since the checks apply to issuing a token.
ugi = getTokenUGI(context, request, tokenString, conf);
checkUsername(ugi.getShortUserName(), usernameFromQuery);
checkUsername(ugi.getShortUserName(), doAsUserFromQuery);
} else if (remoteUser == null) {
throw new IOException(
"Security enabled but user not authenticated by filter");
}
} else {
// Security's not on, pull from url or use default web user
remoteUser = (usernameFromQuery == null)
? getDefaultWebUserName(conf) // not specified in request
: usernameFromQuery;
}
if (ugi == null) { // security is off, or there's no token
ugi = UserGroupInformation.createRemoteUser(remoteUser);
checkUsername(ugi.getShortUserName(), usernameFromQuery);
if (UserGroupInformation.isSecurityEnabled()) {
// This is not necessarily true, could have been auth'ed by user-facing
// filter
ugi.setAuthenticationMethod(secureAuthMethod);
}
if (doAsUserFromQuery != null) {
// create and attempt to authorize a proxy user
ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi);
ProxyUsers.authorize(ugi, getRemoteAddr(request));
}
}
if(LOG.isDebugEnabled())
LOG.debug("getUGI is returning: " + ugi.getShortUserName());
return ugi;
}
private static UserGroupInformation getTokenUGI(ServletContext context,
HttpServletRequest request,
String tokenString,
Configuration conf)
throws IOException {
final Token<DelegationTokenIdentifier> token =
new Token<DelegationTokenIdentifier>();
token.decodeFromUrlString(tokenString);
InetSocketAddress serviceAddress = getNNServiceAddress(context, request);
if (serviceAddress != null) {
SecurityUtil.setTokenService(token, serviceAddress);
token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
}
ByteArrayInputStream buf =
new ByteArrayInputStream(token.getIdentifier());
DataInputStream in = new DataInputStream(buf);
DelegationTokenIdentifier id = new DelegationTokenIdentifier();
id.readFields(in);
if (context != null) {
final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
if (nn != null) {
// Verify the token.
nn.getNamesystem().verifyToken(id, token.getPassword());
}
}
UserGroupInformation ugi = id.getUser();
ugi.addToken(token);
return ugi;
}
// honor the X-Forwarded-For header set by a configured set of trusted
// proxy servers. allows audit logging and proxy user checks to work
// via an http proxy
public static String getRemoteAddr(HttpServletRequest request) {
String remoteAddr = request.getRemoteAddr();
String proxyHeader = request.getHeader("X-Forwarded-For");
if (proxyHeader != null && ProxyServers.isProxyServer(remoteAddr)) {
final String clientAddr = proxyHeader.split(",")[0].trim();
if (!clientAddr.isEmpty()) {
remoteAddr = clientAddr;
}
}
return remoteAddr;
}
/**
* Expected user name should be a short name.
*/
public static void checkUsername(final String expected, final String name
) throws IOException {
if (expected == null && name != null) {
throw new IOException("Usernames not matched: expecting null but name="
+ name);
}
if (name == null) { //name is optional, null is okay
return;
}
KerberosName u = new KerberosName(name);
String shortName = u.getShortName();
if (!shortName.equals(expected)) {
throw new IOException("Usernames not matched: name=" + shortName
+ " != expected=" + expected);
}
}
private static String getUsernameFromQuery(final HttpServletRequest request,
final boolean tryUgiParameter) {
String username = request.getParameter(UserParam.NAME);
if (username == null && tryUgiParameter) {
//try ugi parameter
final String ugiStr = request.getParameter("ugi");
if (ugiStr != null) {
username = ugiStr.split(",")[0];
}
}
return username;
}
/**
* Returns the url parameter for the given token string.
* @param tokenString
* @return url parameter
*/
public static String getDelegationTokenUrlParam(String tokenString) {
if (tokenString == null ) {
return "";
}
if (UserGroupInformation.isSecurityEnabled()) {
return SET_DELEGATION + tokenString;
} else {
return "";
}
}
/**
* Returns the url parameter for the given string, prefixed with
* paramSeparator.
*
* @param name parameter name
* @param val parameter value
* @param paramSeparator URL parameter prefix, i.e. either '?' or '&'
* @return url parameter
*/
public static String getUrlParam(String name, String val, String paramSeparator) {
return val == null ? "" : paramSeparator + name + "=" + val;
}
/**
* Returns the url parameter for the given string, prefixed with '?' if
* firstParam is true, prefixed with '&' if firstParam is false.
*
* @param name parameter name
* @param val parameter value
* @param firstParam true if this is the first parameter in the list, false otherwise
* @return url parameter
*/
public static String getUrlParam(String name, String val, boolean firstParam) {
return getUrlParam(name, val, firstParam ? "?" : "&");
}
/**
* Returns the url parameter for the given string, prefixed with '&'.
*
* @param name parameter name
* @param val parameter value
* @return url parameter
*/
public static String getUrlParam(String name, String val) {
return getUrlParam(name, val, false);
}
}
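// ---------------------------------------------------------------------------
// Editor's hedged usage sketch (not part of the original Hadoop source): builds a
// small query string with the getUrlParam overloads above. The path and parameter
// names/values are hypothetical.
class JspHelperUrlParamExample {
  public static void main(String[] args) {
    String url = "/data/some/file"
        + JspHelper.getUrlParam("op", "OPEN", true)   // first parameter -> "?op=OPEN"
        + JspHelper.getUrlParam("offset", "1024")     // later parameter -> "&offset=1024"
        + JspHelper.getUrlParam("length", null);      // null value      -> ""
    System.out.println(url);                          // /data/some/file?op=OPEN&offset=1024
  }
}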
| 15,055 | 35.901961 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.common;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
/**
* Thrown when an external version does not match
* the current version of the application.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class IncorrectVersionException extends IOException {
private static final long serialVersionUID = 1L;
public IncorrectVersionException(String message) {
super(message);
}
public IncorrectVersionException(String minimumVersion, String reportedVersion,
String remoteDaemon, String thisDaemon) {
this("The reported " + remoteDaemon + " version is too low to communicate" +
" with this " + thisDaemon + ". " + remoteDaemon + " version: '" +
reportedVersion + "' Minimum " + remoteDaemon + " version: '" +
minimumVersion + "'");
}
public IncorrectVersionException(int currentLayoutVersion,
int versionReported, String ofWhat) {
this(versionReported, ofWhat, currentLayoutVersion);
}
public IncorrectVersionException(int versionReported,
String ofWhat,
int versionExpected) {
this("Unexpected version "
+ (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
+ versionReported + ". Expecting = " + versionExpected + ".");
}
}
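// ---------------------------------------------------------------------------
// Editor's hedged usage sketch (not part of the original Hadoop source): shows the
// message produced by the layout-version constructor above. The version numbers and
// the directory name are made up.
class IncorrectVersionExceptionExample {
  public static void main(String[] args) {
    // currentLayoutVersion = -60, versionReported = -57
    IncorrectVersionException e =
        new IncorrectVersionException(-60, -57, "storage directory /tmp/dfs/name");
    // Prints: Unexpected version of storage directory /tmp/dfs/name. Reported: -57. Expecting = -60.
    System.out.println(e.getMessage());
  }
}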
| 2,325 | 36.516129 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.common;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public final class Util {
private final static Log LOG = LogFactory.getLog(Util.class.getName());
/**
* Interprets the passed string as a URI. In case of error it
* assumes the specified string is a file.
*
* @param s the string to interpret
* @return the resulting URI
* @throws IOException
*/
public static URI stringAsURI(String s) throws IOException {
URI u = null;
// try to make a URI
try {
u = new URI(s);
} catch (URISyntaxException e){
LOG.error("Syntax error in URI " + s
+ ". Please check hdfs configuration.", e);
}
// if URI is null or scheme is undefined, then assume it's file://
if(u == null || u.getScheme() == null){
LOG.warn("Path " + s + " should be specified as a URI "
+ "in configuration files. Please update hdfs configuration.");
u = fileAsURI(new File(s));
}
return u;
}
/**
* Converts the passed File to a URI. This method trims the trailing slash if
* one is appended because the underlying file is in fact a directory that
* exists.
*
* @param f the file to convert
* @return the resulting URI
* @throws IOException
*/
public static URI fileAsURI(File f) throws IOException {
URI u = f.getCanonicalFile().toURI();
// trim the trailing slash, if it's present
if (u.getPath().endsWith("/")) {
String uriAsString = u.toString();
try {
u = new URI(uriAsString.substring(0, uriAsString.length() - 1));
} catch (URISyntaxException e) {
throw new IOException(e);
}
}
return u;
}
/**
* Converts a collection of strings into a collection of URIs.
* @param names collection of strings to convert to URIs
* @return collection of URIs
*/
public static List<URI> stringCollectionAsURIs(
Collection<String> names) {
List<URI> uris = new ArrayList<URI>(names.size());
for(String name : names) {
try {
uris.add(stringAsURI(name));
} catch (IOException e) {
LOG.error("Error while processing URI: " + name, e);
}
}
return uris;
}
}
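// ---------------------------------------------------------------------------
// Editor's hedged usage sketch (not part of the original Hadoop source): shows how a
// bare path is converted to a file: URI while a well-formed URI is returned as-is.
// The paths are hypothetical; exact output depends on the local file system.
class UtilExample {
  public static void main(String[] args) throws IOException {
    // No scheme: stringAsURI() warns and falls back to fileAsURI(), typically
    // yielding something like file:/data/dfs/name.
    System.out.println(Util.stringAsURI("/data/dfs/name"));
    // Explicit scheme: parsed and returned unchanged.
    System.out.println(Util.stringAsURI("file:///data/dfs/name"));
  }
}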
| 3,376 | 30.858491 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
@InterfaceAudience.Private
public class DatanodeJspHelper {
private static DFSClient getDFSClient(final UserGroupInformation user,
final String addr,
final Configuration conf
) throws IOException,
InterruptedException {
return
user.doAs(new PrivilegedExceptionAction<DFSClient>() {
@Override
public DFSClient run() throws IOException {
return new DFSClient(NetUtils.createSocketAddr(addr), conf);
}
});
}
/** Get DFSClient for a namenode corresponding to the BPID from a datanode */
public static DFSClient getDFSClient(final HttpServletRequest request,
final DataNode datanode, final Configuration conf,
final UserGroupInformation ugi) throws IOException, InterruptedException {
final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS);
return getDFSClient(ugi, nnAddr, conf);
}
}
| 2,323 | 41.254545 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;
/**
* This class defines a replica in a pipeline: either a persistent replica
* being written to by a DFS client, or a temporary replica being replicated
* by a source datanode or copied for balancing purposes.
*
* The base class implements a temporary replica.
*/
public class ReplicaInPipeline extends ReplicaInfo
implements ReplicaInPipelineInterface {
private long bytesAcked;
private long bytesOnDisk;
private byte[] lastChecksum;
private Thread writer;
/**
* Bytes reserved for this replica on the containing volume.
* Based on the difference between the estimated maximum block length and
* the bytes already written to this block.
*/
private long bytesReserved;
private final long originalBytesReserved;
/**
* Constructor for a zero length replica
* @param blockId block id
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param bytesToReserve disk space to reserve for this replica, based on
* the estimated maximum block length.
*/
public ReplicaInPipeline(long blockId, long genStamp,
FsVolumeSpi vol, File dir, long bytesToReserve) {
this(blockId, 0L, genStamp, vol, dir, Thread.currentThread(), bytesToReserve);
}
/**
* Constructor
* @param block a block
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param writer a thread that is writing to this replica
*/
ReplicaInPipeline(Block block,
FsVolumeSpi vol, File dir, Thread writer) {
this( block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(),
vol, dir, writer, 0L);
}
/**
* Constructor
* @param blockId block id
* @param len replica length
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param writer a thread that is writing to this replica
* @param bytesToReserve disk space to reserve for this replica, based on
* the estimated maximum block length.
*/
ReplicaInPipeline(long blockId, long len, long genStamp,
FsVolumeSpi vol, File dir, Thread writer, long bytesToReserve) {
super( blockId, len, genStamp, vol, dir);
this.bytesAcked = len;
this.bytesOnDisk = len;
this.writer = writer;
this.bytesReserved = bytesToReserve;
this.originalBytesReserved = bytesToReserve;
}
/**
* Copy constructor.
* @param from where to copy from
*/
public ReplicaInPipeline(ReplicaInPipeline from) {
super(from);
this.bytesAcked = from.getBytesAcked();
this.bytesOnDisk = from.getBytesOnDisk();
this.writer = from.writer;
this.bytesReserved = from.bytesReserved;
this.originalBytesReserved = from.originalBytesReserved;
}
@Override
public long getVisibleLength() {
return -1;
}
@Override //ReplicaInfo
public ReplicaState getState() {
return ReplicaState.TEMPORARY;
}
@Override // ReplicaInPipelineInterface
public long getBytesAcked() {
return bytesAcked;
}
@Override // ReplicaInPipelineInterface
public void setBytesAcked(long bytesAcked) {
long newBytesAcked = bytesAcked - this.bytesAcked;
this.bytesAcked = bytesAcked;
// Once bytes are ACK'ed we can release equivalent space from the
// volume's reservedForRbw count. We could have released it as soon
// as the write-to-disk completed but that would be inefficient.
getVolume().releaseReservedSpace(newBytesAcked);
bytesReserved -= newBytesAcked;
}
@Override // ReplicaInPipelineInterface
public long getBytesOnDisk() {
return bytesOnDisk;
}
@Override
public long getBytesReserved() {
return bytesReserved;
}
@Override
public long getOriginalBytesReserved() {
return originalBytesReserved;
}
@Override
public void releaseAllBytesReserved() { // ReplicaInPipelineInterface
getVolume().releaseReservedSpace(bytesReserved);
getVolume().releaseLockedMemory(bytesReserved);
bytesReserved = 0;
}
@Override // ReplicaInPipelineInterface
public synchronized void setLastChecksumAndDataLen(long dataLength, byte[] lastChecksum) {
this.bytesOnDisk = dataLength;
this.lastChecksum = lastChecksum;
}
@Override // ReplicaInPipelineInterface
public synchronized ChunkChecksum getLastChecksumAndDataLen() {
return new ChunkChecksum(getBytesOnDisk(), lastChecksum);
}
/**
* Set the thread that is writing to this replica
* @param writer a thread writing to this replica
*/
public void setWriter(Thread writer) {
this.writer = writer;
}
@Override // Object
public boolean equals(Object o) {
return super.equals(o);
}
/**
* Interrupt the writing thread and wait until it dies.
* @throws IOException if the wait for the writer thread is interrupted
*/
public void stopWriter(long xceiverStopTimeout) throws IOException {
if (writer != null && writer != Thread.currentThread() && writer.isAlive()) {
writer.interrupt();
try {
writer.join(xceiverStopTimeout);
if (writer.isAlive()) {
final String msg = "Join on writer thread " + writer + " timed out";
DataNode.LOG.warn(msg + "\n" + StringUtils.getStackTrace(writer));
throw new IOException(msg);
}
} catch (InterruptedException e) {
throw new IOException("Waiting for writer thread is interrupted.");
}
}
}
@Override // Object
public int hashCode() {
return super.hashCode();
}
@Override // ReplicaInPipelineInterface
public ReplicaOutputStreams createStreams(boolean isCreate,
DataChecksum requestedChecksum) throws IOException {
File blockFile = getBlockFile();
File metaFile = getMetaFile();
if (DataNode.LOG.isDebugEnabled()) {
DataNode.LOG.debug("writeTo blockfile is " + blockFile +
" of size " + blockFile.length());
DataNode.LOG.debug("writeTo metafile is " + metaFile +
" of size " + metaFile.length());
}
long blockDiskSize = 0L;
long crcDiskSize = 0L;
// the checksum that should actually be used -- this
// may differ from requestedChecksum for appends.
final DataChecksum checksum;
RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
if (!isCreate) {
// For append or recovery, we must enforce the existing checksum.
// Also, verify that the file has correct lengths, etc.
boolean checkedMeta = false;
try {
BlockMetadataHeader header = BlockMetadataHeader.readHeader(metaRAF);
checksum = header.getChecksum();
if (checksum.getBytesPerChecksum() !=
requestedChecksum.getBytesPerChecksum()) {
throw new IOException("Client requested checksum " +
requestedChecksum + " when appending to an existing block " +
"with different chunk size: " + checksum);
}
int bytesPerChunk = checksum.getBytesPerChecksum();
int checksumSize = checksum.getChecksumSize();
blockDiskSize = bytesOnDisk;
crcDiskSize = BlockMetadataHeader.getHeaderSize() +
(blockDiskSize+bytesPerChunk-1)/bytesPerChunk*checksumSize;
if (blockDiskSize>0 &&
(blockDiskSize>blockFile.length() || crcDiskSize>metaFile.length())) {
throw new IOException("Corrupted block: " + this);
}
checkedMeta = true;
} finally {
if (!checkedMeta) {
// clean up in case of exceptions.
IOUtils.closeStream(metaRAF);
}
}
} else {
// for create, we can use the requested checksum
checksum = requestedChecksum;
}
FileOutputStream blockOut = null;
FileOutputStream crcOut = null;
try {
blockOut = new FileOutputStream(
new RandomAccessFile( blockFile, "rw" ).getFD() );
crcOut = new FileOutputStream(metaRAF.getFD() );
if (!isCreate) {
blockOut.getChannel().position(blockDiskSize);
crcOut.getChannel().position(crcDiskSize);
}
return new ReplicaOutputStreams(blockOut, crcOut, checksum,
getVolume().isTransientStorage());
} catch (IOException e) {
IOUtils.closeStream(blockOut);
IOUtils.closeStream(metaRAF);
throw e;
}
}
@Override
public OutputStream createRestartMetaStream() throws IOException {
File blockFile = getBlockFile();
File restartMeta = new File(blockFile.getParent() +
File.separator + "." + blockFile.getName() + ".restart");
if (restartMeta.exists() && !restartMeta.delete()) {
DataNode.LOG.warn("Failed to delete restart meta file: " +
restartMeta.getPath());
}
return new FileOutputStream(restartMeta);
}
@Override
public String toString() {
return super.toString()
+ "\n bytesAcked=" + bytesAcked
+ "\n bytesOnDisk=" + bytesOnDisk;
}
}
| 10,589 | 33.051447 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import java.net.SocketTimeoutException;
import java.nio.channels.AsynchronousCloseException;
import java.util.HashMap;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.net.PeerServer;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Daemon;
import com.google.common.annotations.VisibleForTesting;
/**
* Server used for receiving/sending a block of data.
* This is created to listen for requests from clients or
* other DataNodes. This small server does not use the
* Hadoop IPC mechanism.
*/
class DataXceiverServer implements Runnable {
public static final Log LOG = DataNode.LOG;
private final PeerServer peerServer;
private final DataNode datanode;
private final HashMap<Peer, Thread> peers = new HashMap<Peer, Thread>();
private final HashMap<Peer, DataXceiver> peersXceiver = new HashMap<Peer, DataXceiver>();
private boolean closed = false;
/**
* Maximum number of concurrent xceivers per node.
* Enforcing the limit is required to keep the data-node
* from running out of memory.
*/
int maxXceiverCount =
DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT;
/** A manager to make sure that cluster balancing does not
* take too many resources.
*
* It limits the number of block moves for balancing and
* the total amount of bandwidth they can use.
*/
static class BlockBalanceThrottler extends DataTransferThrottler {
private int numThreads;
private int maxThreads;
/** Constructor
*
* @param bandwidth Total amount of bandwidth that can be used for balancing
* @param maxThreads Maximum number of concurrent block moves allowed
*/
private BlockBalanceThrottler(long bandwidth, int maxThreads) {
super(bandwidth);
this.maxThreads = maxThreads;
LOG.info("Balancing bandwith is "+ bandwidth + " bytes/s");
LOG.info("Number threads for balancing is "+ maxThreads);
}
/** Check if the block move can start.
*
* Return true if the thread quota is not exceeded and
* the counter is incremented; false otherwise.
*/
synchronized boolean acquire() {
if (numThreads >= maxThreads) {
return false;
}
numThreads++;
return true;
}
/** Mark that the move is completed. The thread counter is decremented. */
synchronized void release() {
numThreads--;
}
}
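// Editor's hedged note (not part of the original Hadoop source): the callers of this
// throttler live elsewhere in the datanode code. The general contract sketched from
// the methods above is:
//
//   if (balanceThrottler.acquire()) {        // a move slot is available
//     try {
//       // copy the block, throttling bytes through the inherited throttler
//     } finally {
//       balanceThrottler.release();          // always give the slot back
//     }
//   } else {
//     // too many concurrent moves; reject this balancing request
//   }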
final BlockBalanceThrottler balanceThrottler;
/**
* We need an estimate for block size to check if the disk partition has
* enough space. Newer clients pass the expected block size to the DataNode.
* For older clients we just use the server-side default block size.
*/
final long estimateBlockSize;
DataXceiverServer(PeerServer peerServer, Configuration conf,
DataNode datanode) {
this.peerServer = peerServer;
this.datanode = datanode;
this.maxXceiverCount =
conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
this.estimateBlockSize = conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
//set up parameter for cluster balancing
this.balanceThrottler = new BlockBalanceThrottler(
conf.getLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,
DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT),
conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT));
}
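  /*
   * Configuration sketch (illustrative only): the constructor above reads its
   * limits through the DFSConfigKeys constants referenced in the code. A test
   * might tune them like this before starting a DataNode; the values shown
   * are arbitrary examples, not recommendations.
   *
   *   Configuration conf = new Configuration();
   *   conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, 8192);
   *   conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY,
   *       10L * 1024 * 1024);  // 10 MB/s for balancing traffic
   *   conf.setInt(
   *       DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, 5);
   */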
@Override
public void run() {
Peer peer = null;
while (datanode.shouldRun && !datanode.shutdownForUpgrade) {
try {
peer = peerServer.accept();
// Make sure the xceiver count is not exceeded
int curXceiverCount = datanode.getXceiverCount();
if (curXceiverCount > maxXceiverCount) {
throw new IOException("Xceiver count " + curXceiverCount
+ " exceeds the limit of concurrent xcievers: "
+ maxXceiverCount);
}
new Daemon(datanode.threadGroup,
DataXceiver.create(peer, datanode, this))
.start();
} catch (SocketTimeoutException ignored) {
// wake up to see if should continue to run
} catch (AsynchronousCloseException ace) {
// another thread closed our listener socket - that's expected during shutdown,
// but not in other circumstances
if (datanode.shouldRun && !datanode.shutdownForUpgrade) {
LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ace);
}
} catch (IOException ie) {
IOUtils.cleanup(null, peer);
LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ie);
} catch (OutOfMemoryError ie) {
IOUtils.cleanup(null, peer);
        // DataNode can run out of memory if there are too many transfers.
        // Log the event and sleep for 30 seconds; other transfers may complete
        // by then.
LOG.error("DataNode is out of memory. Will retry in 30 seconds.", ie);
try {
Thread.sleep(30 * 1000);
} catch (InterruptedException e) {
// ignore
}
} catch (Throwable te) {
LOG.error(datanode.getDisplayName()
+ ":DataXceiverServer: Exiting due to: ", te);
datanode.shouldRun = false;
}
}
// Close the server to stop reception of more requests.
try {
peerServer.close();
closed = true;
} catch (IOException ie) {
LOG.warn(datanode.getDisplayName()
+ " :DataXceiverServer: close exception", ie);
}
// if in restart prep stage, notify peers before closing them.
if (datanode.shutdownForUpgrade) {
restartNotifyPeers();
      // Each thread needs some time to process the notification. If a thread
      // needs to send an OOB message to the client but is blocked on the
      // network for a long time, we need to force its termination.
LOG.info("Shutting down DataXceiverServer before restart");
// Allow roughly up to 2 seconds.
for (int i = 0; getNumPeers() > 0 && i < 10; i++) {
try {
Thread.sleep(200);
} catch (InterruptedException e) {
// ignore
}
}
}
// Close all peers.
closeAllPeers();
}
void kill() {
assert (datanode.shouldRun == false || datanode.shutdownForUpgrade) :
"shoudRun should be set to false or restarting should be true"
+ " before killing";
try {
this.peerServer.close();
this.closed = true;
} catch (IOException ie) {
LOG.warn(datanode.getDisplayName() + ":DataXceiverServer.kill(): ", ie);
}
}
synchronized void addPeer(Peer peer, Thread t, DataXceiver xceiver)
throws IOException {
if (closed) {
throw new IOException("Server closed.");
}
peers.put(peer, t);
peersXceiver.put(peer, xceiver);
}
synchronized void closePeer(Peer peer) {
peers.remove(peer);
peersXceiver.remove(peer);
IOUtils.cleanup(null, peer);
}
// Sending OOB to all peers
public synchronized void sendOOBToPeers() {
if (!datanode.shutdownForUpgrade) {
return;
}
for (Peer p : peers.keySet()) {
try {
peersXceiver.get(p).sendOOB();
} catch (IOException e) {
LOG.warn("Got error when sending OOB message.", e);
} catch (InterruptedException e) {
LOG.warn("Interrupted when sending OOB message.");
}
}
}
// Notify all peers of the shutdown and restart.
  // datanode.shouldRun should still be true and datanode.shutdownForUpgrade
  // should be set true before calling this method.
synchronized void restartNotifyPeers() {
assert (datanode.shouldRun == true && datanode.shutdownForUpgrade);
for (Thread t : peers.values()) {
// interrupt each and every DataXceiver thread.
t.interrupt();
}
}
// Close all peers and clear the map.
synchronized void closeAllPeers() {
LOG.info("Closing all peers.");
for (Peer p : peers.keySet()) {
IOUtils.cleanup(LOG, p);
}
peers.clear();
peersXceiver.clear();
}
// Return the number of peers.
synchronized int getNumPeers() {
return peers.size();
}
// Return the number of peers and DataXceivers.
@VisibleForTesting
synchronized int getNumPeersXceiver() {
return peersXceiver.size();
}
synchronized void releasePeer(Peer peer) {
peers.remove(peer);
peersXceiver.remove(peer);
}
}
| 9,566 | 32.334495 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT;
import java.io.Closeable;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
import org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.DomainSocketWatcher;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultimap;
/**
* Manages client short-circuit memory segments on the DataNode.
*
* DFSClients request shared memory segments from the DataNode. The
* ShortCircuitRegistry generates and manages these segments. Each segment
* has a randomly generated 128-bit ID which uniquely identifies it. The
* segments each contain several "slots."
*
* Before performing a short-circuit read, DFSClients must request a pair of
* file descriptors from the DataNode via the REQUEST_SHORT_CIRCUIT_FDS
* operation. As part of this operation, DFSClients pass the ID of the shared
* memory segment they would like to use to communicate information about this
* replica, as well as the slot number within that segment they would like to
* use. Slot allocation is always done by the client.
*
 * Slots are used to track the state of the block on both the client and the
* datanode. When this DataNode mlocks a block, the corresponding slots for the
* replicas are marked as "anchorable". Anchorable blocks can be safely read
* without verifying the checksum. This means that BlockReaderLocal objects
* using these replicas can skip checksumming. It also means that we can do
* zero-copy reads on these replicas (the ZCR interface has no way of
* verifying checksums.)
*
* When a DN needs to munlock a block, it needs to first wait for the block to
* be unanchored by clients doing a no-checksum read or a zero-copy read. The
* DN also marks the block's slots as "unanchorable" to prevent additional
* clients from initiating these operations in the future.
*
* The counterpart of this class on the client is {@link DfsClientShmManager}.
*/
public class ShortCircuitRegistry {
public static final Log LOG = LogFactory.getLog(ShortCircuitRegistry.class);
private static final int SHM_LENGTH = 8192;
public static class RegisteredShm extends ShortCircuitShm
implements DomainSocketWatcher.Handler {
private final String clientName;
private final ShortCircuitRegistry registry;
RegisteredShm(String clientName, ShmId shmId, FileInputStream stream,
ShortCircuitRegistry registry) throws IOException {
super(shmId, stream);
this.clientName = clientName;
this.registry = registry;
}
@Override
public boolean handle(DomainSocket sock) {
synchronized (registry) {
synchronized (this) {
registry.removeShm(this);
}
}
return true;
}
String getClientName() {
return clientName;
}
}
public synchronized void removeShm(ShortCircuitShm shm) {
if (LOG.isTraceEnabled()) {
LOG.debug("removing shm " + shm);
}
// Stop tracking the shmId.
RegisteredShm removedShm = segments.remove(shm.getShmId());
Preconditions.checkState(removedShm == shm,
"failed to remove " + shm.getShmId());
// Stop tracking the slots.
for (Iterator<Slot> iter = shm.slotIterator(); iter.hasNext(); ) {
Slot slot = iter.next();
boolean removed = slots.remove(slot.getBlockId(), slot);
Preconditions.checkState(removed);
slot.makeInvalid();
}
// De-allocate the memory map and close the shared file.
shm.free();
}
/**
* Whether or not the registry is enabled.
*/
private boolean enabled;
/**
* The factory which creates shared file descriptors.
*/
private final SharedFileDescriptorFactory shmFactory;
/**
* A watcher which sends out callbacks when the UNIX domain socket
* associated with a shared memory segment closes.
*/
private final DomainSocketWatcher watcher;
private final HashMap<ShmId, RegisteredShm> segments =
new HashMap<ShmId, RegisteredShm>(0);
private final HashMultimap<ExtendedBlockId, Slot> slots =
HashMultimap.create(0, 1);
public ShortCircuitRegistry(Configuration conf) throws IOException {
boolean enabled = false;
SharedFileDescriptorFactory shmFactory = null;
DomainSocketWatcher watcher = null;
try {
int interruptCheck = conf.getInt(
DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
if (interruptCheck <= 0) {
throw new IOException(
DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS +
" was set to " + interruptCheck);
}
String shmPaths[] =
conf.getTrimmedStrings(DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS);
if (shmPaths.length == 0) {
shmPaths =
DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS_DEFAULT.split(",");
}
shmFactory = SharedFileDescriptorFactory.
create("HadoopShortCircuitShm_", shmPaths);
String dswLoadingFailure = DomainSocketWatcher.getLoadingFailureReason();
if (dswLoadingFailure != null) {
throw new IOException(dswLoadingFailure);
}
watcher = new DomainSocketWatcher(interruptCheck, "datanode");
enabled = true;
if (LOG.isDebugEnabled()) {
LOG.debug("created new ShortCircuitRegistry with interruptCheck=" +
interruptCheck + ", shmPath=" + shmFactory.getPath());
}
} catch (IOException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Disabling ShortCircuitRegistry", e);
}
} finally {
this.enabled = enabled;
this.shmFactory = shmFactory;
this.watcher = watcher;
}
}
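  /*
   * Configuration sketch (illustrative only): the constructor above silently
   * disables the registry when no shared-memory path or DomainSocketWatcher
   * is available. The keys are the ones statically imported at the top of
   * this file; the path and interval values are just example settings.
   *
   *   Configuration conf = new Configuration();
   *   conf.set(DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS, "/dev/shm,/tmp");
   *   conf.setInt(
   *       DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 60000);
   *   ShortCircuitRegistry registry = new ShortCircuitRegistry(conf);
   */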
/**
* Process a block mlock event from the FsDatasetCache.
*
* @param blockId The block that was mlocked.
*/
public synchronized void processBlockMlockEvent(ExtendedBlockId blockId) {
if (!enabled) return;
Set<Slot> affectedSlots = slots.get(blockId);
for (Slot slot : affectedSlots) {
slot.makeAnchorable();
}
}
/**
* Mark any slots associated with this blockId as unanchorable.
*
* @param blockId The block ID.
* @return True if we should allow the munlock request.
*/
public synchronized boolean processBlockMunlockRequest(
ExtendedBlockId blockId) {
if (!enabled) return true;
boolean allowMunlock = true;
Set<Slot> affectedSlots = slots.get(blockId);
for (Slot slot : affectedSlots) {
slot.makeUnanchorable();
if (slot.isAnchored()) {
allowMunlock = false;
}
}
return allowMunlock;
}
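  /*
   * Illustrative sketch (not part of the original source): how the DataNode's
   * caching code is expected to drive the two methods above. After a replica
   * is mlocked its slots become anchorable; before munlocking, the DataNode
   * asks whether any client still has the block anchored. "registry" and
   * "blockId" are assumed inputs.
   *
   *   registry.processBlockMlockEvent(blockId);       // after a successful mlock
   *   ...
   *   if (registry.processBlockMunlockRequest(blockId)) {
   *     // No client has the block anchored; it is safe to munlock now.
   *   } else {
   *     // At least one slot is still anchored; retry the munlock later.
   *   }
   */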
/**
* Invalidate any slot associated with a blockId that we are invalidating
* (deleting) from this DataNode. When a slot is invalid, the DFSClient will
* not use the corresponding replica for new read or mmap operations (although
* existing, ongoing read or mmap operations will complete.)
*
* @param blockId The block ID.
*/
public synchronized void processBlockInvalidation(ExtendedBlockId blockId) {
if (!enabled) return;
final Set<Slot> affectedSlots = slots.get(blockId);
if (!affectedSlots.isEmpty()) {
final StringBuilder bld = new StringBuilder();
String prefix = "";
bld.append("Block ").append(blockId).append(" has been invalidated. ").
append("Marking short-circuit slots as invalid: ");
for (Slot slot : affectedSlots) {
slot.makeInvalid();
bld.append(prefix).append(slot.toString());
prefix = ", ";
}
LOG.info(bld.toString());
}
}
public synchronized String getClientNames(ExtendedBlockId blockId) {
if (!enabled) return "";
final HashSet<String> clientNames = new HashSet<String>();
final Set<Slot> affectedSlots = slots.get(blockId);
for (Slot slot : affectedSlots) {
clientNames.add(((RegisteredShm)slot.getShm()).getClientName());
}
return Joiner.on(",").join(clientNames);
}
public static class NewShmInfo implements Closeable {
public final ShmId shmId;
public final FileInputStream stream;
NewShmInfo(ShmId shmId, FileInputStream stream) {
this.shmId = shmId;
this.stream = stream;
}
@Override
public void close() throws IOException {
stream.close();
}
}
/**
* Handle a DFSClient request to create a new memory segment.
*
* @param clientName Client name as reported by the client.
* @param sock The DomainSocket to associate with this memory
* segment. When this socket is closed, or the
* other side writes anything to the socket, the
* segment will be closed. This can happen at any
* time, including right after this function returns.
* @return A NewShmInfo object. The caller must close the
* NewShmInfo object once they are done with it.
* @throws IOException If the new memory segment could not be created.
*/
public NewShmInfo createNewMemorySegment(String clientName,
DomainSocket sock) throws IOException {
NewShmInfo info = null;
RegisteredShm shm = null;
ShmId shmId = null;
synchronized (this) {
if (!enabled) {
if (LOG.isTraceEnabled()) {
LOG.trace("createNewMemorySegment: ShortCircuitRegistry is " +
"not enabled.");
}
throw new UnsupportedOperationException();
}
FileInputStream fis = null;
try {
do {
shmId = ShmId.createRandom();
} while (segments.containsKey(shmId));
fis = shmFactory.createDescriptor(clientName, SHM_LENGTH);
shm = new RegisteredShm(clientName, shmId, fis, this);
} finally {
if (shm == null) {
IOUtils.closeQuietly(fis);
}
}
info = new NewShmInfo(shmId, fis);
segments.put(shmId, shm);
}
// Drop the registry lock to prevent deadlock.
// After this point, RegisteredShm#handle may be called at any time.
watcher.add(sock, shm);
if (LOG.isTraceEnabled()) {
LOG.trace("createNewMemorySegment: created " + info.shmId);
}
return info;
}
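  /*
   * Usage sketch (illustrative only): a server-side caller handling a client
   * shared-memory request might use this method roughly as follows. The
   * "registry", "clientName" and "sock" names are assumed inputs, and sending
   * the descriptor back to the DFSClient is elided.
   *
   *   NewShmInfo shmInfo = null;
   *   try {
   *     shmInfo = registry.createNewMemorySegment(clientName, sock);
   *     // Pass shmInfo.shmId and the file descriptor behind shmInfo.stream
   *     // back to the DFSClient over the domain socket.
   *   } finally {
   *     IOUtils.closeQuietly(shmInfo);  // NewShmInfo is Closeable
   *   }
   */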
public synchronized void registerSlot(ExtendedBlockId blockId, SlotId slotId,
boolean isCached) throws InvalidRequestException {
if (!enabled) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + " can't register a slot because the " +
"ShortCircuitRegistry is not enabled.");
}
throw new UnsupportedOperationException();
}
ShmId shmId = slotId.getShmId();
RegisteredShm shm = segments.get(shmId);
if (shm == null) {
throw new InvalidRequestException("there is no shared memory segment " +
"registered with shmId " + shmId);
}
Slot slot = shm.registerSlot(slotId.getSlotIdx(), blockId);
if (isCached) {
slot.makeAnchorable();
} else {
slot.makeUnanchorable();
}
boolean added = slots.put(blockId, slot);
Preconditions.checkState(added);
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": registered " + blockId + " with slot " +
slotId + " (isCached=" + isCached + ")");
}
}
public synchronized void unregisterSlot(SlotId slotId)
throws InvalidRequestException {
if (!enabled) {
if (LOG.isTraceEnabled()) {
LOG.trace("unregisterSlot: ShortCircuitRegistry is " +
"not enabled.");
}
throw new UnsupportedOperationException();
}
ShmId shmId = slotId.getShmId();
RegisteredShm shm = segments.get(shmId);
if (shm == null) {
throw new InvalidRequestException("there is no shared memory segment " +
"registered with shmId " + shmId);
}
Slot slot = shm.getSlot(slotId.getSlotIdx());
slot.makeInvalid();
shm.unregisterSlot(slotId.getSlotIdx());
slots.remove(slot.getBlockId(), slot);
}
public void shutdown() {
synchronized (this) {
if (!enabled) return;
enabled = false;
}
IOUtils.closeQuietly(watcher);
}
public static interface Visitor {
void accept(HashMap<ShmId, RegisteredShm> segments,
HashMultimap<ExtendedBlockId, Slot> slots);
}
@VisibleForTesting
public synchronized void visit(Visitor visitor) {
visitor.accept(segments, slots);
}
}
| 14461 | 35.336683 | 118 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.DataOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.BlockScanner.Conf;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* VolumeScanner scans a single volume. Each VolumeScanner has its own thread.<p/>
* They are all managed by the DataNode's BlockScanner.
*/
public class VolumeScanner extends Thread {
public static final Logger LOG =
LoggerFactory.getLogger(VolumeScanner.class);
/**
* Number of seconds in a minute.
*/
private final static int SECONDS_PER_MINUTE = 60;
/**
* Number of minutes in an hour.
*/
private final static int MINUTES_PER_HOUR = 60;
/**
* Name of the block iterator used by this scanner.
*/
private final static String BLOCK_ITERATOR_NAME = "scanner";
/**
* The configuration.
*/
private final Conf conf;
/**
   * The DataNode this VolumeScanner is associated with.
*/
private final DataNode datanode;
/**
* A reference to the volume that we're scanning.
*/
private final FsVolumeReference ref;
/**
* The volume that we're scanning.
*/
final FsVolumeSpi volume;
/**
* The number of scanned bytes in each minute of the last hour.<p/>
*
* This array is managed as a circular buffer. We take the monotonic time and
* divide it up into one-minute periods. Each entry in the array represents
* how many bytes were scanned during that period.
*/
private final long scannedBytes[] = new long[MINUTES_PER_HOUR];
/**
* The sum of all the values of scannedBytes.
*/
private long scannedBytesSum = 0;
/**
* The throttler to use with BlockSender objects.
*/
private final DataTransferThrottler throttler = new DataTransferThrottler(1);
/**
* The null output stream to use with BlockSender objects.
*/
private final DataOutputStream nullStream =
new DataOutputStream(new IOUtils.NullOutputStream());
/**
* The block iterators associated with this VolumeScanner.<p/>
*
* Each block pool has its own BlockIterator.
*/
private final List<BlockIterator> blockIters =
new LinkedList<BlockIterator>();
/**
* Blocks which are suspect.
* The scanner prioritizes scanning these blocks.
*/
private final LinkedHashSet<ExtendedBlock> suspectBlocks =
new LinkedHashSet<ExtendedBlock>();
/**
* Blocks which were suspect which we have scanned.
* This is used to avoid scanning the same suspect block over and over.
*/
private final Cache<ExtendedBlock, Boolean> recentSuspectBlocks =
CacheBuilder.newBuilder().maximumSize(1000)
.expireAfterAccess(10, TimeUnit.MINUTES).build();
/**
* The current block iterator, or null if there is none.
*/
private BlockIterator curBlockIter = null;
/**
* True if the thread is stopping.<p/>
* Protected by this object's lock.
*/
private boolean stopping = false;
/**
* The monotonic minute that the volume scanner was started on.
*/
private long startMinute = 0;
/**
* The current minute, in monotonic terms.
*/
private long curMinute = 0;
/**
* Handles scan results.
*/
private final ScanResultHandler resultHandler;
private final Statistics stats = new Statistics();
static class Statistics {
long bytesScannedInPastHour = 0;
long blocksScannedInCurrentPeriod = 0;
long blocksScannedSinceRestart = 0;
long scansSinceRestart = 0;
long scanErrorsSinceRestart = 0;
long nextBlockPoolScanStartMs = -1;
long blockPoolPeriodEndsMs = -1;
ExtendedBlock lastBlockScanned = null;
boolean eof = false;
Statistics() {
}
Statistics(Statistics other) {
this.bytesScannedInPastHour = other.bytesScannedInPastHour;
this.blocksScannedInCurrentPeriod = other.blocksScannedInCurrentPeriod;
this.blocksScannedSinceRestart = other.blocksScannedSinceRestart;
this.scansSinceRestart = other.scansSinceRestart;
this.scanErrorsSinceRestart = other.scanErrorsSinceRestart;
this.nextBlockPoolScanStartMs = other.nextBlockPoolScanStartMs;
this.blockPoolPeriodEndsMs = other.blockPoolPeriodEndsMs;
this.lastBlockScanned = other.lastBlockScanned;
this.eof = other.eof;
}
@Override
public String toString() {
return new StringBuilder().
append("Statistics{").
append("bytesScannedInPastHour=").append(bytesScannedInPastHour).
append(", blocksScannedInCurrentPeriod=").
append(blocksScannedInCurrentPeriod).
append(", blocksScannedSinceRestart=").
append(blocksScannedSinceRestart).
append(", scansSinceRestart=").append(scansSinceRestart).
append(", scanErrorsSinceRestart=").append(scanErrorsSinceRestart).
append(", nextBlockPoolScanStartMs=").append(nextBlockPoolScanStartMs).
append(", blockPoolPeriodEndsMs=").append(blockPoolPeriodEndsMs).
append(", lastBlockScanned=").append(lastBlockScanned).
append(", eof=").append(eof).
append("}").toString();
}
}
private static double positiveMsToHours(long ms) {
if (ms <= 0) {
return 0;
} else {
      // Use floating-point division here; TimeUnit.convert would truncate to
      // whole hours and defeat the %.3f formatting used by printStats below.
      return (double) ms / TimeUnit.HOURS.toMillis(1);
}
}
public void printStats(StringBuilder p) {
p.append("Block scanner information for volume " +
volume.getStorageID() + " with base path " + volume.getBasePath() +
"%n");
synchronized (stats) {
p.append(String.format("Bytes verified in last hour : %57d%n",
stats.bytesScannedInPastHour));
p.append(String.format("Blocks scanned in current period : %57d%n",
stats.blocksScannedInCurrentPeriod));
p.append(String.format("Blocks scanned since restart : %57d%n",
stats.blocksScannedSinceRestart));
p.append(String.format("Block pool scans since restart : %57d%n",
stats.scansSinceRestart));
p.append(String.format("Block scan errors since restart : %57d%n",
stats.scanErrorsSinceRestart));
if (stats.nextBlockPoolScanStartMs > 0) {
p.append(String.format("Hours until next block pool scan : %57.3f%n",
positiveMsToHours(stats.nextBlockPoolScanStartMs -
Time.monotonicNow())));
}
if (stats.blockPoolPeriodEndsMs > 0) {
p.append(String.format("Hours until possible pool rescan : %57.3f%n",
positiveMsToHours(stats.blockPoolPeriodEndsMs -
Time.now())));
}
p.append(String.format("Last block scanned : %57s%n",
((stats.lastBlockScanned == null) ? "none" :
stats.lastBlockScanned.toString())));
p.append(String.format("More blocks to scan in period : %57s%n",
!stats.eof));
p.append("%n");
}
}
static class ScanResultHandler {
private VolumeScanner scanner;
public void setup(VolumeScanner scanner) {
LOG.trace("Starting VolumeScanner {}",
scanner.volume.getBasePath());
this.scanner = scanner;
}
public void handle(ExtendedBlock block, IOException e) {
FsVolumeSpi volume = scanner.volume;
if (e == null) {
LOG.trace("Successfully scanned {} on {}", block, volume.getBasePath());
return;
}
// If the block does not exist anymore, then it's not an error.
if (!volume.getDataset().contains(block)) {
LOG.debug("Volume {}: block {} is no longer in the dataset.",
volume.getBasePath(), block);
return;
}
      // If the block exists, the exception may be due to a race with a write:
      // the BlockSender got an old block path in rbw. BlockReceiver moved
      // the block from rbw to finalized, but BlockSender tried to open the
      // file before BlockReceiver updated the volume map. The state of the
      // block can change again now, so ignore this error here. If a block
      // really was deleted by mistake, the DirectoryScanner should catch it.
      if (e instanceof FileNotFoundException) {
LOG.info("Volume {}: verification failed for {} because of " +
"FileNotFoundException. This may be due to a race with write.",
volume.getBasePath(), block);
return;
}
LOG.warn("Reporting bad {} on {}", block, volume.getBasePath());
try {
scanner.datanode.reportBadBlocks(block);
} catch (IOException ie) {
// This is bad, but not bad enough to shut down the scanner.
LOG.warn("Cannot report bad " + block.getBlockId(), e);
}
}
}
VolumeScanner(Conf conf, DataNode datanode, FsVolumeReference ref) {
this.conf = conf;
this.datanode = datanode;
this.ref = ref;
this.volume = ref.getVolume();
ScanResultHandler handler;
try {
handler = conf.resultHandler.newInstance();
} catch (Throwable e) {
LOG.error("unable to instantiate {}", conf.resultHandler, e);
handler = new ScanResultHandler();
}
this.resultHandler = handler;
setName("VolumeScannerThread(" + volume.getBasePath() + ")");
setDaemon(true);
}
private void saveBlockIterator(BlockIterator iter) {
try {
iter.save();
} catch (IOException e) {
LOG.warn("{}: error saving {}.", this, iter, e);
}
}
private void expireOldScannedBytesRecords(long monotonicMs) {
long newMinute =
TimeUnit.MINUTES.convert(monotonicMs, TimeUnit.MILLISECONDS);
if (curMinute == newMinute) {
return;
}
// If a minute or more has gone past since we last updated the scannedBytes
// array, zero out the slots corresponding to those minutes.
for (long m = curMinute + 1; m <= newMinute; m++) {
int slotIdx = (int)(m % MINUTES_PER_HOUR);
LOG.trace("{}: updateScannedBytes is zeroing out slotIdx {}. " +
"curMinute = {}; newMinute = {}", this, slotIdx,
curMinute, newMinute);
scannedBytesSum -= scannedBytes[slotIdx];
scannedBytes[slotIdx] = 0;
}
curMinute = newMinute;
}
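  /*
   * Worked example (illustrative only) of the circular buffer maintained
   * above: with MINUTES_PER_HOUR == 60, monotonic minute m always maps to
   * slot m % 60, so minute 61 reuses the slot of minute 1. Subtracting the
   * old value before zeroing keeps scannedBytesSum equal to the bytes scanned
   * during (at most) the trailing hour.
   *
   *   long newMinute = 61;
   *   int slotIdx = (int) (newMinute % MINUTES_PER_HOUR);  // == 1
   *   scannedBytesSum -= scannedBytes[slotIdx];            // drop stale bytes
   *   scannedBytes[slotIdx] = 0;                           // slot ready for reuse
   */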
/**
* Find a usable block iterator.<p/>
*
* We will consider available block iterators in order. This property is
* important so that we don't keep rescanning the same block pool id over
* and over, while other block pools stay unscanned.<p/>
*
* A block pool is always ready to scan if the iterator is not at EOF. If
* the iterator is at EOF, the block pool will be ready to scan when
* conf.scanPeriodMs milliseconds have elapsed since the iterator was last
* rewound.<p/>
*
* @return 0 if we found a usable block iterator; the
* length of time we should delay before
* checking again otherwise.
*/
private synchronized long findNextUsableBlockIter() {
int numBlockIters = blockIters.size();
if (numBlockIters == 0) {
LOG.debug("{}: no block pools are registered.", this);
return Long.MAX_VALUE;
}
int curIdx;
if (curBlockIter == null) {
curIdx = 0;
} else {
curIdx = blockIters.indexOf(curBlockIter);
Preconditions.checkState(curIdx >= 0);
}
// Note that this has to be wall-clock time, not monotonic time. This is
// because the time saved in the cursor file is a wall-clock time. We do
// not want to save a monotonic time in the cursor file, because it resets
// every time the machine reboots (on most platforms).
long nowMs = Time.now();
long minTimeoutMs = Long.MAX_VALUE;
for (int i = 0; i < numBlockIters; i++) {
int idx = (curIdx + i + 1) % numBlockIters;
BlockIterator iter = blockIters.get(idx);
if (!iter.atEnd()) {
LOG.info("Now scanning bpid {} on volume {}",
iter.getBlockPoolId(), volume.getBasePath());
curBlockIter = iter;
return 0L;
}
long iterStartMs = iter.getIterStartMs();
long waitMs = (iterStartMs + conf.scanPeriodMs) - nowMs;
if (waitMs <= 0) {
iter.rewind();
LOG.info("Now rescanning bpid {} on volume {}, after more than " +
"{} hour(s)", iter.getBlockPoolId(), volume.getBasePath(),
TimeUnit.HOURS.convert(conf.scanPeriodMs, TimeUnit.MILLISECONDS));
curBlockIter = iter;
return 0L;
}
minTimeoutMs = Math.min(minTimeoutMs, waitMs);
}
LOG.info("{}: no suitable block pools found to scan. Waiting {} ms.",
this, minTimeoutMs);
return minTimeoutMs;
}
/**
* Scan a block.
*
* @param cblock The block to scan.
* @param bytesPerSec The bytes per second to scan at.
*
* @return The length of the block that was scanned, or
* -1 if the block could not be scanned.
*/
private long scanBlock(ExtendedBlock cblock, long bytesPerSec) {
// 'cblock' has a valid blockId and block pool id, but we don't yet know the
// genstamp the block is supposed to have. Ask the FsDatasetImpl for this
// information.
ExtendedBlock block = null;
try {
Block b = volume.getDataset().getStoredBlock(
cblock.getBlockPoolId(), cblock.getBlockId());
if (b == null) {
LOG.info("FileNotFound while finding block {} on volume {}",
cblock, volume.getBasePath());
} else {
block = new ExtendedBlock(cblock.getBlockPoolId(), b);
}
} catch (FileNotFoundException e) {
LOG.info("FileNotFoundException while finding block {} on volume {}",
cblock, volume.getBasePath());
} catch (IOException e) {
LOG.warn("I/O error while finding block {} on volume {}",
cblock, volume.getBasePath());
}
if (block == null) {
return -1; // block not found.
}
BlockSender blockSender = null;
try {
blockSender = new BlockSender(block, 0, -1,
false, true, true, datanode, null,
CachingStrategy.newDropBehind());
throttler.setBandwidth(bytesPerSec);
long bytesRead = blockSender.sendBlock(nullStream, null, throttler);
resultHandler.handle(block, null);
return bytesRead;
} catch (IOException e) {
resultHandler.handle(block, e);
} finally {
IOUtils.cleanup(null, blockSender);
}
return -1;
}
@VisibleForTesting
static boolean calculateShouldScan(String storageId, long targetBytesPerSec,
long scannedBytesSum, long startMinute, long curMinute) {
long runMinutes = curMinute - startMinute;
long effectiveBytesPerSec;
if (runMinutes <= 0) {
// avoid division by zero
effectiveBytesPerSec = scannedBytesSum;
} else {
if (runMinutes > MINUTES_PER_HOUR) {
// we only keep an hour's worth of rate information
runMinutes = MINUTES_PER_HOUR;
}
effectiveBytesPerSec = scannedBytesSum /
(SECONDS_PER_MINUTE * runMinutes);
}
boolean shouldScan = effectiveBytesPerSec <= targetBytesPerSec;
LOG.trace("{}: calculateShouldScan: effectiveBytesPerSec = {}, and " +
"targetBytesPerSec = {}. startMinute = {}, curMinute = {}, " +
"shouldScan = {}",
storageId, effectiveBytesPerSec, targetBytesPerSec,
startMinute, curMinute, shouldScan);
return shouldScan;
}
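  /*
   * Worked example (illustrative only) of the rate check above: suppose the
   * scanner has run for 30 minutes and scanned 2,700,000,000 bytes.
   *
   *   long scannedBytesSum = 2_700_000_000L;
   *   long runMinutes = 30;                      // <= MINUTES_PER_HOUR, no cap
   *   long effectiveBytesPerSec =
   *       scannedBytesSum / (SECONDS_PER_MINUTE * runMinutes);  // 1,500,000 B/s
   *
   * With a target of 1,048,576 bytes/s (1 MiB/s) the effective rate exceeds
   * the target, so calculateShouldScan() returns false and runLoop() waits
   * for old per-minute records to expire.
   */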
/**
* Run an iteration of the VolumeScanner loop.
*
* @param suspectBlock A suspect block which we should scan, or null to
* scan the next regularly scheduled block.
*
* @return The number of milliseconds to delay before running the loop
* again, or 0 to re-run the loop immediately.
*/
private long runLoop(ExtendedBlock suspectBlock) {
long bytesScanned = -1;
boolean scanError = false;
ExtendedBlock block = null;
try {
long monotonicMs = Time.monotonicNow();
expireOldScannedBytesRecords(monotonicMs);
if (!calculateShouldScan(volume.getStorageID(), conf.targetBytesPerSec,
scannedBytesSum, startMinute, curMinute)) {
        // We are scanning faster than the configured target rate. Wait a few
        // seconds for some old scannedBytes records to expire.
return 30000L;
}
// Find a usable block pool to scan.
if (suspectBlock != null) {
block = suspectBlock;
} else {
if ((curBlockIter == null) || curBlockIter.atEnd()) {
long timeout = findNextUsableBlockIter();
if (timeout > 0) {
LOG.trace("{}: no block pools are ready to scan yet. Waiting " +
"{} ms.", this, timeout);
synchronized (stats) {
stats.nextBlockPoolScanStartMs = Time.monotonicNow() + timeout;
}
return timeout;
}
synchronized (stats) {
stats.scansSinceRestart++;
stats.blocksScannedInCurrentPeriod = 0;
stats.nextBlockPoolScanStartMs = -1;
}
return 0L;
}
try {
block = curBlockIter.nextBlock();
} catch (IOException e) {
// There was an error listing the next block in the volume. This is a
// serious issue.
LOG.warn("{}: nextBlock error on {}", this, curBlockIter);
// On the next loop iteration, curBlockIter#eof will be set to true, and
// we will pick a different block iterator.
return 0L;
}
if (block == null) {
// The BlockIterator is at EOF.
LOG.info("{}: finished scanning block pool {}",
this, curBlockIter.getBlockPoolId());
saveBlockIterator(curBlockIter);
return 0;
}
}
if (curBlockIter != null) {
long saveDelta = monotonicMs - curBlockIter.getLastSavedMs();
if (saveDelta >= conf.cursorSaveMs) {
LOG.debug("{}: saving block iterator {} after {} ms.",
this, curBlockIter, saveDelta);
saveBlockIterator(curBlockIter);
}
}
bytesScanned = scanBlock(block, conf.targetBytesPerSec);
if (bytesScanned >= 0) {
scannedBytesSum += bytesScanned;
scannedBytes[(int)(curMinute % MINUTES_PER_HOUR)] += bytesScanned;
} else {
scanError = true;
}
return 0L;
} finally {
synchronized (stats) {
stats.bytesScannedInPastHour = scannedBytesSum;
if (bytesScanned > 0) {
stats.blocksScannedInCurrentPeriod++;
stats.blocksScannedSinceRestart++;
}
if (scanError) {
stats.scanErrorsSinceRestart++;
}
if (block != null) {
stats.lastBlockScanned = block;
}
if (curBlockIter == null) {
stats.eof = true;
stats.blockPoolPeriodEndsMs = -1;
} else {
stats.eof = curBlockIter.atEnd();
stats.blockPoolPeriodEndsMs =
curBlockIter.getIterStartMs() + conf.scanPeriodMs;
}
}
}
}
/**
* If there are elements in the suspectBlocks list, removes
* and returns the first one. Otherwise, returns null.
*/
private synchronized ExtendedBlock popNextSuspectBlock() {
Iterator<ExtendedBlock> iter = suspectBlocks.iterator();
if (!iter.hasNext()) {
return null;
}
ExtendedBlock block = iter.next();
iter.remove();
return block;
}
@Override
public void run() {
// Record the minute on which the scanner started.
this.startMinute =
TimeUnit.MINUTES.convert(Time.monotonicNow(), TimeUnit.MILLISECONDS);
this.curMinute = startMinute;
try {
LOG.trace("{}: thread starting.", this);
resultHandler.setup(this);
try {
long timeout = 0;
while (true) {
ExtendedBlock suspectBlock = null;
// Take the lock to check if we should stop, and access the
// suspect block list.
synchronized (this) {
if (stopping) {
break;
}
if (timeout > 0) {
wait(timeout);
if (stopping) {
break;
}
}
suspectBlock = popNextSuspectBlock();
}
timeout = runLoop(suspectBlock);
}
} catch (InterruptedException e) {
// We are exiting because of an InterruptedException,
// probably sent by VolumeScanner#shutdown.
LOG.trace("{} exiting because of InterruptedException.", this);
} catch (Throwable e) {
LOG.error("{} exiting because of exception ", this, e);
}
LOG.info("{} exiting.", this);
// Save the current position of all block iterators and close them.
for (BlockIterator iter : blockIters) {
saveBlockIterator(iter);
IOUtils.cleanup(null, iter);
}
} finally {
// When the VolumeScanner exits, release the reference we were holding
// on the volume. This will allow the volume to be removed later.
IOUtils.cleanup(null, ref);
}
}
@Override
public String toString() {
return "VolumeScanner(" + volume.getBasePath() +
", " + volume.getStorageID() + ")";
}
/**
* Shut down this scanner.
*/
public synchronized void shutdown() {
stopping = true;
notify();
this.interrupt();
}
public synchronized void markSuspectBlock(ExtendedBlock block) {
if (stopping) {
LOG.debug("{}: Not scheduling suspect block {} for " +
"rescanning, because this volume scanner is stopping.", this, block);
return;
}
Boolean recent = recentSuspectBlocks.getIfPresent(block);
if (recent != null) {
LOG.debug("{}: Not scheduling suspect block {} for " +
"rescanning, because we rescanned it recently.", this, block);
return;
}
if (suspectBlocks.contains(block)) {
LOG.debug("{}: suspect block {} is already queued for " +
"rescanning.", this, block);
return;
}
suspectBlocks.add(block);
recentSuspectBlocks.put(block, true);
LOG.debug("{}: Scheduling suspect block {} for rescanning.", this, block);
notify(); // wake scanner thread.
}
/**
* Allow the scanner to scan the given block pool.
*
* @param bpid The block pool id.
*/
public synchronized void enableBlockPoolId(String bpid) {
for (BlockIterator iter : blockIters) {
if (iter.getBlockPoolId().equals(bpid)) {
LOG.warn("{}: already enabled scanning on block pool {}", this, bpid);
return;
}
}
BlockIterator iter = null;
try {
// Load a block iterator for the next block pool on the volume.
iter = volume.loadBlockIterator(bpid, BLOCK_ITERATOR_NAME);
LOG.trace("{}: loaded block iterator for {}.", this, bpid);
} catch (FileNotFoundException e) {
LOG.debug("{}: failed to load block iterator: " + e.getMessage(), this);
} catch (IOException e) {
LOG.warn("{}: failed to load block iterator.", this, e);
}
if (iter == null) {
iter = volume.newBlockIterator(bpid, BLOCK_ITERATOR_NAME);
LOG.trace("{}: created new block iterator for {}.", this, bpid);
}
iter.setMaxStalenessMs(conf.maxStalenessMs);
blockIters.add(iter);
notify();
}
/**
* Disallow the scanner from scanning the given block pool.
*
* @param bpid The block pool id.
*/
public synchronized void disableBlockPoolId(String bpid) {
Iterator<BlockIterator> i = blockIters.iterator();
while (i.hasNext()) {
BlockIterator iter = i.next();
if (iter.getBlockPoolId().equals(bpid)) {
LOG.trace("{}: disabling scanning on block pool {}", this, bpid);
i.remove();
IOUtils.cleanup(null, iter);
if (curBlockIter == iter) {
curBlockIter = null;
}
notify();
return;
}
}
LOG.warn("{}: can't remove block pool {}, because it was never " +
"added.", this, bpid);
}
@VisibleForTesting
Statistics getStatistics() {
synchronized (stats) {
return new Statistics(stats);
}
}
}
| 25810 | 33.692204 | 83 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaNotFoundException.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
/**
 * Exception indicating that the DataNode does not have a replica
* that matches the target block.
*/
public class ReplicaNotFoundException extends IOException {
private static final long serialVersionUID = 1L;
public final static String NON_RBW_REPLICA = "Cannot recover a non-RBW replica ";
public final static String UNFINALIZED_REPLICA =
"Cannot append to an unfinalized replica ";
public final static String UNFINALIZED_AND_NONRBW_REPLICA =
"Cannot recover append/close to a replica that's not FINALIZED and not RBW ";
public final static String NON_EXISTENT_REPLICA =
"Cannot append to a non-existent replica ";
public final static String UNEXPECTED_GS_REPLICA =
"Cannot append to a replica with unexpected generation stamp ";
public ReplicaNotFoundException() {
super();
}
public ReplicaNotFoundException(ExtendedBlock b) {
super("Replica not found for " + b);
}
public ReplicaNotFoundException(String msg) {
super(msg);
}
}
| 1937 | 34.888889 | 83 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.ipc.RemoteException;
/**
* ReportBadBlockAction is an instruction issued by {{BPOfferService}} to
 * {{BPServiceActor}} to report a bad block to the namenode.
*
*/
public class ReportBadBlockAction implements BPServiceActorAction {
private final ExtendedBlock block;
private final String storageUuid;
private final StorageType storageType;
public ReportBadBlockAction(ExtendedBlock block, String storageUuid,
StorageType storageType) {
this.block = block;
this.storageUuid = storageUuid;
this.storageType = storageType;
}
@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode,
DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
if (bpRegistration == null) {
return;
}
DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
String[] uuids = { storageUuid };
StorageType[] types = { storageType };
LocatedBlock[] locatedBlock = { new LocatedBlock(block,
dnArr, uuids, types) };
try {
bpNamenode.reportBadBlocks(locatedBlock);
} catch (RemoteException re) {
DataNode.LOG.info("reportBadBlock encountered RemoteException for "
+ "block: " + block , re);
} catch (IOException e) {
throw new BPServiceActorActionException("Failed to report bad block "
+ block + " to namenode: ");
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((block == null) ? 0 : block.hashCode());
result = prime * result
+ ((storageType == null) ? 0 : storageType.hashCode());
result = prime * result
+ ((storageUuid == null) ? 0 : storageUuid.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || !(obj instanceof ReportBadBlockAction)) {
return false;
}
ReportBadBlockAction other = (ReportBadBlockAction) obj;
if (block == null) {
if (other.block != null) {
return false;
}
} else if (!block.equals(other.block)) {
return false;
}
if (storageType != other.storageType) {
return false;
}
if (storageUuid == null) {
if (other.storageUuid != null) {
return false;
}
} else if (!storageUuid.equals(other.storageUuid)) {
return false;
}
return true;
}
}
| 3682 | 31.59292 | 80 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
import java.io.IOException;
import java.util.Map.Entry;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.VolumeScanner.ScanResultHandler;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.Uninterruptibles;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
@InterfaceAudience.Private
public class BlockScanner {
public static final Logger LOG =
LoggerFactory.getLogger(BlockScanner.class);
/**
* The DataNode that this scanner is associated with.
*/
private final DataNode datanode;
/**
* Maps Storage IDs to VolumeScanner objects.
*/
private final TreeMap<String, VolumeScanner> scanners =
new TreeMap<String, VolumeScanner>();
/**
* The scanner configuration.
*/
private final Conf conf;
/**
* The cached scanner configuration.
*/
static class Conf {
// These are a few internal configuration keys used for unit tests.
// They can't be set unless the static boolean allowUnitTestSettings has
// been set to true.
@VisibleForTesting
static final String INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS =
"internal.dfs.datanode.scan.period.ms.key";
@VisibleForTesting
static final String INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER =
"internal.volume.scanner.scan.result.handler";
@VisibleForTesting
static final String INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS =
"internal.dfs.block.scanner.max_staleness.ms";
@VisibleForTesting
static final long INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS_DEFAULT =
TimeUnit.MILLISECONDS.convert(15, TimeUnit.MINUTES);
@VisibleForTesting
static final String INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS =
"dfs.block.scanner.cursor.save.interval.ms";
@VisibleForTesting
static final long
INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS_DEFAULT =
TimeUnit.MILLISECONDS.convert(10, TimeUnit.MINUTES);
static boolean allowUnitTestSettings = false;
final long targetBytesPerSec;
final long maxStalenessMs;
final long scanPeriodMs;
final long cursorSaveMs;
final Class<? extends ScanResultHandler> resultHandler;
private static long getUnitTestLong(Configuration conf, String key,
long defVal) {
if (allowUnitTestSettings) {
return conf.getLong(key, defVal);
} else {
return defVal;
}
}
/**
* Determine the configured block scanner interval.
*
* For compatibility with prior releases of HDFS, if the
* configured value is zero then the scan period is
* set to 3 weeks.
*
* If the configured value is less than zero then the scanner
* is disabled.
*
* @param conf Configuration object.
* @return block scan period in milliseconds.
*/
private static long getConfiguredScanPeriodMs(Configuration conf) {
long tempScanPeriodMs = getUnitTestLong(
conf, INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS,
TimeUnit.MILLISECONDS.convert(conf.getLong(
DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT), TimeUnit.HOURS));
if (tempScanPeriodMs == 0) {
tempScanPeriodMs = TimeUnit.MILLISECONDS.convert(
DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT, TimeUnit.HOURS);
}
return tempScanPeriodMs;
}
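    /*
     * Configuration sketch (illustrative only): the period comes from
     * DFS_DATANODE_SCAN_PERIOD_HOURS_KEY. A value of 0 falls back to the
     * shipped default, while a negative value leaves scanPeriodMs negative,
     * which disables the scanner (see BlockScanner#isEnabled). Example:
     *
     *   Configuration conf = new Configuration();
     *   conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);  // disable scanning
     */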
@SuppressWarnings("unchecked")
Conf(Configuration conf) {
this.targetBytesPerSec = Math.max(0L, conf.getLong(
DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND,
DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT));
this.maxStalenessMs = Math.max(0L, getUnitTestLong(conf,
INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS,
INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS_DEFAULT));
this.scanPeriodMs = getConfiguredScanPeriodMs(conf);
this.cursorSaveMs = Math.max(0L, getUnitTestLong(conf,
INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS,
INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS_DEFAULT));
if (allowUnitTestSettings) {
this.resultHandler = (Class<? extends ScanResultHandler>)
conf.getClass(INTERNAL_VOLUME_SCANNER_SCAN_RESULT_HANDLER,
ScanResultHandler.class);
} else {
this.resultHandler = ScanResultHandler.class;
}
}
}
public BlockScanner(DataNode datanode, Configuration conf) {
this.datanode = datanode;
this.conf = new Conf(conf);
if (isEnabled()) {
LOG.info("Initialized block scanner with targetBytesPerSec {}",
this.conf.targetBytesPerSec);
} else {
LOG.info("Disabled block scanner.");
}
}
/**
* Returns true if the block scanner is enabled.<p/>
*
* If the block scanner is disabled, no volume scanners will be created, and
* no threads will start.
*/
public boolean isEnabled() {
return (conf.scanPeriodMs > 0) && (conf.targetBytesPerSec > 0);
}
/**
* Set up a scanner for the given block pool and volume.
*
* @param ref A reference to the volume.
*/
public synchronized void addVolumeScanner(FsVolumeReference ref) {
boolean success = false;
try {
FsVolumeSpi volume = ref.getVolume();
if (!isEnabled()) {
LOG.debug("Not adding volume scanner for {}, because the block " +
"scanner is disabled.", volume.getBasePath());
return;
}
VolumeScanner scanner = scanners.get(volume.getStorageID());
if (scanner != null) {
LOG.error("Already have a scanner for volume {}.",
volume.getBasePath());
return;
}
LOG.debug("Adding scanner for volume {} (StorageID {})",
volume.getBasePath(), volume.getStorageID());
scanner = new VolumeScanner(conf, datanode, ref);
scanner.start();
scanners.put(volume.getStorageID(), scanner);
success = true;
} finally {
if (!success) {
// If we didn't create a new VolumeScanner object, we don't
// need this reference to the volume.
IOUtils.cleanup(null, ref);
}
}
}
/**
* Stops and removes a volume scanner.<p/>
*
* This function will block until the volume scanner has stopped.
*
* @param volume The volume to remove.
*/
public synchronized void removeVolumeScanner(FsVolumeSpi volume) {
if (!isEnabled()) {
LOG.debug("Not removing volume scanner for {}, because the block " +
"scanner is disabled.", volume.getStorageID());
return;
}
VolumeScanner scanner = scanners.get(volume.getStorageID());
if (scanner == null) {
LOG.warn("No scanner found to remove for volumeId {}",
volume.getStorageID());
return;
}
LOG.info("Removing scanner for volume {} (StorageID {})",
volume.getBasePath(), volume.getStorageID());
scanner.shutdown();
scanners.remove(volume.getStorageID());
Uninterruptibles.joinUninterruptibly(scanner, 5, TimeUnit.MINUTES);
}
/**
* Stops and removes all volume scanners.<p/>
*
* This function will block until all the volume scanners have stopped.
*/
public synchronized void removeAllVolumeScanners() {
for (Entry<String, VolumeScanner> entry : scanners.entrySet()) {
entry.getValue().shutdown();
}
for (Entry<String, VolumeScanner> entry : scanners.entrySet()) {
Uninterruptibles.joinUninterruptibly(entry.getValue(),
5, TimeUnit.MINUTES);
}
scanners.clear();
}
/**
* Enable scanning a given block pool id.
*
* @param bpid The block pool id to enable scanning for.
*/
synchronized void enableBlockPoolId(String bpid) {
Preconditions.checkNotNull(bpid);
for (VolumeScanner scanner : scanners.values()) {
scanner.enableBlockPoolId(bpid);
}
}
/**
* Disable scanning a given block pool id.
*
* @param bpid The block pool id to disable scanning for.
*/
synchronized void disableBlockPoolId(String bpid) {
Preconditions.checkNotNull(bpid);
for (VolumeScanner scanner : scanners.values()) {
scanner.disableBlockPoolId(bpid);
}
}
@VisibleForTesting
synchronized VolumeScanner.Statistics getVolumeStats(String volumeId) {
VolumeScanner scanner = scanners.get(volumeId);
if (scanner == null) {
return null;
}
return scanner.getStatistics();
}
synchronized void printStats(StringBuilder p) {
    // Print the statistics gathered by each volume scanner.
for (Entry<String, VolumeScanner> entry : scanners.entrySet()) {
entry.getValue().printStats(p);
}
}
/**
* Mark a block as "suspect."
*
* This means that we should try to rescan it soon. Note that the
* VolumeScanner keeps a list of recently suspicious blocks, which
* it uses to avoid rescanning the same block over and over in a short
* time frame.
*
* @param storageId The ID of the storage where the block replica
* is being stored.
* @param block The block's ID and block pool id.
*/
synchronized void markSuspectBlock(String storageId, ExtendedBlock block) {
if (!isEnabled()) {
LOG.debug("Not scanning suspicious block {} on {}, because the block " +
"scanner is disabled.", block, storageId);
return;
}
VolumeScanner scanner = scanners.get(storageId);
if (scanner == null) {
// This could happen if the volume is in the process of being removed.
// The removal process shuts down the VolumeScanner, but the volume
// object stays around as long as there are references to it (which
// should not be that long.)
LOG.info("Not scanning suspicious block {} on {}, because there is no " +
"volume scanner for that storageId.", block, storageId);
return;
}
scanner.markSuspectBlock(block);
}
@InterfaceAudience.Private
public static class Servlet extends HttpServlet {
private static final long serialVersionUID = 1L;
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response) throws IOException {
response.setContentType("text/plain");
DataNode datanode = (DataNode)
getServletContext().getAttribute("datanode");
BlockScanner blockScanner = datanode.getBlockScanner();
StringBuilder buffer = new StringBuilder(8 * 1024);
if (!blockScanner.isEnabled()) {
LOG.warn("Periodic block scanner is not running");
buffer.append("Periodic block scanner is not running. " +
"Please check the datanode log if this is unexpected.");
} else {
buffer.append("Block Scanner Statistics\n\n");
blockScanner.printStats(buffer);
}
String resp = buffer.toString();
LOG.trace("Returned Servlet info {}", resp);
response.getWriter().write(resp);
}
}
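  /*
   * The servlet above renders the statistics as plain text over the
   * DataNode's HTTP server. A hedged fetch example, assuming the servlet is
   * mounted at /blockScannerReport (the registration happens elsewhere and is
   * not shown in this file):
   *
   *   URL url = new URL("http://" + dnHttpAddress + "/blockScannerReport");
   *   try (InputStream in = url.openStream()) {
   *     // read and print the plain-text report
   *   }
   */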
}
| 12799 | 34.068493 | 101 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActorAction.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
/**
 * Interface for actions issued by the BPOfferService class to tell a
 * BPServiceActor to report something to the NameNode it is serving.
*/
public interface BPServiceActorAction {
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode,
DatanodeRegistration bpRegistration) throws BPServiceActorActionException;
}
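// Illustrative sketch (an assumption, not part of the original file): a
// minimal implementation of BPServiceActorAction. The class name is
// hypothetical; a real action would invoke one of the DatanodeProtocol RPCs
// on bpNamenode inside reportTo().
//
//   class LogOnlyAction implements BPServiceActorAction {
//     @Override
//     public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode,
//         DatanodeRegistration bpRegistration)
//         throws BPServiceActorActionException {
//       // e.g. translate a queued event into a call on bpNamenode, wrapping
//       // any IOException in a BPServiceActorActionException.
//     }
//   }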
| 1,335 | 39.484848 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.io.IOUtils;
import com.google.common.annotations.VisibleForTesting;
/**
* This class is used by datanodes to maintain meta data of its replicas.
* It provides a general interface for meta information of a replica.
*/
@InterfaceAudience.Private
abstract public class ReplicaInfo extends Block implements Replica {
/** volume where the replica belongs */
private FsVolumeSpi volume;
  /**
   * Base directory containing numerically-identified sub directories and
   * possibly blocks; this is where the replica's block and meta files live.
   */
private File baseDir;
/**
* Whether or not this replica's parent directory includes subdirs, in which
* case we can generate them based on the replica's block ID
*/
private boolean hasSubdirs;
private static final Map<String, File> internedBaseDirs = new HashMap<String, File>();
/**
* Constructor
* @param block a block
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
ReplicaInfo(Block block, FsVolumeSpi vol, File dir) {
this(block.getBlockId(), block.getNumBytes(),
block.getGenerationStamp(), vol, dir);
}
/**
* Constructor
* @param blockId block id
* @param len replica length
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
ReplicaInfo(long blockId, long len, long genStamp,
FsVolumeSpi vol, File dir) {
super(blockId, len, genStamp);
this.volume = vol;
setDirInternal(dir);
}
/**
* Copy constructor.
* @param from where to copy from
*/
ReplicaInfo(ReplicaInfo from) {
this(from, from.getVolume(), from.getDir());
}
/**
* Get the full path of this replica's data file
* @return the full path of this replica's data file
*/
public File getBlockFile() {
return new File(getDir(), getBlockName());
}
/**
* Get the full path of this replica's meta file
* @return the full path of this replica's meta file
*/
public File getMetaFile() {
return new File(getDir(),
DatanodeUtil.getMetaName(getBlockName(), getGenerationStamp()));
}
/**
* Get the volume where this replica is located on disk
* @return the volume where this replica is located on disk
*/
public FsVolumeSpi getVolume() {
return volume;
}
/**
* Set the volume where this replica is located on disk
*/
void setVolume(FsVolumeSpi vol) {
this.volume = vol;
}
/**
* Get the storageUuid of the volume that stores this replica.
*/
@Override
public String getStorageUuid() {
return volume.getStorageID();
}
/**
* Return the parent directory path where this replica is located
* @return the parent directory path where this replica is located
*/
File getDir() {
return hasSubdirs ? DatanodeUtil.idToBlockDir(baseDir,
getBlockId()) : baseDir;
}
/**
* Set the parent directory where this replica is located
* @param dir the parent directory where the replica is located
*/
public void setDir(File dir) {
setDirInternal(dir);
}
private void setDirInternal(File dir) {
if (dir == null) {
baseDir = null;
return;
}
ReplicaDirInfo dirInfo = parseBaseDir(dir);
this.hasSubdirs = dirInfo.hasSubidrs;
synchronized (internedBaseDirs) {
if (!internedBaseDirs.containsKey(dirInfo.baseDirPath)) {
// Create a new String path of this file and make a brand new File object
// to guarantee we drop the reference to the underlying char[] storage.
File baseDir = new File(dirInfo.baseDirPath);
internedBaseDirs.put(dirInfo.baseDirPath, baseDir);
}
this.baseDir = internedBaseDirs.get(dirInfo.baseDirPath);
}
}
@VisibleForTesting
public static class ReplicaDirInfo {
public String baseDirPath;
public boolean hasSubidrs;
public ReplicaDirInfo (String baseDirPath, boolean hasSubidrs) {
this.baseDirPath = baseDirPath;
this.hasSubidrs = hasSubidrs;
}
}
@VisibleForTesting
public static ReplicaDirInfo parseBaseDir(File dir) {
File currentDir = dir;
boolean hasSubdirs = false;
while (currentDir.getName().startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)) {
hasSubdirs = true;
currentDir = currentDir.getParentFile();
}
return new ReplicaDirInfo(currentDir.getAbsolutePath(), hasSubdirs);
}
/**
   * Check if this replica has already been unlinked.
   * @return true if the replica has already been unlinked
   * or does not need to be unlinked; false otherwise
*/
public boolean isUnlinked() {
return true; // no need to be unlinked
}
/**
* set that this replica is unlinked
*/
public void setUnlinked() {
// no need to be unlinked
}
/**
* Number of bytes reserved for this replica on disk.
*/
public long getBytesReserved() {
return 0;
}
/**
* Number of bytes originally reserved for this replica. The actual
* reservation is adjusted as data is written to disk.
*
* @return the number of bytes originally reserved for this replica.
*/
public long getOriginalBytesReserved() {
return 0;
}
/**
* Copy specified file into a temporary file. Then rename the
* temporary file to the original name. This will cause any
* hardlinks to the original file to be removed. The temporary
* files are created in the same directory. The temporary files will
* be recovered (especially on Windows) on datanode restart.
*/
private void unlinkFile(File file, Block b) throws IOException {
File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
try {
FileInputStream in = new FileInputStream(file);
try {
FileOutputStream out = new FileOutputStream(tmpFile);
try {
IOUtils.copyBytes(in, out, 16*1024);
} finally {
out.close();
}
} finally {
in.close();
}
if (file.length() != tmpFile.length()) {
throw new IOException("Copy of file " + file + " size " + file.length()+
" into file " + tmpFile +
" resulted in a size of " + tmpFile.length());
}
FileUtil.replaceFile(tmpFile, file);
} catch (IOException e) {
boolean done = tmpFile.delete();
if (!done) {
DataNode.LOG.info("detachFile failed to delete temporary file " +
tmpFile);
}
throw e;
}
}
/**
* Remove a hard link by copying the block to a temporary place and
* then moving it back
* @param numLinks number of hard links
   * @return true if the copy is successful;
   * false if the replica is already unlinked or does not need to be unlinked
* @throws IOException if there is any copy error
*/
public boolean unlinkBlock(int numLinks) throws IOException {
if (isUnlinked()) {
return false;
}
File file = getBlockFile();
if (file == null || getVolume() == null) {
throw new IOException("detachBlock:Block not found. " + this);
}
File meta = getMetaFile();
if (HardLink.getLinkCount(file) > numLinks) {
DataNode.LOG.info("CopyOnWrite for block " + this);
unlinkFile(file, this);
}
if (HardLink.getLinkCount(meta) > numLinks) {
unlinkFile(meta, this);
}
setUnlinked();
return true;
}
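  // Illustrative usage sketch (an assumption about the caller, not part of
  // this file): before appending to a finalized replica whose files may be
  // hard-linked (for example after an upgrade), a caller can break the link
  // so the append does not modify the shared copy:
  //
  //   replicaInfo.unlinkBlock(1);  // copy-on-write only if link count > 1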
@Override //Object
public String toString() {
return getClass().getSimpleName()
+ ", " + super.toString()
+ ", " + getState()
+ "\n getNumBytes() = " + getNumBytes()
+ "\n getBytesOnDisk() = " + getBytesOnDisk()
+ "\n getVisibleLength()= " + getVisibleLength()
+ "\n getVolume() = " + getVolume()
+ "\n getBlockFile() = " + getBlockFile();
}
@Override
public boolean isOnTransientStorage() {
return volume.isTransientStorage();
}
}
| 9,414 | 28.888889 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
import com.google.common.annotations.VisibleForTesting;
/**
* BlockMetadataHeader manages metadata for data blocks on Datanodes.
* This is not related to the Block related functionality in Namenode.
* The biggest part of data block metadata is CRC for the block.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class BlockMetadataHeader {
private static final Log LOG = LogFactory.getLog(BlockMetadataHeader.class);
public static final short VERSION = 1;
/**
* Header includes everything except the checksum(s) themselves.
* Version is two bytes. Following it is the DataChecksum
* that occupies 5 bytes.
*/
private final short version;
private DataChecksum checksum = null;
private static final HdfsConfiguration conf = new HdfsConfiguration();
@VisibleForTesting
public BlockMetadataHeader(short version, DataChecksum checksum) {
this.checksum = checksum;
this.version = version;
}
/** Get the version */
public short getVersion() {
return version;
}
/** Get the checksum */
public DataChecksum getChecksum() {
return checksum;
}
/**
* Read the checksum header from the meta file.
* @return the data checksum obtained from the header.
*/
public static DataChecksum readDataChecksum(File metaFile) throws IOException {
DataInputStream in = null;
try {
in = new DataInputStream(new BufferedInputStream(
new FileInputStream(metaFile), DFSUtil.getIoFileBufferSize(conf)));
return readDataChecksum(in, metaFile);
} finally {
IOUtils.closeStream(in);
}
}
/**
* Read the checksum header from the meta input stream.
* @return the data checksum obtained from the header.
*/
public static DataChecksum readDataChecksum(final DataInputStream metaIn,
final Object name) throws IOException {
// read and handle the common header here. For now just a version
final BlockMetadataHeader header = readHeader(metaIn);
if (header.getVersion() != VERSION) {
LOG.warn("Unexpected meta-file version for " + name
+ ": version in file is " + header.getVersion()
+ " but expected version is " + VERSION);
}
return header.getChecksum();
}
/**
* Read the header without changing the position of the FileChannel.
*
* @param fc The FileChannel to read.
* @return the Metadata Header.
* @throws IOException on error.
*/
public static BlockMetadataHeader preadHeader(FileChannel fc)
throws IOException {
final byte arr[] = new byte[getHeaderSize()];
ByteBuffer buf = ByteBuffer.wrap(arr);
while (buf.hasRemaining()) {
if (fc.read(buf, 0) <= 0) {
throw new EOFException("unexpected EOF while reading " +
"metadata file header");
}
}
short version = (short)((arr[0] << 8) | (arr[1] & 0xff));
DataChecksum dataChecksum = DataChecksum.newDataChecksum(arr, 2);
return new BlockMetadataHeader(version, dataChecksum);
}
/**
* This reads all the fields till the beginning of checksum.
* @return Metadata Header
* @throws IOException
*/
public static BlockMetadataHeader readHeader(DataInputStream in) throws IOException {
return readHeader(in.readShort(), in);
}
/**
* Reads header at the top of metadata file and returns the header.
*
* @return metadata header for the block
* @throws IOException
*/
public static BlockMetadataHeader readHeader(File file) throws IOException {
DataInputStream in = null;
try {
in = new DataInputStream(new BufferedInputStream(
new FileInputStream(file)));
return readHeader(in);
} finally {
IOUtils.closeStream(in);
}
}
/**
* Read the header at the beginning of the given block meta file.
* The current file position will be altered by this method.
* If an error occurs, the file is <em>not</em> closed.
*/
public static BlockMetadataHeader readHeader(RandomAccessFile raf) throws IOException {
byte[] buf = new byte[getHeaderSize()];
raf.seek(0);
raf.readFully(buf, 0, buf.length);
return readHeader(new DataInputStream(new ByteArrayInputStream(buf)));
}
// Version is already read.
private static BlockMetadataHeader readHeader(short version, DataInputStream in)
throws IOException {
DataChecksum checksum = DataChecksum.newDataChecksum(in);
return new BlockMetadataHeader(version, checksum);
}
/**
* This writes all the fields till the beginning of checksum.
* @param out DataOutputStream
* @throws IOException
*/
@VisibleForTesting
public static void writeHeader(DataOutputStream out,
BlockMetadataHeader header)
throws IOException {
out.writeShort(header.getVersion());
header.getChecksum().writeHeader(out);
}
/**
* Writes all the fields till the beginning of checksum.
* @throws IOException on error
*/
public static void writeHeader(DataOutputStream out, DataChecksum checksum)
throws IOException {
writeHeader(out, new BlockMetadataHeader(VERSION, checksum));
}
/**
* Returns the size of the header
*/
public static int getHeaderSize() {
return Short.SIZE/Byte.SIZE + DataChecksum.getChecksumHeaderSize();
}
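  // Worked example (illustrative sketch; CRC32C, 512 bytes-per-checksum and
  // the metaFile variable are arbitrary assumptions): the on-disk header is
  // 2 bytes of version followed by the 5-byte DataChecksum header, so
  // getHeaderSize() == 7.
  //
  //   DataChecksum sum = DataChecksum.newDataChecksum(
  //       DataChecksum.Type.CRC32C, 512);
  //   try (DataOutputStream out = new DataOutputStream(
  //       new java.io.FileOutputStream(metaFile))) {
  //     BlockMetadataHeader.writeHeader(out, sum);   // writes 7 bytes
  //   }
  //   DataChecksum readBack = BlockMetadataHeader.readDataChecksum(metaFile);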
}
| 6,979 | 31.924528 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaAlreadyExistsException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
/**
* Exception indicating that the target block already exists
* and is not set to be recovered/overwritten.
*/
public class ReplicaAlreadyExistsException extends IOException {
private static final long serialVersionUID = 1L;
public ReplicaAlreadyExistsException() {
super();
}
public ReplicaAlreadyExistsException(String msg) {
super(msg);
}
}
| 1,259 | 31.307692 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_ACCESS_TOKEN;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_INVALID;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.ERROR_UNSUPPORTED;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitFdResponse.USE_RECEIPT_VERIFICATION;
import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitFdResponse.DO_NOT_USE_RECEIPT_VERIFICATION;
import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT;
import static org.apache.hadoop.util.Time.monotonicNow;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.nio.channels.ClosedChannelException;
import java.security.MessageDigest;
import java.util.Arrays;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.InvalidMagicNumberException;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsUnsupportedException;
import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsVersionException;
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import org.apache.hadoop.util.Time;
/**
* Thread for processing incoming/outgoing data stream.
*/
class DataXceiver extends Receiver implements Runnable {
public static final Log LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;
private Peer peer;
private final String remoteAddress; // address of remote side
private final String remoteAddressWithoutPort; // only the address, no port
private final String localAddress; // local address of this daemon
private final DataNode datanode;
private final DNConf dnConf;
private final DataXceiverServer dataXceiverServer;
private final boolean connectToDnViaHostname;
private long opStartTime; //the start time of receiving an Op
private final InputStream socketIn;
private OutputStream socketOut;
private BlockReceiver blockReceiver = null;
private final int ioFileBufferSize;
private final int smallBufferSize;
/**
* Client Name used in previous operation. Not available on first request
* on the socket.
*/
private String previousOpClientName;
public static DataXceiver create(Peer peer, DataNode dn,
DataXceiverServer dataXceiverServer) throws IOException {
return new DataXceiver(peer, dn, dataXceiverServer);
}
private DataXceiver(Peer peer, DataNode datanode,
DataXceiverServer dataXceiverServer) throws IOException {
this.peer = peer;
this.dnConf = datanode.getDnConf();
this.socketIn = peer.getInputStream();
this.socketOut = peer.getOutputStream();
this.datanode = datanode;
this.dataXceiverServer = dataXceiverServer;
this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname;
this.ioFileBufferSize = DFSUtil.getIoFileBufferSize(datanode.getConf());
this.smallBufferSize = DFSUtil.getSmallBufferSize(datanode.getConf());
remoteAddress = peer.getRemoteAddressString();
final int colonIdx = remoteAddress.indexOf(':');
remoteAddressWithoutPort =
(colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
localAddress = peer.getLocalAddressString();
if (LOG.isDebugEnabled()) {
LOG.debug("Number of active connections is: "
+ datanode.getXceiverCount());
}
}
/**
* Update the current thread's name to contain the current status.
* Use this only after this receiver has started on its thread, i.e.,
* outside the constructor.
*/
private void updateCurrentThreadName(String status) {
StringBuilder sb = new StringBuilder();
sb.append("DataXceiver for client ");
if (previousOpClientName != null) {
sb.append(previousOpClientName).append(" at ");
}
sb.append(remoteAddress);
if (status != null) {
sb.append(" [").append(status).append("]");
}
Thread.currentThread().setName(sb.toString());
}
/** Return the datanode object. */
DataNode getDataNode() {return datanode;}
private OutputStream getOutputStream() {
return socketOut;
}
public void sendOOB() throws IOException, InterruptedException {
LOG.info("Sending OOB to peer: " + peer);
    if (blockReceiver != null) {
      blockReceiver.sendOOB();
    }
}
/**
* Read/write data from/to the DataXceiverServer.
*/
@Override
public void run() {
int opsProcessed = 0;
Op op = null;
try {
dataXceiverServer.addPeer(peer, Thread.currentThread(), this);
peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
InputStream input = socketIn;
try {
IOStreamPair saslStreams = datanode.saslServer.receive(peer, socketOut,
socketIn, datanode.getXferAddress().getPort(),
datanode.getDatanodeId());
input = new BufferedInputStream(saslStreams.in,
smallBufferSize);
socketOut = saslStreams.out;
} catch (InvalidMagicNumberException imne) {
if (imne.isHandshake4Encryption()) {
LOG.info("Failed to read expected encryption handshake from client " +
"at " + peer.getRemoteAddressString() + ". Perhaps the client " +
"is running an older version of Hadoop which does not support " +
"encryption");
} else {
LOG.info("Failed to read expected SASL data transfer protection " +
"handshake from client at " + peer.getRemoteAddressString() +
". Perhaps the client is running an older version of Hadoop " +
"which does not support SASL data transfer protection");
}
return;
}
super.initialize(new DataInputStream(input));
// We process requests in a loop, and stay around for a short timeout.
// This optimistic behaviour allows the other end to reuse connections.
      // Setting the keepalive timeout to 0 disables this behavior.
do {
updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));
try {
if (opsProcessed != 0) {
assert dnConf.socketKeepaliveTimeout > 0;
peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
} else {
peer.setReadTimeout(dnConf.socketTimeout);
}
op = readOp();
} catch (InterruptedIOException ignored) {
// Time out while we wait for client rpc
break;
} catch (IOException err) {
// Since we optimistically expect the next op, it's quite normal to get EOF here.
if (opsProcessed > 0 &&
(err instanceof EOFException || err instanceof ClosedChannelException)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Cached " + peer + " closing after " + opsProcessed + " ops");
}
} else {
incrDatanodeNetworkErrors();
throw err;
}
break;
}
// restore normal timeout
if (opsProcessed != 0) {
peer.setReadTimeout(dnConf.socketTimeout);
}
opStartTime = monotonicNow();
processOp(op);
++opsProcessed;
} while ((peer != null) &&
(!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
} catch (Throwable t) {
String s = datanode.getDisplayName() + ":DataXceiver error processing "
+ ((op == null) ? "unknown" : op.name()) + " operation "
+ " src: " + remoteAddress + " dst: " + localAddress;
if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
// For WRITE_BLOCK, it is okay if the replica already exists since
// client and replication may write the same block to the same datanode
// at the same time.
if (LOG.isTraceEnabled()) {
LOG.trace(s, t);
} else {
LOG.info(s + "; " + t);
}
} else if (op == Op.READ_BLOCK && t instanceof SocketTimeoutException) {
String s1 =
"Likely the client has stopped reading, disconnecting it";
s1 += " (" + s + ")";
if (LOG.isTraceEnabled()) {
LOG.trace(s1, t);
} else {
LOG.info(s1 + "; " + t);
}
} else {
LOG.error(s, t);
}
} finally {
if (LOG.isDebugEnabled()) {
LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
+ datanode.getXceiverCount());
}
updateCurrentThreadName("Cleaning up");
if (peer != null) {
dataXceiverServer.closePeer(peer);
IOUtils.closeStream(in);
}
}
}
@Override
public void requestShortCircuitFds(final ExtendedBlock blk,
final Token<BlockTokenIdentifier> token,
SlotId slotId, int maxVersion, boolean supportsReceiptVerification)
throws IOException {
updateCurrentThreadName("Passing file descriptors for block " + blk);
BlockOpResponseProto.Builder bld = BlockOpResponseProto.newBuilder();
FileInputStream fis[] = null;
SlotId registeredSlotId = null;
boolean success = false;
try {
try {
if (peer.getDomainSocket() == null) {
throw new IOException("You cannot pass file descriptors over " +
"anything but a UNIX domain socket.");
}
if (slotId != null) {
boolean isCached = datanode.data.
isCached(blk.getBlockPoolId(), blk.getBlockId());
datanode.shortCircuitRegistry.registerSlot(
ExtendedBlockId.fromExtendedBlock(blk), slotId, isCached);
registeredSlotId = slotId;
}
fis = datanode.requestShortCircuitFdsForRead(blk, token, maxVersion);
Preconditions.checkState(fis != null);
bld.setStatus(SUCCESS);
bld.setShortCircuitAccessVersion(DataNode.CURRENT_BLOCK_FORMAT_VERSION);
} catch (ShortCircuitFdsVersionException e) {
bld.setStatus(ERROR_UNSUPPORTED);
bld.setShortCircuitAccessVersion(DataNode.CURRENT_BLOCK_FORMAT_VERSION);
bld.setMessage(e.getMessage());
} catch (ShortCircuitFdsUnsupportedException e) {
bld.setStatus(ERROR_UNSUPPORTED);
bld.setMessage(e.getMessage());
} catch (InvalidToken e) {
bld.setStatus(ERROR_ACCESS_TOKEN);
bld.setMessage(e.getMessage());
} catch (IOException e) {
bld.setStatus(ERROR);
bld.setMessage(e.getMessage());
}
bld.build().writeDelimitedTo(socketOut);
if (fis != null) {
FileDescriptor fds[] = new FileDescriptor[fis.length];
for (int i = 0; i < fds.length; i++) {
fds[i] = fis[i].getFD();
}
byte buf[] = new byte[1];
if (supportsReceiptVerification) {
buf[0] = (byte)USE_RECEIPT_VERIFICATION.getNumber();
} else {
buf[0] = (byte)DO_NOT_USE_RECEIPT_VERIFICATION.getNumber();
}
DomainSocket sock = peer.getDomainSocket();
sock.sendFileDescriptors(fds, buf, 0, buf.length);
if (supportsReceiptVerification) {
LOG.trace("Reading receipt verification byte for " + slotId);
int val = sock.getInputStream().read();
if (val < 0) {
throw new EOFException();
}
} else {
LOG.trace("Receipt verification is not enabled on the DataNode. " +
"Not verifying " + slotId);
}
success = true;
}
} finally {
if ((!success) && (registeredSlotId != null)) {
LOG.info("Unregistering " + registeredSlotId + " because the " +
"requestShortCircuitFdsForRead operation failed.");
datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId);
}
if (ClientTraceLog.isInfoEnabled()) {
DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk
.getBlockPoolId());
BlockSender.ClientTraceLog.info(String.format(
"src: 127.0.0.1, dest: 127.0.0.1, op: REQUEST_SHORT_CIRCUIT_FDS," +
" blockid: %s, srvID: %s, success: %b",
blk.getBlockId(), dnR.getDatanodeUuid(), success));
}
if (fis != null) {
IOUtils.cleanup(LOG, fis);
}
}
}
@Override
public void releaseShortCircuitFds(SlotId slotId) throws IOException {
boolean success = false;
try {
String error;
Status status;
try {
datanode.shortCircuitRegistry.unregisterSlot(slotId);
error = null;
status = Status.SUCCESS;
} catch (UnsupportedOperationException e) {
error = "unsupported operation";
status = Status.ERROR_UNSUPPORTED;
} catch (Throwable e) {
error = e.getMessage();
status = Status.ERROR_INVALID;
}
ReleaseShortCircuitAccessResponseProto.Builder bld =
ReleaseShortCircuitAccessResponseProto.newBuilder();
bld.setStatus(status);
if (error != null) {
bld.setError(error);
}
bld.build().writeDelimitedTo(socketOut);
success = true;
} finally {
if (ClientTraceLog.isInfoEnabled()) {
BlockSender.ClientTraceLog.info(String.format(
"src: 127.0.0.1, dest: 127.0.0.1, op: RELEASE_SHORT_CIRCUIT_FDS," +
" shmId: %016x%016x, slotIdx: %d, srvID: %s, success: %b",
slotId.getShmId().getHi(), slotId.getShmId().getLo(),
slotId.getSlotIdx(), datanode.getDatanodeUuid(), success));
}
}
}
private void sendShmErrorResponse(Status status, String error)
throws IOException {
ShortCircuitShmResponseProto.newBuilder().setStatus(status).
setError(error).build().writeDelimitedTo(socketOut);
}
private void sendShmSuccessResponse(DomainSocket sock, NewShmInfo shmInfo)
throws IOException {
DataNodeFaultInjector.get().sendShortCircuitShmResponse();
ShortCircuitShmResponseProto.newBuilder().setStatus(SUCCESS).
setId(PBHelper.convert(shmInfo.shmId)).build().
writeDelimitedTo(socketOut);
// Send the file descriptor for the shared memory segment.
byte buf[] = new byte[] { (byte)0 };
FileDescriptor shmFdArray[] =
new FileDescriptor[] { shmInfo.stream.getFD() };
sock.sendFileDescriptors(shmFdArray, buf, 0, buf.length);
}
@Override
public void requestShortCircuitShm(String clientName) throws IOException {
NewShmInfo shmInfo = null;
boolean success = false;
DomainSocket sock = peer.getDomainSocket();
try {
if (sock == null) {
sendShmErrorResponse(ERROR_INVALID, "Bad request from " +
peer + ": must request a shared " +
"memory segment over a UNIX domain socket.");
return;
}
try {
shmInfo = datanode.shortCircuitRegistry.
createNewMemorySegment(clientName, sock);
// After calling #{ShortCircuitRegistry#createNewMemorySegment}, the
// socket is managed by the DomainSocketWatcher, not the DataXceiver.
releaseSocket();
} catch (UnsupportedOperationException e) {
sendShmErrorResponse(ERROR_UNSUPPORTED,
"This datanode has not been configured to support " +
"short-circuit shared memory segments.");
return;
} catch (IOException e) {
sendShmErrorResponse(ERROR,
"Failed to create shared file descriptor: " + e.getMessage());
return;
}
sendShmSuccessResponse(sock, shmInfo);
success = true;
} finally {
if (ClientTraceLog.isInfoEnabled()) {
if (success) {
BlockSender.ClientTraceLog.info(String.format(
"cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
"op: REQUEST_SHORT_CIRCUIT_SHM," +
" shmId: %016x%016x, srvID: %s, success: true",
clientName, shmInfo.shmId.getHi(), shmInfo.shmId.getLo(),
datanode.getDatanodeUuid()));
} else {
BlockSender.ClientTraceLog.info(String.format(
"cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
"op: REQUEST_SHORT_CIRCUIT_SHM, " +
"shmId: n/a, srvID: %s, success: false",
clientName, datanode.getDatanodeUuid()));
}
}
if ((!success) && (peer == null)) {
// The socket is now managed by the DomainSocketWatcher. However,
// we failed to pass it to the client. We call shutdown() on the
// UNIX domain socket now. This will trigger the DomainSocketWatcher
// callback. The callback will close the domain socket.
// We don't want to close the socket here, since that might lead to
// bad behavior inside the poll() call. See HADOOP-11802 for details.
try {
LOG.warn("Failed to send success response back to the client. " +
"Shutting down socket for " + shmInfo.shmId + ".");
sock.shutdown();
} catch (IOException e) {
LOG.warn("Failed to shut down socket in error handler", e);
}
}
IOUtils.cleanup(null, shmInfo);
}
}
void releaseSocket() {
dataXceiverServer.releasePeer(peer);
peer = null;
}
@Override
public void readBlock(final ExtendedBlock block,
final Token<BlockTokenIdentifier> blockToken,
final String clientName,
final long blockOffset,
final long length,
final boolean sendChecksum,
final CachingStrategy cachingStrategy) throws IOException {
previousOpClientName = clientName;
long read = 0;
OutputStream baseStream = getOutputStream();
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
baseStream, smallBufferSize));
checkAccess(out, true, block, blockToken,
Op.READ_BLOCK, BlockTokenIdentifier.AccessMode.READ);
// send the block
BlockSender blockSender = null;
DatanodeRegistration dnR =
datanode.getDNRegistrationForBP(block.getBlockPoolId());
final String clientTraceFmt =
clientName.length() > 0 && ClientTraceLog.isInfoEnabled()
? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress,
"%d", "HDFS_READ", clientName, "%d",
dnR.getDatanodeUuid(), block, "%d")
: dnR + " Served block " + block + " to " +
remoteAddress;
updateCurrentThreadName("Sending block " + block);
try {
try {
blockSender = new BlockSender(block, blockOffset, length,
true, false, sendChecksum, datanode, clientTraceFmt,
cachingStrategy);
} catch(IOException e) {
String msg = "opReadBlock " + block + " received exception " + e;
LOG.info(msg);
sendResponse(ERROR, msg);
throw e;
}
// send op status
writeSuccessWithChecksumInfo(blockSender, new DataOutputStream(getOutputStream()));
long beginRead = Time.monotonicNow();
read = blockSender.sendBlock(out, baseStream, null); // send data
long duration = Time.monotonicNow() - beginRead;
if (blockSender.didSendEntireByteRange()) {
// If we sent the entire range, then we should expect the client
// to respond with a Status enum.
try {
ClientReadStatusProto stat = ClientReadStatusProto.parseFrom(
PBHelper.vintPrefixed(in));
if (!stat.hasStatus()) {
LOG.warn("Client " + peer.getRemoteAddressString() +
" did not send a valid status code after reading. " +
"Will close connection.");
IOUtils.closeStream(out);
}
} catch (IOException ioe) {
LOG.debug("Error reading client status response. Will close connection.", ioe);
IOUtils.closeStream(out);
incrDatanodeNetworkErrors();
}
} else {
IOUtils.closeStream(out);
}
datanode.metrics.incrBytesRead((int) read);
datanode.metrics.incrBlocksRead();
datanode.metrics.incrTotalReadTime(duration);
} catch ( SocketException ignored ) {
if (LOG.isTraceEnabled()) {
LOG.trace(dnR + ":Ignoring exception while serving " + block + " to " +
remoteAddress, ignored);
}
      // It's ok for the remote side to close the connection at any time.
datanode.metrics.incrBlocksRead();
IOUtils.closeStream(out);
} catch ( IOException ioe ) {
/* What exactly should we do here?
       * Earlier versions shut down the datanode on disk errors.
*/
if (!(ioe instanceof SocketTimeoutException)) {
LOG.warn(dnR + ":Got exception while serving " + block + " to "
+ remoteAddress, ioe);
incrDatanodeNetworkErrors();
}
throw ioe;
} finally {
IOUtils.closeStream(blockSender);
}
//update metrics
datanode.metrics.addReadBlockOp(elapsed());
datanode.metrics.incrReadsFromClient(peer.isLocal(), read);
}
@Override
public void writeBlock(final ExtendedBlock block,
final StorageType storageType,
final Token<BlockTokenIdentifier> blockToken,
final String clientname,
final DatanodeInfo[] targets,
final StorageType[] targetStorageTypes,
final DatanodeInfo srcDataNode,
final BlockConstructionStage stage,
final int pipelineSize,
final long minBytesRcvd,
final long maxBytesRcvd,
final long latestGenerationStamp,
DataChecksum requestedChecksum,
CachingStrategy cachingStrategy,
boolean allowLazyPersist,
final boolean pinning,
final boolean[] targetPinnings) throws IOException {
previousOpClientName = clientname;
updateCurrentThreadName("Receiving block " + block);
final boolean isDatanode = clientname.length() == 0;
final boolean isClient = !isDatanode;
final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW
|| stage == BlockConstructionStage.TRANSFER_FINALIZED;
allowLazyPersist = allowLazyPersist &&
(dnConf.getAllowNonLocalLazyPersist() || peer.isLocal());
long size = 0;
// check single target for transfer-RBW/Finalized
if (isTransfer && targets.length > 0) {
throw new IOException(stage + " does not support multiple targets "
+ Arrays.asList(targets));
}
if (LOG.isDebugEnabled()) {
LOG.debug("opWriteBlock: stage=" + stage + ", clientname=" + clientname
+ "\n block =" + block + ", newGs=" + latestGenerationStamp
+ ", bytesRcvd=[" + minBytesRcvd + ", " + maxBytesRcvd + "]"
+ "\n targets=" + Arrays.asList(targets)
+ "; pipelineSize=" + pipelineSize + ", srcDataNode=" + srcDataNode
+ ", pinning=" + pinning);
LOG.debug("isDatanode=" + isDatanode
+ ", isClient=" + isClient
+ ", isTransfer=" + isTransfer);
LOG.debug("writeBlock receive buf size " + peer.getReceiveBufferSize() +
" tcp no delay " + peer.getTcpNoDelay());
}
// We later mutate block's generation stamp and length, but we need to
// forward the original version of the block to downstream mirrors, so
// make a copy here.
final ExtendedBlock originalBlock = new ExtendedBlock(block);
if (block.getNumBytes() == 0) {
block.setNumBytes(dataXceiverServer.estimateBlockSize);
}
LOG.info("Receiving " + block + " src: " + remoteAddress + " dest: "
+ localAddress);
// reply to upstream datanode or client
final DataOutputStream replyOut = getBufferedOutputStream();
checkAccess(replyOut, isClient, block, blockToken,
Op.WRITE_BLOCK, BlockTokenIdentifier.AccessMode.WRITE);
DataOutputStream mirrorOut = null; // stream to next target
DataInputStream mirrorIn = null; // reply from next target
Socket mirrorSock = null; // socket to next target
String mirrorNode = null; // the name:port of next target
String firstBadLink = ""; // first datanode that failed in connection setup
Status mirrorInStatus = SUCCESS;
final String storageUuid;
try {
if (isDatanode ||
stage != BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
// open a block receiver
blockReceiver = getBlockReceiver(block, storageType, in,
peer.getRemoteAddressString(),
peer.getLocalAddressString(),
stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd,
clientname, srcDataNode, datanode, requestedChecksum,
cachingStrategy, allowLazyPersist, pinning);
storageUuid = blockReceiver.getStorageUuid();
} else {
storageUuid = datanode.data.recoverClose(
block, latestGenerationStamp, minBytesRcvd);
}
//
// Connect to downstream machine, if appropriate
//
if (targets.length > 0) {
InetSocketAddress mirrorTarget = null;
// Connect to backup machine
mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to datanode " + mirrorNode);
}
mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
mirrorSock = datanode.newSocket();
try {
int timeoutValue = dnConf.socketTimeout
+ (HdfsServerConstants.READ_TIMEOUT_EXTENSION * targets.length);
int writeTimeout = dnConf.socketWriteTimeout +
(HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
mirrorSock.setSoTimeout(timeoutValue);
mirrorSock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
OutputStream unbufMirrorOut = NetUtils.getOutputStream(mirrorSock,
writeTimeout);
InputStream unbufMirrorIn = NetUtils.getInputStream(mirrorSock);
DataEncryptionKeyFactory keyFactory =
datanode.getDataEncryptionKeyFactoryForBlock(block);
IOStreamPair saslStreams = datanode.saslClient.socketSend(mirrorSock,
unbufMirrorOut, unbufMirrorIn, keyFactory, blockToken, targets[0]);
unbufMirrorOut = saslStreams.out;
unbufMirrorIn = saslStreams.in;
mirrorOut = new DataOutputStream(new BufferedOutputStream(unbufMirrorOut,
smallBufferSize));
mirrorIn = new DataInputStream(unbufMirrorIn);
if (targetPinnings != null && targetPinnings.length > 0) {
new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0],
blockToken, clientname, targets, targetStorageTypes, srcDataNode,
stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
latestGenerationStamp, requestedChecksum, cachingStrategy,
allowLazyPersist, targetPinnings[0], targetPinnings);
} else {
new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0],
blockToken, clientname, targets, targetStorageTypes, srcDataNode,
stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
latestGenerationStamp, requestedChecksum, cachingStrategy,
allowLazyPersist, false, targetPinnings);
}
mirrorOut.flush();
DataNodeFaultInjector.get().writeBlockAfterFlush();
// read connect ack (only for clients, not for replication req)
if (isClient) {
BlockOpResponseProto connectAck =
BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(mirrorIn));
mirrorInStatus = connectAck.getStatus();
firstBadLink = connectAck.getFirstBadLink();
if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
LOG.info("Datanode " + targets.length +
" got response for connect ack " +
" from downstream datanode with firstbadlink as " +
firstBadLink);
}
}
} catch (IOException e) {
if (isClient) {
BlockOpResponseProto.newBuilder()
.setStatus(ERROR)
// NB: Unconditionally using the xfer addr w/o hostname
.setFirstBadLink(targets[0].getXferAddr())
.build()
.writeDelimitedTo(replyOut);
replyOut.flush();
}
IOUtils.closeStream(mirrorOut);
mirrorOut = null;
IOUtils.closeStream(mirrorIn);
mirrorIn = null;
IOUtils.closeSocket(mirrorSock);
mirrorSock = null;
if (isClient) {
LOG.error(datanode + ":Exception transfering block " +
block + " to mirror " + mirrorNode + ": " + e);
throw e;
} else {
LOG.info(datanode + ":Exception transfering " +
block + " to mirror " + mirrorNode +
"- continuing without the mirror", e);
incrDatanodeNetworkErrors();
}
}
}
// send connect-ack to source for clients and not transfer-RBW/Finalized
if (isClient && !isTransfer) {
if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) {
LOG.info("Datanode " + targets.length +
" forwarding connect ack to upstream firstbadlink is " +
firstBadLink);
}
BlockOpResponseProto.newBuilder()
.setStatus(mirrorInStatus)
.setFirstBadLink(firstBadLink)
.build()
.writeDelimitedTo(replyOut);
replyOut.flush();
}
// receive the block and mirror to the next target
if (blockReceiver != null) {
String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut,
mirrorAddr, null, targets, false);
// send close-ack for transfer-RBW/Finalized
if (isTransfer) {
if (LOG.isTraceEnabled()) {
LOG.trace("TRANSFER: send close-ack");
}
writeResponse(SUCCESS, null, replyOut);
}
}
// update its generation stamp
if (isClient &&
stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
block.setGenerationStamp(latestGenerationStamp);
block.setNumBytes(minBytesRcvd);
}
// if this write is for a replication request or recovering
// a failed close for client, then confirm block. For other client-writes,
// the block is finalized in the PacketResponder.
if (isDatanode ||
stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
datanode.closeBlock(block, DataNode.EMPTY_DEL_HINT, storageUuid);
LOG.info("Received " + block + " src: " + remoteAddress + " dest: "
+ localAddress + " of size " + block.getNumBytes());
}
      if (isClient) {
size = block.getNumBytes();
}
} catch (IOException ioe) {
LOG.info("opWriteBlock " + block + " received exception " + ioe);
incrDatanodeNetworkErrors();
throw ioe;
} finally {
// close all opened streams
IOUtils.closeStream(mirrorOut);
IOUtils.closeStream(mirrorIn);
IOUtils.closeStream(replyOut);
IOUtils.closeSocket(mirrorSock);
IOUtils.closeStream(blockReceiver);
blockReceiver = null;
}
//update metrics
datanode.getMetrics().addWriteBlockOp(elapsed());
datanode.getMetrics().incrWritesFromClient(peer.isLocal(), size);
}
@Override
public void transferBlock(final ExtendedBlock blk,
final Token<BlockTokenIdentifier> blockToken,
final String clientName,
final DatanodeInfo[] targets,
final StorageType[] targetStorageTypes) throws IOException {
checkAccess(socketOut, true, blk, blockToken,
Op.TRANSFER_BLOCK, BlockTokenIdentifier.AccessMode.COPY);
previousOpClientName = clientName;
updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
final DataOutputStream out = new DataOutputStream(
getOutputStream());
try {
datanode.transferReplicaForPipelineRecovery(blk, targets,
targetStorageTypes, clientName);
writeResponse(Status.SUCCESS, null, out);
} catch (IOException ioe) {
LOG.info("transferBlock " + blk + " received exception " + ioe);
incrDatanodeNetworkErrors();
throw ioe;
} finally {
IOUtils.closeStream(out);
}
}
private MD5Hash calcPartialBlockChecksum(ExtendedBlock block,
long requestLength, DataChecksum checksum, DataInputStream checksumIn)
throws IOException {
final int bytesPerCRC = checksum.getBytesPerChecksum();
final int csize = checksum.getChecksumSize();
final byte[] buffer = new byte[4*1024];
MessageDigest digester = MD5Hash.getDigester();
long remaining = requestLength / bytesPerCRC * csize;
for (int toDigest = 0; remaining > 0; remaining -= toDigest) {
toDigest = checksumIn.read(buffer, 0,
(int) Math.min(remaining, buffer.length));
if (toDigest < 0) {
break;
}
digester.update(buffer, 0, toDigest);
}
int partialLength = (int) (requestLength % bytesPerCRC);
if (partialLength > 0) {
byte[] buf = new byte[partialLength];
final InputStream blockIn = datanode.data.getBlockInputStream(block,
requestLength - partialLength);
try {
// Get the CRC of the partialLength.
IOUtils.readFully(blockIn, buf, 0, partialLength);
} finally {
IOUtils.closeStream(blockIn);
}
checksum.update(buf, 0, partialLength);
byte[] partialCrc = new byte[csize];
checksum.writeValue(partialCrc, 0, true);
digester.update(partialCrc);
}
return new MD5Hash(digester.digest());
}
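  // Worked example (illustrative numbers only): with bytesPerCRC = 512 and
  // csize = 4 (CRC32), a requestLength of 1300 bytes covers 2 full chunks,
  // so remaining = (1300 / 512) * 4 = 8 checksum bytes are digested straight
  // from the meta file, and partialLength = 1300 % 512 = 276 trailing data
  // bytes are re-read from the block file, checksummed, and the resulting
  // 4-byte CRC is digested as well before the MD5 is finalized.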
@Override
public void blockChecksum(final ExtendedBlock block,
final Token<BlockTokenIdentifier> blockToken) throws IOException {
final DataOutputStream out = new DataOutputStream(
getOutputStream());
checkAccess(out, true, block, blockToken,
Op.BLOCK_CHECKSUM, BlockTokenIdentifier.AccessMode.READ);
// client side now can specify a range of the block for checksum
long requestLength = block.getNumBytes();
Preconditions.checkArgument(requestLength >= 0);
long visibleLength = datanode.data.getReplicaVisibleLength(block);
boolean partialBlk = requestLength < visibleLength;
updateCurrentThreadName("Reading metadata for block " + block);
final LengthInputStream metadataIn = datanode.data
.getMetaDataInputStream(block);
final DataInputStream checksumIn = new DataInputStream(
new BufferedInputStream(metadataIn, ioFileBufferSize));
updateCurrentThreadName("Getting checksum for block " + block);
try {
//read metadata file
final BlockMetadataHeader header = BlockMetadataHeader
.readHeader(checksumIn);
final DataChecksum checksum = header.getChecksum();
final int csize = checksum.getChecksumSize();
final int bytesPerCRC = checksum.getBytesPerChecksum();
final long crcPerBlock = csize <= 0 ? 0 :
(metadataIn.getLength() - BlockMetadataHeader.getHeaderSize()) / csize;
final MD5Hash md5 = partialBlk && crcPerBlock > 0 ?
calcPartialBlockChecksum(block, requestLength, checksum, checksumIn)
: MD5Hash.digest(checksumIn);
if (LOG.isDebugEnabled()) {
LOG.debug("block=" + block + ", bytesPerCRC=" + bytesPerCRC
+ ", crcPerBlock=" + crcPerBlock + ", md5=" + md5);
}
//write reply
BlockOpResponseProto.newBuilder()
.setStatus(SUCCESS)
.setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()
.setBytesPerCrc(bytesPerCRC)
.setCrcPerBlock(crcPerBlock)
.setMd5(ByteString.copyFrom(md5.getDigest()))
.setCrcType(PBHelper.convert(checksum.getChecksumType())))
.build()
.writeDelimitedTo(out);
out.flush();
} catch (IOException ioe) {
LOG.info("blockChecksum " + block + " received exception " + ioe);
incrDatanodeNetworkErrors();
throw ioe;
} finally {
IOUtils.closeStream(out);
IOUtils.closeStream(checksumIn);
IOUtils.closeStream(metadataIn);
}
//update metrics
datanode.metrics.addBlockChecksumOp(elapsed());
}
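  // Illustrative response shape (values are hypothetical): for a finalized
  // block with bytesPerCRC = 512, csize = 4 and a 71-byte meta file,
  // crcPerBlock = (71 - 7) / 4 = 16, and md5 is the MD5 of those 16 stored
  // CRCs (or of the partial-range CRCs when requestLength < visibleLength).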
@Override
public void copyBlock(final ExtendedBlock block,
final Token<BlockTokenIdentifier> blockToken) throws IOException {
updateCurrentThreadName("Copying block " + block);
// Read in the header
if (datanode.isBlockTokenEnabled) {
try {
datanode.blockPoolTokenSecretManager.checkAccess(blockToken, null, block,
BlockTokenIdentifier.AccessMode.COPY);
} catch (InvalidToken e) {
LOG.warn("Invalid access token in request from " + remoteAddress
+ " for OP_COPY_BLOCK for block " + block + " : "
+ e.getLocalizedMessage());
sendResponse(ERROR_ACCESS_TOKEN, "Invalid access token");
return;
}
}
    if (datanode.data.getPinning(block)) {
      String msg = "Not able to copy block " + block.getBlockId() + " " +
          "to " + peer.getRemoteAddressString() + " because it's pinned ";
      LOG.info(msg);
      sendResponse(ERROR, msg);
      // Refuse the copy outright; without this return the block would still
      // be sent after the error response.
      return;
    }
if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
String msg = "Not able to copy block " + block.getBlockId() + " " +
"to " + peer.getRemoteAddressString() + " because threads " +
"quota is exceeded.";
LOG.info(msg);
sendResponse(ERROR, msg);
return;
}
BlockSender blockSender = null;
DataOutputStream reply = null;
boolean isOpSuccess = true;
try {
// check if the block exists or not
blockSender = new BlockSender(block, 0, -1, false, false, true, datanode,
null, CachingStrategy.newDropBehind());
// set up response stream
OutputStream baseStream = getOutputStream();
reply = new DataOutputStream(new BufferedOutputStream(
baseStream, smallBufferSize));
// send status first
writeSuccessWithChecksumInfo(blockSender, reply);
long beginRead = Time.monotonicNow();
// send block content to the target
long read = blockSender.sendBlock(reply, baseStream,
dataXceiverServer.balanceThrottler);
long duration = Time.monotonicNow() - beginRead;
datanode.metrics.incrBytesRead((int) read);
datanode.metrics.incrBlocksRead();
datanode.metrics.incrTotalReadTime(duration);
LOG.info("Copied " + block + " to " + peer.getRemoteAddressString());
} catch (IOException ioe) {
isOpSuccess = false;
LOG.info("opCopyBlock " + block + " received exception " + ioe);
incrDatanodeNetworkErrors();
throw ioe;
} finally {
dataXceiverServer.balanceThrottler.release();
if (isOpSuccess) {
try {
// send one last byte to indicate that the resource is cleaned.
reply.writeChar('d');
} catch (IOException ignored) {
}
}
IOUtils.closeStream(reply);
IOUtils.closeStream(blockSender);
}
//update metrics
datanode.metrics.addCopyBlockOp(elapsed());
}
@Override
public void replaceBlock(final ExtendedBlock block,
final StorageType storageType,
final Token<BlockTokenIdentifier> blockToken,
final String delHint,
final DatanodeInfo proxySource) throws IOException {
updateCurrentThreadName("Replacing block " + block + " from " + delHint);
/* read header */
if (datanode.isBlockTokenEnabled) {
try {
datanode.blockPoolTokenSecretManager.checkAccess(blockToken, null, block,
BlockTokenIdentifier.AccessMode.REPLACE);
} catch (InvalidToken e) {
LOG.warn("Invalid access token in request from " + remoteAddress
+ " for OP_REPLACE_BLOCK for block " + block + " : "
+ e.getLocalizedMessage());
sendResponse(ERROR_ACCESS_TOKEN, "Invalid access token");
return;
}
}
if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
String msg = "Not able to receive block " + block.getBlockId() +
" from " + peer.getRemoteAddressString() + " because threads " +
"quota is exceeded.";
LOG.warn(msg);
sendResponse(ERROR, msg);
return;
}
Socket proxySock = null;
DataOutputStream proxyOut = null;
Status opStatus = SUCCESS;
String errMsg = null;
BlockReceiver blockReceiver = null;
DataInputStream proxyReply = null;
DataOutputStream replyOut = new DataOutputStream(getOutputStream());
boolean IoeDuringCopyBlockOperation = false;
try {
// Move the block to different storage in the same datanode
if (proxySource.equals(datanode.getDatanodeId())) {
ReplicaInfo oldReplica = datanode.data.moveBlockAcrossStorage(block,
storageType);
if (oldReplica != null) {
LOG.info("Moved " + block + " from StorageType "
+ oldReplica.getVolume().getStorageType() + " to " + storageType);
}
} else {
block.setNumBytes(dataXceiverServer.estimateBlockSize);
// get the output stream to the proxy
final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to datanode " + dnAddr);
}
InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
proxySock = datanode.newSocket();
NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
proxySock.setSoTimeout(dnConf.socketTimeout);
OutputStream unbufProxyOut = NetUtils.getOutputStream(proxySock,
dnConf.socketWriteTimeout);
InputStream unbufProxyIn = NetUtils.getInputStream(proxySock);
DataEncryptionKeyFactory keyFactory =
datanode.getDataEncryptionKeyFactoryForBlock(block);
IOStreamPair saslStreams = datanode.saslClient.socketSend(proxySock,
unbufProxyOut, unbufProxyIn, keyFactory, blockToken, proxySource);
unbufProxyOut = saslStreams.out;
unbufProxyIn = saslStreams.in;
proxyOut = new DataOutputStream(new BufferedOutputStream(unbufProxyOut,
smallBufferSize));
proxyReply = new DataInputStream(new BufferedInputStream(unbufProxyIn,
ioFileBufferSize));
/* send request to the proxy */
IoeDuringCopyBlockOperation = true;
new Sender(proxyOut).copyBlock(block, blockToken);
IoeDuringCopyBlockOperation = false;
// receive the response from the proxy
BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(
PBHelper.vintPrefixed(proxyReply));
String logInfo = "copy block " + block + " from "
+ proxySock.getRemoteSocketAddress();
DataTransferProtoUtil.checkBlockOpStatus(copyResponse, logInfo);
// get checksum info about the block we're copying
ReadOpChecksumInfoProto checksumInfo = copyResponse.getReadOpChecksumInfo();
DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto(
checksumInfo.getChecksum());
// open a block receiver and check if the block does not exist
blockReceiver = getBlockReceiver(block, storageType,
proxyReply, proxySock.getRemoteSocketAddress().toString(),
proxySock.getLocalSocketAddress().toString(),
null, 0, 0, 0, "", null, datanode, remoteChecksum,
CachingStrategy.newDropBehind(), false, false);
// receive a block
blockReceiver.receiveBlock(null, null, replyOut, null,
dataXceiverServer.balanceThrottler, null, true);
// notify name node
datanode.notifyNamenodeReceivedBlock(
block, delHint, blockReceiver.getStorageUuid());
LOG.info("Moved " + block + " from " + peer.getRemoteAddressString()
+ ", delHint=" + delHint);
}
} catch (IOException ioe) {
opStatus = ERROR;
errMsg = "opReplaceBlock " + block + " received exception " + ioe;
LOG.info(errMsg);
if (!IoeDuringCopyBlockOperation) {
// Don't double count IO errors
incrDatanodeNetworkErrors();
}
throw ioe;
} finally {
// receive the last byte that indicates the proxy released its thread resource
if (opStatus == SUCCESS && proxyReply != null) {
try {
proxyReply.readChar();
} catch (IOException ignored) {
}
}
// now release the thread resource
dataXceiverServer.balanceThrottler.release();
// send response back
try {
sendResponse(opStatus, errMsg);
} catch (IOException ioe) {
LOG.warn("Error writing reply back to " + peer.getRemoteAddressString());
incrDatanodeNetworkErrors();
}
IOUtils.closeStream(proxyOut);
IOUtils.closeStream(blockReceiver);
IOUtils.closeStream(proxyReply);
IOUtils.closeStream(replyOut);
}
//update metrics
datanode.metrics.addReplaceBlockOp(elapsed());
}
/**
* Separated for testing.
*/
@VisibleForTesting
BlockReceiver getBlockReceiver(
final ExtendedBlock block, final StorageType storageType,
final DataInputStream in,
final String inAddr, final String myAddr,
final BlockConstructionStage stage,
final long newGs, final long minBytesRcvd, final long maxBytesRcvd,
final String clientname, final DatanodeInfo srcDataNode,
final DataNode dn, DataChecksum requestedChecksum,
CachingStrategy cachingStrategy,
final boolean allowLazyPersist,
final boolean pinning) throws IOException {
return new BlockReceiver(block, storageType, in,
inAddr, myAddr, stage, newGs, minBytesRcvd, maxBytesRcvd,
clientname, srcDataNode, dn, requestedChecksum,
cachingStrategy, allowLazyPersist, pinning);
}
/**
* Separated for testing.
   * @return a new DataOutputStream that buffers writes to the underlying
   *         socket output stream
*/
@VisibleForTesting
DataOutputStream getBufferedOutputStream() {
return new DataOutputStream(
new BufferedOutputStream(getOutputStream(), smallBufferSize));
}
private long elapsed() {
return monotonicNow() - opStartTime;
}
/**
* Utility function for sending a response.
*
* @param status status message to write
* @param message message to send to the client or other DN
*/
private void sendResponse(Status status,
String message) throws IOException {
writeResponse(status, message, getOutputStream());
}
private static void writeResponse(Status status, String message, OutputStream out)
throws IOException {
BlockOpResponseProto.Builder response = BlockOpResponseProto.newBuilder()
.setStatus(status);
if (message != null) {
response.setMessage(message);
}
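    // The response is framed as a protobuf varint length prefix followed by
    // the serialized BlockOpResponseProto, matching the vintPrefixed parsing
    // used when responses are read back (e.g. in replaceBlock above).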
response.build().writeDelimitedTo(out);
out.flush();
}
private void writeSuccessWithChecksumInfo(BlockSender blockSender,
DataOutputStream out) throws IOException {
ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
.setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
.setChunkOffset(blockSender.getOffset())
.build();
BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
.setStatus(SUCCESS)
.setReadOpChecksumInfo(ckInfo)
.build();
response.writeDelimitedTo(out);
out.flush();
}
private void incrDatanodeNetworkErrors() {
datanode.incrDatanodeNetworkErrors(remoteAddressWithoutPort);
}
private void checkAccess(OutputStream out, final boolean reply,
final ExtendedBlock blk,
final Token<BlockTokenIdentifier> t,
final Op op,
final BlockTokenIdentifier.AccessMode mode) throws IOException {
if (datanode.isBlockTokenEnabled) {
if (LOG.isDebugEnabled()) {
LOG.debug("Checking block access token for block '" + blk.getBlockId()
+ "' with mode '" + mode + "'");
}
try {
datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode);
} catch(InvalidToken e) {
try {
if (reply) {
BlockOpResponseProto.Builder resp = BlockOpResponseProto.newBuilder()
.setStatus(ERROR_ACCESS_TOKEN);
if (mode == BlockTokenIdentifier.AccessMode.WRITE) {
DatanodeRegistration dnR =
datanode.getDNRegistrationForBP(blk.getBlockPoolId());
// NB: Unconditionally using the xfer addr w/o hostname
resp.setFirstBadLink(dnR.getXferAddr());
}
resp.build().writeDelimitedTo(out);
out.flush();
}
LOG.warn("Block token verification failed: op=" + op
+ ", remoteAddress=" + remoteAddress
+ ", message=" + e.getLocalizedMessage());
throw e;
} finally {
IOUtils.closeStream(out);
}
}
}
}
}
| 53,066 | 38.810203 | 126 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
/**
* This class represents a replica that is waiting to be recovered.
* After a datanode restart, any replica in "rbw" directory is loaded
* as a replica waiting to be recovered.
 * A replica waiting to be recovered neither serves reads nor participates
 * in any pipeline recovery. It will either become outdated if its client
 * continues to write to it, or be recovered as a result of lease recovery.
*/
public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
private boolean unlinked; // copy-on-write done for block
/**
* Constructor
* @param blockId block id
* @param len replica length
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
public ReplicaWaitingToBeRecovered(long blockId, long len, long genStamp,
FsVolumeSpi vol, File dir) {
super(blockId, len, genStamp, vol, dir);
}
/**
* Constructor
* @param block a block
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
public ReplicaWaitingToBeRecovered(Block block, FsVolumeSpi vol, File dir) {
super(block, vol, dir);
}
/**
* Copy constructor.
* @param from where to copy from
*/
public ReplicaWaitingToBeRecovered(ReplicaWaitingToBeRecovered from) {
super(from);
this.unlinked = from.isUnlinked();
}
@Override //ReplicaInfo
public ReplicaState getState() {
return ReplicaState.RWR;
}
@Override //ReplicaInfo
public boolean isUnlinked() {
return unlinked;
}
@Override //ReplicaInfo
public void setUnlinked() {
unlinked = true;
}
@Override //ReplicaInfo
public long getVisibleLength() {
return -1; //no bytes are visible
}
@Override
public long getBytesOnDisk() {
return getNumBytes();
}
@Override // Object
public boolean equals(Object o) {
return super.equals(o);
}
@Override // Object
public int hashCode() {
return super.hashCode();
}
@Override
public String toString() {
return super.toString()
+ "\n unlinked=" + unlinked;
}
}
| 3,251 | 28.297297 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.util.DataChecksum;
/**
 * This defines the interface of a replica in a pipeline that is being written to.
*/
public interface ReplicaInPipelineInterface extends Replica {
/**
* Set the number of bytes received
* @param bytesReceived number of bytes received
*/
void setNumBytes(long bytesReceived);
/**
* Get the number of bytes acked
* @return the number of bytes acked
*/
long getBytesAcked();
/**
   * Set the number of bytes that have been acked
   * @param bytesAcked the number of bytes acked
*/
void setBytesAcked(long bytesAcked);
/**
* Release any disk space reserved for this replica.
*/
public void releaseAllBytesReserved();
/**
* store the checksum for the last chunk along with the data length
* @param dataLength number of bytes on disk
* @param lastChecksum - checksum bytes for the last chunk
*/
public void setLastChecksumAndDataLen(long dataLength, byte[] lastChecksum);
/**
* gets the last chunk checksum and the length of the block corresponding
* to that checksum
*/
public ChunkChecksum getLastChecksumAndDataLen();
/**
* Create output streams for writing to this replica,
* one for block file and one for CRC file
*
* @param isCreate if it is for creation
* @param requestedChecksum the checksum the writer would prefer to use
* @return output streams for writing
* @throws IOException if any error occurs
*/
public ReplicaOutputStreams createStreams(boolean isCreate,
DataChecksum requestedChecksum) throws IOException;
/**
* Create an output stream to write restart metadata in case of datanode
* shutting down for quick restart.
*
* @return output stream for writing.
* @throws IOException if any error occurs
*/
public OutputStream createRestartMetaStream() throws IOException;
}
| 2,856 | 31.83908 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaBeingWritten.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
/** This class represents replicas being written.
* Those are the replicas that
* are created in a pipeline initiated by a dfs client.
*/
public class ReplicaBeingWritten extends ReplicaInPipeline {
/**
* Constructor for a zero length replica
* @param blockId block id
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param bytesToReserve disk space to reserve for this replica, based on
* the estimated maximum block length.
*/
public ReplicaBeingWritten(long blockId, long genStamp,
FsVolumeSpi vol, File dir, long bytesToReserve) {
super(blockId, genStamp, vol, dir, bytesToReserve);
}
/**
* Constructor
* @param block a block
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param writer a thread that is writing to this replica
*/
public ReplicaBeingWritten(Block block,
FsVolumeSpi vol, File dir, Thread writer) {
super( block, vol, dir, writer);
}
/**
* Constructor
* @param blockId block id
* @param len replica length
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
* @param writer a thread that is writing to this replica
* @param bytesToReserve disk space to reserve for this replica, based on
* the estimated maximum block length.
*/
public ReplicaBeingWritten(long blockId, long len, long genStamp,
FsVolumeSpi vol, File dir, Thread writer, long bytesToReserve) {
super(blockId, len, genStamp, vol, dir, writer, bytesToReserve);
}
/**
* Copy constructor.
* @param from where to copy from
*/
public ReplicaBeingWritten(ReplicaBeingWritten from) {
super(from);
}
@Override
public long getVisibleLength() {
return getBytesAcked(); // all acked bytes are visible
}
@Override //ReplicaInfo
public ReplicaState getState() {
return ReplicaState.RBW;
}
@Override // Object
public boolean equals(Object o) {
return super.equals(o);
}
@Override // Object
public int hashCode() {
return super.hashCode();
}
}
| 3,419 | 32.861386 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.util.regex.Pattern;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.regex.Matcher;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.util.StringUtils;
/**
* Encapsulates the URI and storage medium that together describe a
* storage directory.
* The default storage medium is assumed to be DISK, if none is specified.
*
*/
@InterfaceAudience.Private
public class StorageLocation {
final StorageType storageType;
final File file;
/** Regular expression that describes a storage uri with a storage type.
* e.g. [Disk]/storages/storage1/
*/
private static final Pattern regex = Pattern.compile("^\\[(\\w*)\\](.+)$");
private StorageLocation(StorageType storageType, URI uri) {
this.storageType = storageType;
if (uri.getScheme() == null ||
"file".equalsIgnoreCase(uri.getScheme())) {
// drop any (illegal) authority in the URI for backwards compatibility
this.file = new File(uri.getPath());
} else {
throw new IllegalArgumentException("Unsupported URI schema in " + uri);
}
}
public StorageType getStorageType() {
return this.storageType;
}
URI getUri() {
return file.toURI();
}
public File getFile() {
return this.file;
}
/**
* Attempt to parse a storage uri with storage class and URI. The storage
* class component of the uri is case-insensitive.
*
* @param rawLocation Location string of the format [type]uri, where [type] is
* optional.
* @return A StorageLocation object if successfully parsed, null otherwise.
* Does not throw any exceptions.
*/
public static StorageLocation parse(String rawLocation)
throws IOException, SecurityException {
Matcher matcher = regex.matcher(rawLocation);
StorageType storageType = StorageType.DEFAULT;
String location = rawLocation;
if (matcher.matches()) {
String classString = matcher.group(1);
location = matcher.group(2);
if (!classString.isEmpty()) {
storageType =
StorageType.valueOf(StringUtils.toUpperCase(classString));
}
}
return new StorageLocation(storageType, new Path(location).toUri());
}
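  // Illustrative examples (not part of the original API contract):
  // parse("[SSD]/data/dfs/dn") yields storageType == StorageType.SSD and a
  // file of /data/dfs/dn, while parse("/data/dfs/dn") falls back to
  // StorageType.DEFAULT because no [type] prefix is present.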
@Override
public String toString() {
return "[" + storageType + "]" + file.toURI();
}
}
| 3,290 | 30.342857 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
/** Provide utility methods for Datanode. */
@InterfaceAudience.Private
public class DatanodeUtil {
public static final String UNLINK_BLOCK_SUFFIX = ".unlinked";
public static final String DISK_ERROR = "Possible disk error: ";
private static final String SEP = System.getProperty("file.separator");
/** Get the cause of an I/O exception if caused by a possible disk error
* @param ioe an I/O exception
* @return cause if the I/O exception is caused by a possible disk error;
* null otherwise.
*/
static IOException getCauseIfDiskError(IOException ioe) {
if (ioe.getMessage()!=null && ioe.getMessage().startsWith(DISK_ERROR)) {
return (IOException)ioe.getCause();
} else {
return null;
}
}
/**
* Create a new file.
* @throws IOException
* if the file already exists or if the file cannot be created.
*/
public static File createTmpFile(Block b, File f) throws IOException {
if (f.exists()) {
throw new IOException("Failed to create temporary file for " + b
+ ". File " + f + " should not be present, but is.");
}
// Create the zero-length temp file
final boolean fileCreated;
try {
fileCreated = f.createNewFile();
} catch (IOException ioe) {
throw new IOException(DISK_ERROR + "Failed to create " + f, ioe);
}
if (!fileCreated) {
throw new IOException("Failed to create temporary file for " + b
+ ". File " + f + " should be creatable, but is already present.");
}
return f;
}
/**
* @return the meta name given the block name and generation stamp.
*/
public static String getMetaName(String blockName, long generationStamp) {
return blockName + "_" + generationStamp + Block.METADATA_EXTENSION;
}
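  // Example (illustrative): getMetaName("blk_1073741825", 1001) yields
  // "blk_1073741825_1001" + Block.METADATA_EXTENSION, i.e. the meta file
  // name that pairs with the block file of that generation stamp.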
/** @return the unlink file. */
public static File getUnlinkTmpFile(File f) {
return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
}
/**
* Checks whether there are any files anywhere in the directory tree rooted
   * at dir (directories don't count as files). dir must exist.
* @return true if there are no files
* @throws IOException if unable to list subdirectories
*/
public static boolean dirNoFilesRecursive(File dir) throws IOException {
File[] contents = dir.listFiles();
if (contents == null) {
throw new IOException("Cannot list contents of " + dir);
}
for (File f : contents) {
if (!f.isDirectory() || (f.isDirectory() && !dirNoFilesRecursive(f))) {
return false;
}
}
return true;
}
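  // Illustrative behaviour: a tree containing only (possibly nested) empty
  // directories yields true; any regular file at any depth yields false, and
  // an unlistable directory surfaces as an IOException.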
/**
* Get the directory where a finalized block with this ID should be stored.
* Do not attempt to create the directory.
* @param root the root directory where finalized blocks are stored
   * @param blockId the ID of the finalized block
   * @return the subdirectory, two levels below {@code root}, derived from the
   *         block ID
*/
public static File idToBlockDir(File root, long blockId) {
int d1 = (int)((blockId >> 16) & 0xff);
int d2 = (int)((blockId >> 8) & 0xff);
String path = DataStorage.BLOCK_SUBDIR_PREFIX + d1 + SEP +
DataStorage.BLOCK_SUBDIR_PREFIX + d2;
return new File(root, path);
}
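  // Worked example (illustrative): for blockId 0x12345678, d1 is
  // (0x12345678 >> 16) & 0xff = 0x34 = 52 and d2 is (0x12345678 >> 8) & 0xff
  // = 0x56 = 86, so the finalized block lands under <root>/subdir52/subdir86.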
/**
* @return the FileInputStream for the meta data of the given block.
* @throws FileNotFoundException
* if the file not found.
* @throws ClassCastException
* if the underlying input stream is not a FileInputStream.
*/
public static FileInputStream getMetaDataInputStream(
ExtendedBlock b, FsDatasetSpi<?> data) throws IOException {
final LengthInputStream lin = data.getMetaDataInputStream(b);
if (lin == null) {
throw new FileNotFoundException("Meta file for " + b + " not found.");
}
return (FileInputStream)lin.getWrappedStream();
}
}
| 4,998 | 34.964029 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ChunkChecksum.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
/**
 * Holder class for checksum bytes together with the length in the block at
 * which those checksum bytes end.
 *
 * Example: if the data length is 1023 and the checksum is 4 bytes covering a
 * 512-byte chunk, then the checksum applies to the last, partial chunk,
 * i.e. from byte 512 up to the data length of 1023.
*/
public class ChunkChecksum {
private final long dataLength;
// can be null if not available
private final byte[] checksum;
public ChunkChecksum(long dataLength, byte[] checksum) {
this.dataLength = dataLength;
this.checksum = checksum;
}
public long getDataLength() {
return dataLength;
}
public byte[] getChecksum() {
return checksum;
}
}
| 1,491 | 31.434783 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature;
@InterfaceAudience.Private
public class DataNodeLayoutVersion {
/** Build layout version and corresponding feature matrix */
public final static Map<Integer, SortedSet<LayoutFeature>> FEATURES =
new HashMap<Integer, SortedSet<LayoutFeature>>();
public static final int CURRENT_LAYOUT_VERSION
= LayoutVersion.getCurrentLayoutVersion(Feature.values());
static{
LayoutVersion.updateMap(FEATURES, LayoutVersion.Feature.values());
LayoutVersion.updateMap(FEATURES, DataNodeLayoutVersion.Feature.values());
}
public static SortedSet<LayoutFeature> getFeatures(int lv) {
return FEATURES.get(lv);
}
public static boolean supports(final LayoutFeature f, final int lv) {
return LayoutVersion.supports(FEATURES, f, lv);
}
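  // Illustrative check, assuming the feature map built in the static block
  // above: supports(Feature.BLOCKID_BASED_LAYOUT, -56) is expected to be
  // true, while supports(Feature.BLOCKID_BASED_LAYOUT, -55) is not, since
  // that feature was introduced at layout version -56.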
/**
* Enums for features that change the layout version.
* <br><br>
* To add a new layout version:
* <ul>
* <li>Define a new enum constant with a short enum name, the new layout version
* and description of the added feature.</li>
   * <li>When adding a layout version with an ancestor that is not the same as
* its immediate predecessor, use the constructor where a specific ancestor
* can be passed.
* </li>
* </ul>
*/
public static enum Feature implements LayoutFeature {
FIRST_LAYOUT(-55, -53, "First datanode layout", false),
BLOCKID_BASED_LAYOUT(-56,
"The block ID of a finalized block uniquely determines its position " +
"in the directory structure");
private final FeatureInfo info;
/**
* DataNodeFeature that is added at layout version {@code lv} - 1.
* @param lv new layout version with the addition of this feature
* @param description description of the feature
*/
Feature(final int lv, final String description) {
this(lv, lv + 1, description, false);
}
/**
     * DataNode feature that is added at layout version {@code ancestorLV}.
     * @param lv new layout version with the addition of this feature
     * @param ancestorLV layout version from which the new lv is derived
* @param description description of the feature
* @param reserved true when this is a layout version reserved for previous
* version
* @param features set of features that are to be enabled for this version
*/
Feature(final int lv, final int ancestorLV, final String description,
boolean reserved, Feature... features) {
info = new FeatureInfo(lv, ancestorLV, description, reserved, features);
}
@Override
public FeatureInfo getInfo() {
return info;
}
}
}
| 3,796 | 36.594059 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/Replica.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
/**
* This represents block replicas which are stored in DataNode.
*/
@InterfaceAudience.Private
public interface Replica {
/** Get the block ID */
public long getBlockId();
/** Get the generation stamp */
public long getGenerationStamp();
/**
* Get the replica state
* @return the replica state
*/
public ReplicaState getState();
/**
* Get the number of bytes received
* @return the number of bytes that have been received
*/
public long getNumBytes();
/**
   * Get the number of bytes that have been written to disk
   * @return the number of bytes that have been written to disk
*/
public long getBytesOnDisk();
/**
* Get the number of bytes that are visible to readers
* @return the number of bytes that are visible to readers
*/
public long getVisibleLength();
/**
* Return the storageUuid of the volume that stores this replica.
*/
public String getStorageUuid();
/**
* Return true if the target volume is backed by RAM.
*/
public boolean isOnTransientStorage();
}
| 2,042 | 29.044118 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
/**
 * This class represents replicas that are under block recovery.
 * It has a recovery id that is equal to the generation stamp
 * that the replica will be bumped to after recovery.
 * The recovery id is used to handle multiple concurrent block recoveries.
* A recovery with higher recovery id preempts recoveries with a lower id.
*
*/
public class ReplicaUnderRecovery extends ReplicaInfo {
private ReplicaInfo original; // the original replica that needs to be recovered
private long recoveryId; // recovery id; it is also the generation stamp
// that the replica will be bumped to after recovery
public ReplicaUnderRecovery(ReplicaInfo replica, long recoveryId) {
super(replica, replica.getVolume(), replica.getDir());
if ( replica.getState() != ReplicaState.FINALIZED &&
replica.getState() != ReplicaState.RBW &&
replica.getState() != ReplicaState.RWR ) {
throw new IllegalArgumentException("Cannot recover replica: " + replica);
}
this.original = replica;
this.recoveryId = recoveryId;
}
/**
* Copy constructor.
* @param from where to copy from
*/
public ReplicaUnderRecovery(ReplicaUnderRecovery from) {
super(from);
this.original = from.getOriginalReplica();
this.recoveryId = from.getRecoveryID();
}
/**
* Get the recovery id
* @return the generation stamp that the replica will be bumped to
*/
public long getRecoveryID() {
return recoveryId;
}
/**
* Set the recovery id
* @param recoveryId the new recoveryId
*/
public void setRecoveryID(long recoveryId) {
if (recoveryId > this.recoveryId) {
this.recoveryId = recoveryId;
} else {
throw new IllegalArgumentException("The new rcovery id: " + recoveryId
+ " must be greater than the current one: " + this.recoveryId);
}
}
/**
* Get the original replica that's under recovery
* @return the original replica under recovery
*/
public ReplicaInfo getOriginalReplica() {
return original;
}
@Override //ReplicaInfo
public boolean isUnlinked() {
return original.isUnlinked();
}
@Override //ReplicaInfo
public void setUnlinked() {
original.setUnlinked();
}
@Override //ReplicaInfo
public ReplicaState getState() {
return ReplicaState.RUR;
}
@Override
public long getVisibleLength() {
return original.getVisibleLength();
}
@Override
public long getBytesOnDisk() {
return original.getBytesOnDisk();
}
@Override //org.apache.hadoop.hdfs.protocol.Block
public void setBlockId(long blockId) {
super.setBlockId(blockId);
original.setBlockId(blockId);
}
@Override //org.apache.hadoop.hdfs.protocol.Block
public void setGenerationStamp(long gs) {
super.setGenerationStamp(gs);
original.setGenerationStamp(gs);
}
@Override //org.apache.hadoop.hdfs.protocol.Block
public void setNumBytes(long numBytes) {
super.setNumBytes(numBytes);
original.setNumBytes(numBytes);
}
@Override //ReplicaInfo
public void setDir(File dir) {
super.setDir(dir);
original.setDir(dir);
}
@Override //ReplicaInfo
void setVolume(FsVolumeSpi vol) {
super.setVolume(vol);
original.setVolume(vol);
}
@Override // Object
public boolean equals(Object o) {
return super.equals(o);
}
@Override // Object
public int hashCode() {
return super.hashCode();
}
@Override
public String toString() {
return super.toString()
+ "\n recoveryId=" + recoveryId
+ "\n original=" + original;
}
public ReplicaRecoveryInfo createInfo() {
return new ReplicaRecoveryInfo(original.getBlockId(),
original.getBytesOnDisk(), original.getGenerationStamp(),
original.getState());
}
}
| 4,894 | 28.311377 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
/**
* Manages the BPOfferService objects for the data node.
* Creation, removal, starting, stopping, shutdown on BPOfferService
* objects must be done via APIs in this class.
*/
@InterfaceAudience.Private
class BlockPoolManager {
private static final Log LOG = DataNode.LOG;
private final Map<String, BPOfferService> bpByNameserviceId =
Maps.newHashMap();
private final Map<String, BPOfferService> bpByBlockPoolId =
Maps.newHashMap();
private final List<BPOfferService> offerServices =
new CopyOnWriteArrayList<>();
private final DataNode dn;
//This lock is used only to ensure exclusion of refreshNamenodes
private final Object refreshNamenodesLock = new Object();
BlockPoolManager(DataNode dn) {
this.dn = dn;
}
synchronized void addBlockPool(BPOfferService bpos) {
Preconditions.checkArgument(offerServices.contains(bpos),
"Unknown BPOS: %s", bpos);
if (bpos.getBlockPoolId() == null) {
throw new IllegalArgumentException("Null blockpool id");
}
bpByBlockPoolId.put(bpos.getBlockPoolId(), bpos);
}
/**
* Returns a list of BPOfferService objects. The underlying list
* implementation is a CopyOnWriteArrayList so it can be safely
* iterated while BPOfferServices are being added or removed.
*
* Caution: The BPOfferService returned could be shutdown any time.
*/
synchronized List<BPOfferService> getAllNamenodeThreads() {
return Collections.unmodifiableList(offerServices);
}
synchronized BPOfferService get(String bpid) {
return bpByBlockPoolId.get(bpid);
}
synchronized void remove(BPOfferService t) {
offerServices.remove(t);
if (t.hasBlockPoolId()) {
// It's possible that the block pool never successfully registered
      // with any NN, so it was never added to this map.
bpByBlockPoolId.remove(t.getBlockPoolId());
}
boolean removed = false;
for (Iterator<BPOfferService> it = bpByNameserviceId.values().iterator();
it.hasNext() && !removed;) {
BPOfferService bpos = it.next();
if (bpos == t) {
it.remove();
LOG.info("Removed " + bpos);
removed = true;
}
}
if (!removed) {
LOG.warn("Couldn't remove BPOS " + t + " from bpByNameserviceId map");
}
}
void shutDownAll(List<BPOfferService> bposList) throws InterruptedException {
for (BPOfferService bpos : bposList) {
bpos.stop(); //interrupts the threads
}
//now join
for (BPOfferService bpos : bposList) {
bpos.join();
}
}
synchronized void startAll() throws IOException {
try {
UserGroupInformation.getLoginUser().doAs(
new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
for (BPOfferService bpos : offerServices) {
bpos.start();
}
return null;
}
});
} catch (InterruptedException ex) {
IOException ioe = new IOException();
ioe.initCause(ex.getCause());
throw ioe;
}
}
void joinAll() {
for (BPOfferService bpos: this.getAllNamenodeThreads()) {
bpos.join();
}
}
void refreshNamenodes(Configuration conf)
throws IOException {
LOG.info("Refresh request received for nameservices: " + conf.get
(DFSConfigKeys.DFS_NAMESERVICES));
Map<String, Map<String, InetSocketAddress>> newAddressMap = DFSUtil
.getNNServiceRpcAddressesForCluster(conf);
synchronized (refreshNamenodesLock) {
doRefreshNamenodes(newAddressMap);
}
}
private void doRefreshNamenodes(
Map<String, Map<String, InetSocketAddress>> addrMap) throws IOException {
assert Thread.holdsLock(refreshNamenodesLock);
Set<String> toRefresh = Sets.newLinkedHashSet();
Set<String> toAdd = Sets.newLinkedHashSet();
Set<String> toRemove;
synchronized (this) {
// Step 1. For each of the new nameservices, figure out whether
// it's an update of the set of NNs for an existing NS,
// or an entirely new nameservice.
for (String nameserviceId : addrMap.keySet()) {
if (bpByNameserviceId.containsKey(nameserviceId)) {
toRefresh.add(nameserviceId);
} else {
toAdd.add(nameserviceId);
}
}
// Step 2. Any nameservices we currently have but are no longer present
// need to be removed.
toRemove = Sets.newHashSet(Sets.difference(
bpByNameserviceId.keySet(), addrMap.keySet()));
assert toRefresh.size() + toAdd.size() ==
addrMap.size() :
"toAdd: " + Joiner.on(",").useForNull("<default>").join(toAdd) +
" toRemove: " + Joiner.on(",").useForNull("<default>").join(toRemove) +
" toRefresh: " + Joiner.on(",").useForNull("<default>").join(toRefresh);
// Step 3. Start new nameservices
if (!toAdd.isEmpty()) {
LOG.info("Starting BPOfferServices for nameservices: " +
Joiner.on(",").useForNull("<default>").join(toAdd));
for (String nsToAdd : toAdd) {
ArrayList<InetSocketAddress> addrs =
Lists.newArrayList(addrMap.get(nsToAdd).values());
BPOfferService bpos = createBPOS(addrs);
bpByNameserviceId.put(nsToAdd, bpos);
offerServices.add(bpos);
}
}
startAll();
}
// Step 4. Shut down old nameservices. This happens outside
// of the synchronized(this) lock since they need to call
// back to .remove() from another thread
if (!toRemove.isEmpty()) {
LOG.info("Stopping BPOfferServices for nameservices: " +
Joiner.on(",").useForNull("<default>").join(toRemove));
for (String nsToRemove : toRemove) {
BPOfferService bpos = bpByNameserviceId.get(nsToRemove);
bpos.stop();
bpos.join();
// they will call remove on their own
}
}
// Step 5. Update nameservices whose NN list has changed
if (!toRefresh.isEmpty()) {
LOG.info("Refreshing list of NNs for nameservices: " +
Joiner.on(",").useForNull("<default>").join(toRefresh));
for (String nsToRefresh : toRefresh) {
BPOfferService bpos = bpByNameserviceId.get(nsToRefresh);
ArrayList<InetSocketAddress> addrs =
Lists.newArrayList(addrMap.get(nsToRefresh).values());
bpos.refreshNNList(addrs);
}
}
}
/**
* Extracted out for test purposes.
*/
protected BPOfferService createBPOS(List<InetSocketAddress> nnAddrs) {
return new BPOfferService(nnAddrs, dn);
}
}
| 8,207 | 32.639344 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.classification.InterfaceAudience;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHOLD_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NON_LOCAL_LAZY_PERSIST;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NON_LOCAL_LAZY_PERSIST_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_RESTART_REPLICA_EXPIRY_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_DEFAULT;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.security.SaslPropertiesResolver;
/**
* Simple class encapsulating all of the configuration that the DataNode
* loads at startup time.
*/
@InterfaceAudience.Private
public class DNConf {
final Configuration conf;
final int socketTimeout;
final int socketWriteTimeout;
final int socketKeepaliveTimeout;
final boolean transferToAllowed;
final boolean dropCacheBehindWrites;
final boolean syncBehindWrites;
final boolean syncBehindWritesInBackground;
final boolean dropCacheBehindReads;
final boolean syncOnClose;
final boolean encryptDataTransfer;
final boolean connectToDnViaHostname;
final long readaheadLength;
final long heartBeatInterval;
final long blockReportInterval;
final long blockReportSplitThreshold;
final long initialBlockReportDelayMs;
final long cacheReportInterval;
final long dfsclientSlowIoWarningThresholdMs;
final long datanodeSlowIoWarningThresholdMs;
final int writePacketSize;
final String minimumNameNodeVersion;
final String encryptionAlgorithm;
final SaslPropertiesResolver saslPropsResolver;
final TrustedChannelResolver trustedChannelResolver;
private final boolean ignoreSecurePortsForTesting;
final long xceiverStopTimeout;
final long restartReplicaExpiry;
final long maxLockedMemory;
// Allow LAZY_PERSIST writes from non-local clients?
private final boolean allowNonLocalLazyPersist;
public DNConf(Configuration conf) {
this.conf = conf;
socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
HdfsServerConstants.READ_TIMEOUT);
socketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
HdfsServerConstants.WRITE_TIMEOUT);
socketKeepaliveTimeout = conf.getInt(
DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
/* Based on results on different platforms, we might need set the default
* to false on some of them. */
transferToAllowed = conf.getBoolean(
DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT);
writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
readaheadLength = conf.getLong(
DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
dropCacheBehindWrites = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY,
DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT);
syncBehindWrites = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_KEY,
DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_DEFAULT);
syncBehindWritesInBackground = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_KEY,
DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_DEFAULT);
dropCacheBehindReads = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY,
DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT);
connectToDnViaHostname = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
this.blockReportInterval = conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
this.blockReportSplitThreshold = conf.getLong(DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY,
DFS_BLOCKREPORT_SPLIT_THRESHOLD_DEFAULT);
this.cacheReportInterval = conf.getLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY,
DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT);
this.dfsclientSlowIoWarningThresholdMs = conf.getLong(
DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
this.datanodeSlowIoWarningThresholdMs = conf.getLong(
DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY,
DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
long initBRDelay = conf.getLong(
DFS_BLOCKREPORT_INITIAL_DELAY_KEY,
DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT) * 1000L;
if (initBRDelay >= blockReportInterval) {
initBRDelay = 0;
DataNode.LOG.info("dfs.blockreport.initialDelay is "
+ "greater than or equal to" + "dfs.blockreport.intervalMsec."
+ " Setting initial delay to 0 msec:");
}
initialBlockReportDelayMs = initBRDelay;
heartBeatInterval = conf.getLong(DFS_HEARTBEAT_INTERVAL_KEY,
DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000L;
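    // Note: the heartbeat interval and the initial block report delay are
    // configured in seconds and converted to milliseconds above, whereas the
    // block report interval itself is already expressed in milliseconds.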
// do we need to sync block file contents to disk when blockfile is closed?
this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY,
DFS_DATANODE_SYNCONCLOSE_DEFAULT);
this.minimumNameNodeVersion = conf.get(DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY,
DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT);
this.encryptDataTransfer = conf.getBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY,
DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
this.encryptionAlgorithm = conf.get(DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
this.trustedChannelResolver = TrustedChannelResolver.getInstance(conf);
this.saslPropsResolver = DataTransferSaslUtil.getSaslPropertiesResolver(
conf);
this.ignoreSecurePortsForTesting = conf.getBoolean(
IGNORE_SECURE_PORTS_FOR_TESTING_KEY,
IGNORE_SECURE_PORTS_FOR_TESTING_DEFAULT);
this.xceiverStopTimeout = conf.getLong(
DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY,
DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
this.maxLockedMemory = conf.getLong(
DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);
this.restartReplicaExpiry = conf.getLong(
DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY,
DFS_DATANODE_RESTART_REPLICA_EXPIRY_DEFAULT) * 1000L;
this.allowNonLocalLazyPersist = conf.getBoolean(
DFS_DATANODE_NON_LOCAL_LAZY_PERSIST,
DFS_DATANODE_NON_LOCAL_LAZY_PERSIST_DEFAULT);
}
// We get minimumNameNodeVersion via a method so it can be mocked out in tests.
String getMinimumNameNodeVersion() {
return this.minimumNameNodeVersion;
}
/**
* Returns the configuration.
*
* @return Configuration the configuration
*/
public Configuration getConf() {
return conf;
}
/**
* Returns true if encryption enabled for DataTransferProtocol.
*
* @return boolean true if encryption enabled for DataTransferProtocol
*/
public boolean getEncryptDataTransfer() {
return encryptDataTransfer;
}
/**
* Returns encryption algorithm configured for DataTransferProtocol, or null
* if not configured.
*
* @return encryption algorithm configured for DataTransferProtocol
*/
public String getEncryptionAlgorithm() {
return encryptionAlgorithm;
}
public long getXceiverStopTimeout() {
return xceiverStopTimeout;
}
public long getMaxLockedMemory() {
return maxLockedMemory;
}
/**
* Returns the SaslPropertiesResolver configured for use with
* DataTransferProtocol, or null if not configured.
*
* @return SaslPropertiesResolver configured for use with DataTransferProtocol
*/
public SaslPropertiesResolver getSaslPropsResolver() {
return saslPropsResolver;
}
/**
* Returns the TrustedChannelResolver configured for use with
* DataTransferProtocol, or null if not configured.
*
* @return TrustedChannelResolver configured for use with DataTransferProtocol
*/
public TrustedChannelResolver getTrustedChannelResolver() {
return trustedChannelResolver;
}
/**
* Returns true if configuration is set to skip checking for proper
* port configuration in a secured cluster. This is only intended for use in
* dev testing.
*
* @return true if configured to skip checking secured port configuration
*/
public boolean getIgnoreSecurePortsForTesting() {
return ignoreSecurePortsForTesting;
}
public boolean getAllowNonLocalLazyPersist() {
return allowNonLocalLazyPersist;
}
}
| 12,374 | 42.882979 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.util.Map;
/**
*
* This is the JMX management interface for data node information
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public interface DataNodeMXBean {
/**
* Gets the version of Hadoop.
*
* @return the version of Hadoop
*/
public String getVersion();
/**
* Gets the rpc port.
*
* @return the rpc port
*/
public String getRpcPort();
/**
* Gets the http port.
*
* @return the http port
*/
public String getHttpPort();
/**
* Gets the namenode IP addresses
*
* @return the namenode IP addresses that the datanode is talking to
*/
public String getNamenodeAddresses();
/**
* Gets the information of each volume on the Datanode. Please
* see the implementation for the format of returned information.
*
* @return the volume info
*/
public String getVolumeInfo();
/**
* Gets the cluster id.
*
* @return the cluster id
*/
public String getClusterId();
/**
* Returns an estimate of the number of Datanode threads
* actively transferring blocks.
*/
public int getXceiverCount();
/**
* Gets the network error counts on a per-Datanode basis.
*/
public Map<String, Map<String, Long>> getDatanodeNetworkCounts();
}
| 2,261 | 25 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ComparisonChain;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Futures;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DiskChecker;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
/**
* Data storage information file.
* <p>
* @see Storage
*/
@InterfaceAudience.Private
public class DataStorage extends Storage {
public final static String BLOCK_SUBDIR_PREFIX = "subdir";
final static String STORAGE_DIR_DETACHED = "detach";
public final static String STORAGE_DIR_RBW = "rbw";
public final static String STORAGE_DIR_FINALIZED = "finalized";
public final static String STORAGE_DIR_LAZY_PERSIST = "lazypersist";
public final static String STORAGE_DIR_TMP = "tmp";
/**
* Set of bpids for which 'trash' is currently enabled.
* When trash is enabled block files are moved under a separate
* 'trash' folder instead of being deleted right away. This can
* be useful during rolling upgrades, for example.
* The set is backed by a concurrent HashMap.
*
* Even if trash is enabled, it is not used if a layout upgrade
* is in progress for a storage directory i.e. if the previous
* directory exists.
*/
private Set<String> trashEnabledBpids;
/**
* Datanode UUID that this storage is currently attached to. This
* is the same as the legacy StorageID for datanodes that were
* upgraded from a pre-UUID version. For compatibility with prior
* versions of Datanodes we cannot make this field a UUID.
*/
private String datanodeUuid = null;
// Flag to ensure we only initialize storage once
private boolean initialized = false;
// Maps block pool IDs to block pool storage
private final Map<String, BlockPoolSliceStorage> bpStorageMap
= Collections.synchronizedMap(new HashMap<String, BlockPoolSliceStorage>());
DataStorage() {
super(NodeType.DATA_NODE);
trashEnabledBpids = Collections.newSetFromMap(
new ConcurrentHashMap<String, Boolean>());
}
public BlockPoolSliceStorage getBPStorage(String bpid) {
return bpStorageMap.get(bpid);
}
public DataStorage(StorageInfo storageInfo) {
super(storageInfo);
}
public synchronized String getDatanodeUuid() {
return datanodeUuid;
}
public synchronized void setDatanodeUuid(String newDatanodeUuid) {
this.datanodeUuid = newDatanodeUuid;
}
/** Create an ID for this storage.
* @return true if a new storage ID was generated.
* */
public synchronized boolean createStorageID(
StorageDirectory sd, boolean regenerateStorageIds) {
final String oldStorageID = sd.getStorageUuid();
if (oldStorageID == null || regenerateStorageIds) {
sd.setStorageUuid(DatanodeStorage.generateUuid());
LOG.info("Generated new storageID " + sd.getStorageUuid() +
" for directory " + sd.getRoot() +
(oldStorageID == null ? "" : (" to replace " + oldStorageID)));
return true;
}
return false;
}
/**
* Enable trash for the specified block pool storage. Even if trash is
* enabled by the caller, it is superseded by the 'previous' directory
* if a layout upgrade is in progress.
*/
public void enableTrash(String bpid) {
if (trashEnabledBpids.add(bpid)) {
LOG.info("Enabled trash for bpid " + bpid);
}
}
public void clearTrash(String bpid) {
if (trashEnabledBpids.contains(bpid)) {
getBPStorage(bpid).clearTrash();
trashEnabledBpids.remove(bpid);
LOG.info("Cleared trash for bpid " + bpid);
}
}
public boolean trashEnabled(String bpid) {
return trashEnabledBpids.contains(bpid);
}
public void setRollingUpgradeMarker(String bpid) throws IOException {
getBPStorage(bpid).setRollingUpgradeMarkers(storageDirs);
}
public void clearRollingUpgradeMarker(String bpid) throws IOException {
getBPStorage(bpid).clearRollingUpgradeMarkers(storageDirs);
}
/**
* If rolling upgrades are in progress then do not delete block files
* immediately. Instead we move the block files to an intermediate
* 'trash' directory. If there is a subsequent rollback, then the block
* files will be restored from trash.
*
* @return trash directory if rolling upgrade is in progress, null
* otherwise.
*/
public String getTrashDirectoryForBlockFile(String bpid, File blockFile) {
if (trashEnabledBpids.contains(bpid)) {
return getBPStorage(bpid).getTrashDirectory(blockFile);
}
return null;
}
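  /*
   * Illustrative sketch, not part of the original class: how a caller might use
   * the trash directory returned above. The block file keeps its own name; only
   * its parent directory changes. A null return means trash is not in effect and
   * the caller deletes the file instead.
   */
  private static File exampleTrashTarget(String trashDirectory, File blockFile) {
    if (trashDirectory == null) {
      return null; // no rolling upgrade in progress for this block pool
    }
    return new File(trashDirectory, blockFile.getName());
  }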
/**
* VolumeBuilder holds the metadata (e.g., the storage directories) of the
   * prepared volume returned from {@link #prepareVolume}. Call {@link #build()}
   * to add the metadata to {@link DataStorage} so that this prepared volume can
   * become active.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
static public class VolumeBuilder {
private DataStorage storage;
/** Volume level storage directory. */
private StorageDirectory sd;
/** Mapping from block pool ID to an array of storage directories. */
private Map<String, List<StorageDirectory>> bpStorageDirMap =
Maps.newHashMap();
@VisibleForTesting
public VolumeBuilder(DataStorage storage, StorageDirectory sd) {
this.storage = storage;
this.sd = sd;
}
public final StorageDirectory getStorageDirectory() {
return this.sd;
}
private void addBpStorageDirectories(String bpid,
List<StorageDirectory> dirs) {
bpStorageDirMap.put(bpid, dirs);
}
/**
* Add loaded metadata of a data volume to {@link DataStorage}.
*/
public void build() {
assert this.sd != null;
synchronized (storage) {
for (Map.Entry<String, List<StorageDirectory>> e :
bpStorageDirMap.entrySet()) {
final String bpid = e.getKey();
BlockPoolSliceStorage bpStorage = this.storage.bpStorageMap.get(bpid);
assert bpStorage != null;
for (StorageDirectory bpSd : e.getValue()) {
bpStorage.addStorageDir(bpSd);
}
}
storage.addStorageDir(sd);
}
}
}
private StorageDirectory loadStorageDirectory(DataNode datanode,
NamespaceInfo nsInfo, File dataDir, StartupOption startOpt)
throws IOException {
StorageDirectory sd = new StorageDirectory(dataDir, null, false);
try {
StorageState curState = sd.analyzeStorage(startOpt, this);
// sd is locked but not opened
switch (curState) {
case NORMAL:
break;
case NON_EXISTENT:
LOG.info("Storage directory " + dataDir + " does not exist");
throw new IOException("Storage directory " + dataDir
+ " does not exist");
case NOT_FORMATTED: // format
LOG.info("Storage directory " + dataDir + " is not formatted for "
+ nsInfo.getBlockPoolID());
LOG.info("Formatting ...");
format(sd, nsInfo, datanode.getDatanodeUuid());
break;
default: // recovery part is common
sd.doRecover(curState);
}
// 2. Do transitions
// Each storage directory is treated individually.
// During startup some of them can upgrade or roll back
// while others could be up-to-date for the regular startup.
doTransition(datanode, sd, nsInfo, startOpt);
// 3. Update successfully loaded storage.
setServiceLayoutVersion(getServiceLayoutVersion());
writeProperties(sd);
return sd;
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
}
/**
   * Prepare a storage directory. It creates a builder that can be used to add
   * the volume to DataStorage. If the volume cannot be added, it is OK to
   * discard the builder later.
*
* @param datanode DataNode object.
* @param volume the root path of a storage directory.
* @param nsInfos an array of namespace infos.
* @return a VolumeBuilder that holds the metadata of this storage directory
* and can be added to DataStorage later.
   * @throws IOException if it encounters I/O errors.
   *
   * Note that if there is an IOException, the state of DataStorage is not modified.
*/
public VolumeBuilder prepareVolume(DataNode datanode, File volume,
List<NamespaceInfo> nsInfos) throws IOException {
if (containsStorageDir(volume)) {
final String errorMessage = "Storage directory is in use";
LOG.warn(errorMessage + ".");
throw new IOException(errorMessage);
}
StorageDirectory sd = loadStorageDirectory(
datanode, nsInfos.get(0), volume, StartupOption.HOTSWAP);
VolumeBuilder builder =
new VolumeBuilder(this, sd);
for (NamespaceInfo nsInfo : nsInfos) {
List<File> bpDataDirs = Lists.newArrayList();
bpDataDirs.add(BlockPoolSliceStorage.getBpRoot(
nsInfo.getBlockPoolID(), new File(volume, STORAGE_DIR_CURRENT)));
makeBlockPoolDataDir(bpDataDirs, null);
BlockPoolSliceStorage bpStorage;
final String bpid = nsInfo.getBlockPoolID();
synchronized (this) {
bpStorage = this.bpStorageMap.get(bpid);
if (bpStorage == null) {
bpStorage = new BlockPoolSliceStorage(
nsInfo.getNamespaceID(), bpid, nsInfo.getCTime(),
nsInfo.getClusterID());
addBlockPoolStorage(bpid, bpStorage);
}
}
builder.addBpStorageDirectories(
bpid, bpStorage.loadBpStorageDirectories(
datanode, nsInfo, bpDataDirs, StartupOption.HOTSWAP));
}
return builder;
}
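  /*
   * Usage sketch for the prepare/build split above (illustrative; "datanode",
   * "volumeDir" and "nsInfos" are assumed to be supplied by the caller):
   *
   *   VolumeBuilder builder = storage.prepareVolume(datanode, volumeDir, nsInfos);
   *   // ... construct the in-memory volume for the directory; if that fails,
   *   // simply drop the builder, DataStorage has not been modified yet ...
   *   builder.build(); // only now do the prepared directories become visible
   */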
/**
* Add a list of volumes to be managed by DataStorage. If the volume is empty,
* format it, otherwise recover it from previous transitions if required.
*
* @param datanode the reference to DataNode.
* @param nsInfo namespace information
* @param dataDirs array of data storage directories
* @param startOpt startup option
* @return a list of successfully loaded volumes.
* @throws IOException
*/
@VisibleForTesting
synchronized List<StorageLocation> addStorageLocations(DataNode datanode,
NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
StartupOption startOpt) throws IOException {
final String bpid = nsInfo.getBlockPoolID();
List<StorageLocation> successVolumes = Lists.newArrayList();
for (StorageLocation dataDir : dataDirs) {
File root = dataDir.getFile();
if (!containsStorageDir(root)) {
try {
// It first ensures the datanode level format is completed.
StorageDirectory sd = loadStorageDirectory(
datanode, nsInfo, root, startOpt);
addStorageDir(sd);
} catch (IOException e) {
LOG.warn(e);
continue;
}
} else {
LOG.info("Storage directory " + dataDir + " has already been used.");
}
List<File> bpDataDirs = new ArrayList<File>();
bpDataDirs.add(BlockPoolSliceStorage.getBpRoot(bpid, new File(root,
STORAGE_DIR_CURRENT)));
try {
makeBlockPoolDataDir(bpDataDirs, null);
BlockPoolSliceStorage bpStorage = this.bpStorageMap.get(bpid);
if (bpStorage == null) {
bpStorage = new BlockPoolSliceStorage(
nsInfo.getNamespaceID(), bpid, nsInfo.getCTime(),
nsInfo.getClusterID());
}
bpStorage.recoverTransitionRead(datanode, nsInfo, bpDataDirs, startOpt);
addBlockPoolStorage(bpid, bpStorage);
} catch (IOException e) {
LOG.warn("Failed to add storage for block pool: " + bpid + " : "
+ e.getMessage());
continue;
}
successVolumes.add(dataDir);
}
return successVolumes;
}
/**
   * Remove storage dirs from DataStorage. All storage dirs are removed even when
   * an IOException is thrown.
*
* @param dirsToRemove a set of storage directories to be removed.
* @throws IOException if I/O error when unlocking storage directory.
*/
synchronized void removeVolumes(final Set<File> dirsToRemove)
throws IOException {
if (dirsToRemove.isEmpty()) {
return;
}
StringBuilder errorMsgBuilder = new StringBuilder();
for (Iterator<StorageDirectory> it = this.storageDirs.iterator();
it.hasNext(); ) {
StorageDirectory sd = it.next();
if (dirsToRemove.contains(sd.getRoot())) {
// Remove the block pool level storage first.
for (Map.Entry<String, BlockPoolSliceStorage> entry :
this.bpStorageMap.entrySet()) {
String bpid = entry.getKey();
BlockPoolSliceStorage bpsStorage = entry.getValue();
File bpRoot =
BlockPoolSliceStorage.getBpRoot(bpid, sd.getCurrentDir());
bpsStorage.remove(bpRoot.getAbsoluteFile());
}
it.remove();
try {
sd.unlock();
} catch (IOException e) {
LOG.warn(String.format(
"I/O error attempting to unlock storage directory %s.",
sd.getRoot()), e);
errorMsgBuilder.append(String.format("Failed to remove %s: %s%n",
sd.getRoot(), e.getMessage()));
}
}
}
if (errorMsgBuilder.length() > 0) {
throw new IOException(errorMsgBuilder.toString());
}
}
/**
* Analyze storage directories for a specific block pool.
* Recover from previous transitions if required.
* Perform fs state transition if necessary depending on the namespace info.
* Read storage info.
* <br>
* This method should be synchronized between multiple DN threads. Only the
* first DN thread does DN level storage dir recoverTransitionRead.
*
* @param datanode DataNode
* @param nsInfo Namespace info of namenode corresponding to the block pool
* @param dataDirs Storage directories
* @param startOpt startup option
* @throws IOException on error
*/
void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
Collection<StorageLocation> dataDirs, StartupOption startOpt) throws IOException {
    if (!this.initialized) {
LOG.info("DataNode version: " + HdfsServerConstants.DATANODE_LAYOUT_VERSION
+ " and NameNode layout version: " + nsInfo.getLayoutVersion());
this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
// mark DN storage is initialized
this.initialized = true;
}
if (addStorageLocations(datanode, nsInfo, dataDirs, startOpt).isEmpty()) {
throw new IOException("All specified directories are failed to load.");
}
}
/**
* Create physical directory for block pools on the data node
*
* @param dataDirs
* List of data directories
* @param conf
* Configuration instance to use.
* @throws IOException on errors
*/
static void makeBlockPoolDataDir(Collection<File> dataDirs,
Configuration conf) throws IOException {
if (conf == null)
conf = new HdfsConfiguration();
LocalFileSystem localFS = FileSystem.getLocal(conf);
FsPermission permission = new FsPermission(conf.get(
DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
for (File data : dataDirs) {
try {
DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
} catch ( IOException e ) {
LOG.warn("Invalid directory in: " + data.getCanonicalPath() + ": "
+ e.getMessage());
}
}
}
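  /*
   * Illustrative sketch (assumed permission value): overriding the permission
   * applied to the block pool directories above before they are created.
   *
   *   Configuration conf = new HdfsConfiguration();
   *   conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "750");
   *   makeBlockPoolDataDir(bpDataDirs, conf);
   */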
void format(StorageDirectory sd, NamespaceInfo nsInfo,
String datanodeUuid) throws IOException {
sd.clearDirectory(); // create directory
this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
this.clusterID = nsInfo.getClusterID();
this.namespaceID = nsInfo.getNamespaceID();
this.cTime = 0;
setDatanodeUuid(datanodeUuid);
if (sd.getStorageUuid() == null) {
// Assign a new Storage UUID.
sd.setStorageUuid(DatanodeStorage.generateUuid());
}
writeProperties(sd);
}
/*
* Set ClusterID, StorageID, StorageType, CTime into
* DataStorage VERSION file.
* Always called just before writing the properties to
* the VERSION file.
*/
@Override
protected void setPropertiesFromFields(Properties props,
StorageDirectory sd
) throws IOException {
props.setProperty("storageType", storageType.toString());
props.setProperty("clusterID", clusterID);
props.setProperty("cTime", String.valueOf(cTime));
props.setProperty("layoutVersion", String.valueOf(layoutVersion));
props.setProperty("storageID", sd.getStorageUuid());
String datanodeUuid = getDatanodeUuid();
if (datanodeUuid != null) {
props.setProperty("datanodeUuid", datanodeUuid);
}
// Set NamespaceID in version before federation
if (!DataNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, layoutVersion)) {
props.setProperty("namespaceID", String.valueOf(namespaceID));
}
}
/*
* Read ClusterID, StorageID, StorageType, CTime from
* DataStorage VERSION file and verify them.
* Always called just after reading the properties from the VERSION file.
*/
@Override
protected void setFieldsFromProperties(Properties props, StorageDirectory sd)
throws IOException {
setFieldsFromProperties(props, sd, false, 0);
}
private void setFieldsFromProperties(Properties props, StorageDirectory sd,
boolean overrideLayoutVersion, int toLayoutVersion) throws IOException {
if (overrideLayoutVersion) {
this.layoutVersion = toLayoutVersion;
} else {
setLayoutVersion(props, sd);
}
setcTime(props, sd);
checkStorageType(props, sd);
setClusterId(props, layoutVersion, sd);
// Read NamespaceID in version before federation
if (!DataNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, layoutVersion)) {
setNamespaceID(props, sd);
}
    // validate storage id; the storage id may be empty
String ssid = props.getProperty("storageID");
if (ssid == null) {
throw new InconsistentFSStateException(sd.getRoot(), "file "
+ STORAGE_FILE_VERSION + " is invalid.");
}
String sid = sd.getStorageUuid();
if (!(sid == null || sid.equals("") ||
ssid.equals("") || sid.equals(ssid))) {
throw new InconsistentFSStateException(sd.getRoot(),
"has incompatible storage Id.");
}
if (sid == null) { // update id only if it was null
sd.setStorageUuid(ssid);
}
// Update the datanode UUID if present.
if (props.getProperty("datanodeUuid") != null) {
String dnUuid = props.getProperty("datanodeUuid");
if (getDatanodeUuid() == null) {
setDatanodeUuid(dnUuid);
} else if (getDatanodeUuid().compareTo(dnUuid) != 0) {
throw new InconsistentFSStateException(sd.getRoot(),
"Root " + sd.getRoot() + ": DatanodeUuid=" + dnUuid +
", does not match " + getDatanodeUuid() + " from other" +
" StorageDirectory.");
}
}
}
@Override
public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
File oldF = new File(sd.getRoot(), "storage");
if (!oldF.exists()) {
return false;
}
// check the layout version inside the storage file
// Lock and Read old storage file
try (RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
FileLock oldLock = oldFile.getChannel().tryLock()) {
if (null == oldLock) {
LOG.error("Unable to acquire file lock on path " + oldF.toString());
throw new OverlappingFileLockException();
}
oldFile.seek(0);
int oldVersion = oldFile.readInt();
if (oldVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION) {
return false;
}
}
return true;
}
/** Read VERSION file for rollback */
void readProperties(StorageDirectory sd, int rollbackLayoutVersion)
throws IOException {
Properties props = readPropertiesFile(sd.getVersionFile());
setFieldsFromProperties(props, sd, true, rollbackLayoutVersion);
}
/**
   * Analyze whether a transition of the fs state is required
   * and perform it if necessary.
*
* Rollback if the rollback startup option was specified.
* Upgrade if this.LV > LAYOUT_VERSION
* Regular startup if this.LV = LAYOUT_VERSION
*
   * @param datanode Datanode to which this storage belongs
* @param sd storage directory
* @param nsInfo namespace info
* @param startOpt startup option
* @throws IOException
*/
private void doTransition( DataNode datanode,
StorageDirectory sd,
NamespaceInfo nsInfo,
StartupOption startOpt
) throws IOException {
if (startOpt == StartupOption.ROLLBACK) {
doRollback(sd, nsInfo); // rollback if applicable
}
readProperties(sd);
checkVersionUpgradable(this.layoutVersion);
assert this.layoutVersion >= HdfsServerConstants.DATANODE_LAYOUT_VERSION :
"Future version is not allowed";
boolean federationSupported =
DataNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, layoutVersion);
// For pre-federation version - validate the namespaceID
if (!federationSupported &&
getNamespaceID() != nsInfo.getNamespaceID()) {
throw new IOException("Incompatible namespaceIDs in "
+ sd.getRoot().getCanonicalPath() + ": namenode namespaceID = "
+ nsInfo.getNamespaceID() + "; datanode namespaceID = "
+ getNamespaceID());
}
// For version that supports federation, validate clusterID
if (federationSupported
&& !getClusterID().equals(nsInfo.getClusterID())) {
throw new IOException("Incompatible clusterIDs in "
+ sd.getRoot().getCanonicalPath() + ": namenode clusterID = "
+ nsInfo.getClusterID() + "; datanode clusterID = " + getClusterID());
}
// Clusters previously upgraded from layout versions earlier than
// ADD_DATANODE_AND_STORAGE_UUIDS failed to correctly generate a
// new storage ID. We check for that and fix it now.
boolean haveValidStorageId =
DataNodeLayoutVersion.supports(
LayoutVersion.Feature.ADD_DATANODE_AND_STORAGE_UUIDS, layoutVersion) &&
DatanodeStorage.isValidStorageId(sd.getStorageUuid());
// regular start up.
if (this.layoutVersion == HdfsServerConstants.DATANODE_LAYOUT_VERSION) {
createStorageID(sd, !haveValidStorageId);
return; // regular startup
}
// do upgrade
if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION) {
doUpgrade(datanode, sd, nsInfo); // upgrade
createStorageID(sd, !haveValidStorageId);
return;
}
// layoutVersion < DATANODE_LAYOUT_VERSION. I.e. stored layout version is newer
// than the version supported by datanode. This should have been caught
// in readProperties(), even if rollback was not carried out or somehow
// failed.
throw new IOException("BUG: The stored LV = " + this.getLayoutVersion()
+ " is newer than the supported LV = "
+ HdfsServerConstants.DATANODE_LAYOUT_VERSION);
}
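  /*
   * Worked example for the layout-version comparison above (numbers are for
   * illustration only; layout versions are negative and decrease as features
   * are added). Assuming the software layout version is -56:
   *   stored LV = -51  ->  -51 > -56, the on-disk layout is older, so upgrade;
   *   stored LV = -56  ->  equal, regular startup;
   *   stored LV = -60  ->  -60 < -56, the on-disk layout is newer than this
   *                        datanode supports, so startup fails with the
   *                        IOException above.
   */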
/**
* Upgrade -- Move current storage into a backup directory,
* and hardlink all its blocks into the new current directory.
*
   * Upgrade from pre-0.22 to 0.22 or later release e.g. 0.19/0.20 => 0.22/0.23
   * <ul>
   * <li> If <SD>/previous exists then delete it </li>
   * <li> Rename <SD>/current to <SD>/previous.tmp </li>
   * <li> Create new <SD>/current/<bpid>/current directory </li>
* <ul>
* <li> Hard links for block files are created from <SD>/previous.tmp
* to <SD>/current/<bpid>/current </li>
* <li> Saves new version file in <SD>/current/<bpid>/current directory </li>
* </ul>
* <li> Rename <SD>/previous.tmp to <SD>/previous </li>
* </ul>
*
* There should be only ONE namenode in the cluster for first
* time upgrade to 0.22
* @param sd storage directory
* @throws IOException on error
*/
void doUpgrade(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo)
throws IOException {
    // If the existing on-disk layout version supports federation, simply
// update its layout version.
if (DataNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, layoutVersion)) {
// The VERSION file is already read in. Override the layoutVersion
// field and overwrite the file. The upgrade work is handled by
// {@link BlockPoolSliceStorage#doUpgrade}
LOG.info("Updating layout version from " + layoutVersion + " to "
+ HdfsServerConstants.DATANODE_LAYOUT_VERSION + " for storage "
+ sd.getRoot());
layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
writeProperties(sd);
return;
}
LOG.info("Upgrading storage directory " + sd.getRoot()
+ ".\n old LV = " + this.getLayoutVersion()
+ "; old CTime = " + this.getCTime()
+ ".\n new LV = " + HdfsServerConstants.DATANODE_LAYOUT_VERSION
+ "; new CTime = " + nsInfo.getCTime());
File curDir = sd.getCurrentDir();
File prevDir = sd.getPreviousDir();
File bbwDir = new File(sd.getRoot(), Storage.STORAGE_1_BBW);
assert curDir.exists() : "Data node current directory must exist.";
// Cleanup directory "detach"
cleanupDetachDir(new File(curDir, STORAGE_DIR_DETACHED));
// 1. delete <SD>/previous dir before upgrading
if (prevDir.exists())
deleteDir(prevDir);
// get previous.tmp directory, <SD>/previous.tmp
File tmpDir = sd.getPreviousTmp();
assert !tmpDir.exists() :
"Data node previous.tmp directory must not exist.";
// 2. Rename <SD>/current to <SD>/previous.tmp
rename(curDir, tmpDir);
// 3. Format BP and hard link blocks from previous directory
File curBpDir = BlockPoolSliceStorage.getBpRoot(nsInfo.getBlockPoolID(), curDir);
BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(nsInfo.getNamespaceID(),
nsInfo.getBlockPoolID(), nsInfo.getCTime(), nsInfo.getClusterID());
bpStorage.format(curDir, nsInfo);
linkAllBlocks(datanode, tmpDir, bbwDir, new File(curBpDir,
STORAGE_DIR_CURRENT));
// 4. Write version file under <SD>/current
layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
clusterID = nsInfo.getClusterID();
writeProperties(sd);
// 5. Rename <SD>/previous.tmp to <SD>/previous
rename(tmpDir, prevDir);
LOG.info("Upgrade of " + sd.getRoot()+ " is complete");
addBlockPoolStorage(nsInfo.getBlockPoolID(), bpStorage);
}
/**
* Cleanup the detachDir.
*
* If the directory is not empty report an error;
* Otherwise remove the directory.
*
* @param detachDir detach directory
* @throws IOException if the directory is not empty or it can not be removed
*/
private void cleanupDetachDir(File detachDir) throws IOException {
if (!DataNodeLayoutVersion.supports(
LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion) &&
detachDir.exists() && detachDir.isDirectory() ) {
if (FileUtil.list(detachDir).length != 0 ) {
throw new IOException("Detached directory " + detachDir +
" is not empty. Please manually move each file under this " +
"directory to the finalized directory if the finalized " +
"directory tree does not have the file.");
} else if (!detachDir.delete()) {
throw new IOException("Cannot remove directory " + detachDir);
}
}
}
/**
* Rolling back to a snapshot in previous directory by moving it to current
* directory.
* Rollback procedure:
* <br>
* If previous directory exists:
* <ol>
* <li> Rename current to removed.tmp </li>
* <li> Rename previous to current </li>
* <li> Remove removed.tmp </li>
* </ol>
*
* If previous directory does not exist and the current version supports
* federation, perform a simple rollback of layout version. This does not
* involve saving/restoration of actual data.
*/
void doRollback( StorageDirectory sd,
NamespaceInfo nsInfo
) throws IOException {
File prevDir = sd.getPreviousDir();
// This is a regular startup or a post-federation rollback
if (!prevDir.exists()) {
if (DataNodeLayoutVersion.supports(LayoutVersion.Feature.FEDERATION,
HdfsServerConstants.DATANODE_LAYOUT_VERSION)) {
readProperties(sd, HdfsServerConstants.DATANODE_LAYOUT_VERSION);
writeProperties(sd);
LOG.info("Layout version rolled back to "
+ HdfsServerConstants.DATANODE_LAYOUT_VERSION + " for storage "
+ sd.getRoot());
}
return;
}
DataStorage prevInfo = new DataStorage();
prevInfo.readPreviousVersionProperties(sd);
// We allow rollback to a state, which is either consistent with
// the namespace state or can be further upgraded to it.
if (!(prevInfo.getLayoutVersion() >= HdfsServerConstants.DATANODE_LAYOUT_VERSION
&& prevInfo.getCTime() <= nsInfo.getCTime())) // cannot rollback
throw new InconsistentFSStateException(sd.getRoot(),
"Cannot rollback to a newer state.\nDatanode previous state: LV = "
+ prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
+ " is newer than the namespace state: LV = "
+ HdfsServerConstants.DATANODE_LAYOUT_VERSION + " CTime = "
+ nsInfo.getCTime());
LOG.info("Rolling back storage directory " + sd.getRoot()
+ ".\n target LV = " + HdfsServerConstants.DATANODE_LAYOUT_VERSION
+ "; target CTime = " + nsInfo.getCTime());
File tmpDir = sd.getRemovedTmp();
assert !tmpDir.exists() : "removed.tmp directory must not exist.";
// rename current to tmp
File curDir = sd.getCurrentDir();
assert curDir.exists() : "Current directory must exist.";
rename(curDir, tmpDir);
// rename previous to current
rename(prevDir, curDir);
// delete tmp dir
deleteDir(tmpDir);
LOG.info("Rollback of " + sd.getRoot() + " is complete");
}
/**
* Finalize procedure deletes an existing snapshot.
* <ol>
* <li>Rename previous to finalized.tmp directory</li>
* <li>Fully delete the finalized.tmp directory</li>
* </ol>
*
* Do nothing, if previous directory does not exist
*/
void doFinalize(StorageDirectory sd) throws IOException {
File prevDir = sd.getPreviousDir();
if (!prevDir.exists())
return; // already discarded
final String dataDirPath = sd.getRoot().getCanonicalPath();
LOG.info("Finalizing upgrade for storage directory "
+ dataDirPath
+ ".\n cur LV = " + this.getLayoutVersion()
+ "; cur CTime = " + this.getCTime());
assert sd.getCurrentDir().exists() : "Current directory must exist.";
final File tmpDir = sd.getFinalizedTmp();//finalized.tmp directory
final File bbwDir = new File(sd.getRoot(), Storage.STORAGE_1_BBW);
// 1. rename previous to finalized.tmp
rename(prevDir, tmpDir);
// 2. delete finalized.tmp dir in a separate thread
// Also delete the blocksBeingWritten from HDFS 1.x and earlier, if
// it exists.
new Daemon(new Runnable() {
@Override
public void run() {
try {
deleteDir(tmpDir);
if (bbwDir.exists()) {
deleteDir(bbwDir);
}
} catch(IOException ex) {
LOG.error("Finalize upgrade for " + dataDirPath + " failed", ex);
}
LOG.info("Finalize upgrade for " + dataDirPath + " is complete");
}
@Override
public String toString() { return "Finalize " + dataDirPath; }
}).start();
}
/*
* Finalize the upgrade for a block pool
* This also empties trash created during rolling upgrade and disables
* trash functionality.
*/
void finalizeUpgrade(String bpID) throws IOException {
// To handle finalizing a snapshot taken at datanode level while
// upgrading to federation, if datanode level snapshot previous exists,
// then finalize it. Else finalize the corresponding BP.
for (StorageDirectory sd : storageDirs) {
File prevDir = sd.getPreviousDir();
if (prevDir.exists()) {
// data node level storage finalize
doFinalize(sd);
} else {
// block pool storage finalize using specific bpID
BlockPoolSliceStorage bpStorage = bpStorageMap.get(bpID);
bpStorage.doFinalize(sd.getCurrentDir());
}
}
}
/**
* Hardlink all finalized and RBW blocks in fromDir to toDir
*
* @param fromDir The directory where the 'from' snapshot is stored
* @param fromBbwDir In HDFS 1.x, the directory where blocks
* that are under construction are stored.
* @param toDir The current data directory
*
* @throws IOException If error occurs during hardlink
*/
private void linkAllBlocks(DataNode datanode, File fromDir, File fromBbwDir,
File toDir) throws IOException {
HardLink hardLink = new HardLink();
// do the link
int diskLayoutVersion = this.getLayoutVersion();
if (DataNodeLayoutVersion.supports(
LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
// hardlink finalized blocks in tmpDir/finalized
linkBlocks(datanode, new File(fromDir, STORAGE_DIR_FINALIZED),
new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
// hardlink rbw blocks in tmpDir/rbw
linkBlocks(datanode, new File(fromDir, STORAGE_DIR_RBW),
new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
} else { // pre-RBW version
// hardlink finalized blocks in tmpDir
linkBlocks(datanode, fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
diskLayoutVersion, hardLink);
if (fromBbwDir.exists()) {
/*
* We need to put the 'blocksBeingWritten' from HDFS 1.x into the rbw
         * directory. It's a little messy, because the blocksBeingWritten was
* NOT underneath the 'current' directory in those releases. See
* HDFS-3731 for details.
*/
linkBlocks(datanode, fromBbwDir,
new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
}
}
LOG.info( hardLink.linkStats.report() );
}
private static class LinkArgs {
File src;
File dst;
LinkArgs(File src, File dst) {
this.src = src;
this.dst = dst;
}
}
static void linkBlocks(DataNode datanode, File from, File to, int oldLV,
HardLink hl) throws IOException {
boolean upgradeToIdBasedLayout = false;
// If we are upgrading from a version older than the one where we introduced
// block ID-based layout AND we're working with the finalized directory,
// we'll need to upgrade from the old flat layout to the block ID-based one
if (oldLV > DataNodeLayoutVersion.Feature.BLOCKID_BASED_LAYOUT.getInfo().
getLayoutVersion() && to.getName().equals(STORAGE_DIR_FINALIZED)) {
upgradeToIdBasedLayout = true;
}
final ArrayList<LinkArgs> idBasedLayoutSingleLinks = Lists.newArrayList();
linkBlocksHelper(from, to, oldLV, hl, upgradeToIdBasedLayout, to,
idBasedLayoutSingleLinks);
// Detect and remove duplicate entries.
final ArrayList<LinkArgs> duplicates =
findDuplicateEntries(idBasedLayoutSingleLinks);
if (!duplicates.isEmpty()) {
LOG.error("There are " + duplicates.size() + " duplicate block " +
"entries within the same volume.");
removeDuplicateEntries(idBasedLayoutSingleLinks, duplicates);
}
int numLinkWorkers = datanode.getConf().getInt(
DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY,
DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS);
ExecutorService linkWorkers = Executors.newFixedThreadPool(numLinkWorkers);
final int step = idBasedLayoutSingleLinks.size() / numLinkWorkers + 1;
List<Future<Void>> futures = Lists.newArrayList();
for (int i = 0; i < idBasedLayoutSingleLinks.size(); i += step) {
final int iCopy = i;
futures.add(linkWorkers.submit(new Callable<Void>() {
@Override
public Void call() throws IOException {
int upperBound = Math.min(iCopy + step,
idBasedLayoutSingleLinks.size());
for (int j = iCopy; j < upperBound; j++) {
LinkArgs cur = idBasedLayoutSingleLinks.get(j);
HardLink.createHardLink(cur.src, cur.dst);
}
return null;
}
}));
}
linkWorkers.shutdown();
for (Future<Void> f : futures) {
Futures.get(f, IOException.class);
}
}
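  /*
   * Worked example for the work partitioning above (illustrative numbers): with
   * 12000 hard links to create and 6 upgrade threads, step = 12000 / 6 + 1 = 2001,
   * so the submitted tasks cover index ranges [0, 2001), [2001, 4002), ...,
   * [10005, 12000); the final range is clipped to the list size by Math.min().
   */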
/**
   * Find duplicate entries within an array of LinkArgs.
* Duplicate entries are entries with the same last path component.
*/
static ArrayList<LinkArgs> findDuplicateEntries(ArrayList<LinkArgs> all) {
// Find duplicates by sorting the list by the final path component.
Collections.sort(all, new Comparator<LinkArgs>() {
/**
* Compare two LinkArgs objects, such that objects with the same
* terminal source path components are grouped together.
*/
@Override
public int compare(LinkArgs a, LinkArgs b) {
return ComparisonChain.start().
compare(a.src.getName(), b.src.getName()).
compare(a.src, b.src).
compare(a.dst, b.dst).
result();
}
});
final ArrayList<LinkArgs> duplicates = Lists.newArrayList();
Long prevBlockId = null;
boolean prevWasMeta = false;
boolean addedPrev = false;
for (int i = 0; i < all.size(); i++) {
LinkArgs args = all.get(i);
long blockId = Block.getBlockId(args.src.getName());
boolean isMeta = Block.isMetaFilename(args.src.getName());
if ((prevBlockId == null) ||
(prevBlockId.longValue() != blockId)) {
prevBlockId = blockId;
addedPrev = false;
} else if (isMeta == prevWasMeta) {
// If we saw another file for the same block ID previously,
// and it had the same meta-ness as this file, we have a
// duplicate.
duplicates.add(args);
if (!addedPrev) {
duplicates.add(all.get(i - 1));
}
addedPrev = true;
} else {
addedPrev = false;
}
prevWasMeta = isMeta;
}
return duplicates;
}
/**
* Remove duplicate entries from the list.
* We do this by choosing:
* 1. the entries with the highest genstamp (this takes priority),
* 2. the entries with the longest block files,
* 3. arbitrarily, if neither #1 nor #2 gives a clear winner.
*
* Block and metadata files form a pair-- if you take a metadata file from
* one subdirectory, you must also take the block file from that
* subdirectory.
*/
private static void removeDuplicateEntries(ArrayList<LinkArgs> all,
ArrayList<LinkArgs> duplicates) {
// Maps blockId -> metadata file with highest genstamp
TreeMap<Long, List<LinkArgs>> highestGenstamps =
new TreeMap<Long, List<LinkArgs>>();
for (LinkArgs duplicate : duplicates) {
if (!Block.isMetaFilename(duplicate.src.getName())) {
continue;
}
long blockId = Block.getBlockId(duplicate.src.getName());
List<LinkArgs> prevHighest = highestGenstamps.get(blockId);
if (prevHighest == null) {
List<LinkArgs> highest = new LinkedList<LinkArgs>();
highest.add(duplicate);
highestGenstamps.put(blockId, highest);
continue;
}
long prevGenstamp =
Block.getGenerationStamp(prevHighest.get(0).src.getName());
long genstamp = Block.getGenerationStamp(duplicate.src.getName());
if (genstamp < prevGenstamp) {
continue;
}
if (genstamp > prevGenstamp) {
prevHighest.clear();
}
prevHighest.add(duplicate);
}
// Remove data / metadata entries that don't have the highest genstamp
// from the duplicates list.
for (Iterator<LinkArgs> iter = duplicates.iterator(); iter.hasNext(); ) {
LinkArgs duplicate = iter.next();
long blockId = Block.getBlockId(duplicate.src.getName());
List<LinkArgs> highest = highestGenstamps.get(blockId);
if (highest != null) {
boolean found = false;
for (LinkArgs high : highest) {
if (high.src.getParent().equals(duplicate.src.getParent())) {
found = true;
break;
}
}
if (!found) {
LOG.warn("Unexpectedly low genstamp on " +
duplicate.src.getAbsolutePath() + ".");
iter.remove();
}
}
}
// Find the longest block files
// We let the "last guy win" here, since we're only interested in
// preserving one block file / metadata file pair.
TreeMap<Long, LinkArgs> longestBlockFiles = new TreeMap<Long, LinkArgs>();
for (LinkArgs duplicate : duplicates) {
if (Block.isMetaFilename(duplicate.src.getName())) {
continue;
}
long blockId = Block.getBlockId(duplicate.src.getName());
LinkArgs prevLongest = longestBlockFiles.get(blockId);
if (prevLongest == null) {
longestBlockFiles.put(blockId, duplicate);
continue;
}
long blockLength = duplicate.src.length();
long prevBlockLength = prevLongest.src.length();
if (blockLength < prevBlockLength) {
LOG.warn("Unexpectedly short length on " +
duplicate.src.getAbsolutePath() + ".");
continue;
}
if (blockLength > prevBlockLength) {
LOG.warn("Unexpectedly short length on " +
prevLongest.src.getAbsolutePath() + ".");
}
longestBlockFiles.put(blockId, duplicate);
}
// Remove data / metadata entries that aren't the longest, or weren't
// arbitrarily selected by us.
for (Iterator<LinkArgs> iter = all.iterator(); iter.hasNext(); ) {
LinkArgs args = iter.next();
long blockId = Block.getBlockId(args.src.getName());
LinkArgs bestDuplicate = longestBlockFiles.get(blockId);
if (bestDuplicate == null) {
continue; // file has no duplicates
}
if (!bestDuplicate.src.getParent().equals(args.src.getParent())) {
LOG.warn("Discarding " + args.src.getAbsolutePath() + ".");
iter.remove();
}
}
}
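  /*
   * Worked example for the resolution rules above (illustrative paths and
   * numbers). Suppose the same block appears in two subdirectories:
   *
   *   subdir0/blk_123 (1024 bytes)    subdir0/blk_123_1001.meta
   *   subdir7/blk_123 (4096 bytes)    subdir7/blk_123_1002.meta
   *
   * Rule 1 keeps only the entries from the subdirectory whose metadata file has
   * the highest generation stamp (1002 here), so the subdir7 pair survives and
   * both subdir0 entries are discarded. Rule 2 (longest block file) only decides
   * among entries still tied after rule 1, and the block/meta pair is always
   * taken from the same subdirectory.
   */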
static void linkBlocksHelper(File from, File to, int oldLV, HardLink hl,
boolean upgradeToIdBasedLayout, File blockRoot,
List<LinkArgs> idBasedLayoutSingleLinks) throws IOException {
if (!from.exists()) {
return;
}
if (!from.isDirectory()) {
HardLink.createHardLink(from, to);
hl.linkStats.countSingleLinks++;
return;
}
// from is a directory
hl.linkStats.countDirs++;
String[] blockNames = from.list(new java.io.FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
return name.startsWith(Block.BLOCK_FILE_PREFIX);
}
});
// If we are upgrading to block ID-based layout, we don't want to recreate
// any subdirs from the source that contain blocks, since we have a new
// directory structure
if (!upgradeToIdBasedLayout || !to.getName().startsWith(
BLOCK_SUBDIR_PREFIX)) {
if (!to.mkdirs())
throw new IOException("Cannot create directory " + to);
}
// Block files just need hard links with the same file names
// but a different directory
if (blockNames.length > 0) {
if (upgradeToIdBasedLayout) {
for (String blockName : blockNames) {
long blockId = Block.getBlockId(blockName);
File blockLocation = DatanodeUtil.idToBlockDir(blockRoot, blockId);
if (!blockLocation.exists()) {
if (!blockLocation.mkdirs()) {
throw new IOException("Failed to mkdirs " + blockLocation);
}
}
idBasedLayoutSingleLinks.add(new LinkArgs(new File(from, blockName),
new File(blockLocation, blockName)));
hl.linkStats.countSingleLinks++;
}
} else {
HardLink.createHardLinkMult(from, blockNames, to);
hl.linkStats.countMultLinks++;
hl.linkStats.countFilesMultLinks += blockNames.length;
}
} else {
hl.linkStats.countEmptyDirs++;
}
// Now take care of the rest of the files and subdirectories
String[] otherNames = from.list(new java.io.FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
return name.startsWith(BLOCK_SUBDIR_PREFIX);
}
});
for(int i = 0; i < otherNames.length; i++)
linkBlocksHelper(new File(from, otherNames[i]),
new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout,
blockRoot, idBasedLayoutSingleLinks);
}
/**
* Add bpStorage into bpStorageMap
*/
private void addBlockPoolStorage(String bpID, BlockPoolSliceStorage bpStorage
) {
if (!this.bpStorageMap.containsKey(bpID)) {
this.bpStorageMap.put(bpID, bpStorage);
}
}
synchronized void removeBlockPoolStorage(String bpId) {
bpStorageMap.remove(bpId);
}
}
| 48397 | 36.372973 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.Time;
/**
* Periodically scans the data directories for block and block metadata files.
* Reconciles the differences with block information maintained in the dataset.
*/
@InterfaceAudience.Private
public class DirectoryScanner implements Runnable {
private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
private final FsDatasetSpi<?> dataset;
private final ExecutorService reportCompileThreadPool;
private final ScheduledExecutorService masterThread;
private final long scanPeriodMsecs;
private volatile boolean shouldRun = false;
private boolean retainDiffs = false;
private final DataNode datanode;
final ScanInfoPerBlockPool diffs = new ScanInfoPerBlockPool();
final Map<String, Stats> stats = new HashMap<String, Stats>();
/**
* Allow retaining diffs for unit test and analysis
* @param b - defaults to false (off)
*/
void setRetainDiffs(boolean b) {
retainDiffs = b;
}
/** Stats tracked for reporting and testing, per blockpool */
static class Stats {
final String bpid;
long totalBlocks = 0;
long missingMetaFile = 0;
long missingBlockFile = 0;
long missingMemoryBlocks = 0;
long mismatchBlocks = 0;
long duplicateBlocks = 0;
public Stats(String bpid) {
this.bpid = bpid;
}
@Override
public String toString() {
return "BlockPool " + bpid
+ " Total blocks: " + totalBlocks + ", missing metadata files:"
+ missingMetaFile + ", missing block files:" + missingBlockFile
+ ", missing blocks in memory:" + missingMemoryBlocks
+ ", mismatched blocks:" + mismatchBlocks;
}
}
static class ScanInfoPerBlockPool extends
HashMap<String, LinkedList<ScanInfo>> {
private static final long serialVersionUID = 1L;
ScanInfoPerBlockPool() {super();}
ScanInfoPerBlockPool(int sz) {super(sz);}
/**
* Merges {@code that} ScanInfoPerBlockPool into this one
*/
public void addAll(ScanInfoPerBlockPool that) {
if (that == null) return;
for (Entry<String, LinkedList<ScanInfo>> entry : that.entrySet()) {
String bpid = entry.getKey();
LinkedList<ScanInfo> list = entry.getValue();
if (this.containsKey(bpid)) {
//merge that per-bpid linked list with this one
this.get(bpid).addAll(list);
} else {
//add that new bpid and its linked list to this
this.put(bpid, list);
}
}
}
/**
* Convert all the LinkedList values in this ScanInfoPerBlockPool map
* into sorted arrays, and return a new map of these arrays per blockpool
* @return a map of ScanInfo arrays per blockpool
*/
public Map<String, ScanInfo[]> toSortedArrays() {
Map<String, ScanInfo[]> result =
new HashMap<String, ScanInfo[]>(this.size());
for (Entry<String, LinkedList<ScanInfo>> entry : this.entrySet()) {
String bpid = entry.getKey();
LinkedList<ScanInfo> list = entry.getValue();
// convert list to array
ScanInfo[] record = list.toArray(new ScanInfo[list.size()]);
// Sort array based on blockId
Arrays.sort(record);
result.put(bpid, record);
}
return result;
}
}
/**
   * Tracks the files and other information related to a block on the disk.
   * A missing file is indicated by setting the corresponding member
   * to null.
*
* Because millions of these structures may be created, we try to save
* memory here. So instead of storing full paths, we store path suffixes.
* The block file, if it exists, will have a path like this:
* <volume_base_path>/<block_path>
* So we don't need to store the volume path, since we already know what the
* volume is.
*
* The metadata file, if it exists, will have a path like this:
* <volume_base_path>/<block_path>_<genstamp>.meta
* So if we have a block file, there isn't any need to store the block path
* again.
*
* The accessor functions take care of these manipulations.
*/
static class ScanInfo implements Comparable<ScanInfo> {
private final long blockId;
/**
* The block file path, relative to the volume's base directory.
* If there was no block file found, this may be null. If 'vol'
* is null, then this is the full path of the block file.
*/
private final String blockSuffix;
/**
* The suffix of the meta file path relative to the block file.
* If blockSuffix is null, then this will be the entire path relative
* to the volume base directory, or an absolute path if vol is also
* null.
*/
private final String metaSuffix;
private final FsVolumeSpi volume;
/**
     * The length of the block file, captured when the report was compiled.
*/
private final long blockFileLength;
private final static Pattern CONDENSED_PATH_REGEX =
Pattern.compile("(?<!^)(\\\\|/){2,}");
private final static String QUOTED_FILE_SEPARATOR =
Matcher.quoteReplacement(File.separator);
/**
* Get the most condensed version of the path.
*
* For example, the condensed version of /foo//bar is /foo/bar
* Unlike {@link File#getCanonicalPath()}, this will never perform I/O
* on the filesystem.
*/
private static String getCondensedPath(String path) {
return CONDENSED_PATH_REGEX.matcher(path).
replaceAll(QUOTED_FILE_SEPARATOR);
}
/**
* Get a path suffix.
*
* @param f The file to get the suffix for.
* @param prefix The prefix we're stripping off.
*
* @return A suffix such that prefix + suffix = path to f
*/
private static String getSuffix(File f, String prefix) {
String fullPath = getCondensedPath(f.getAbsolutePath());
if (fullPath.startsWith(prefix)) {
return fullPath.substring(prefix.length());
}
throw new RuntimeException(prefix + " is not a prefix of " + fullPath);
}
ScanInfo(long blockId, File blockFile, File metaFile, FsVolumeSpi vol) {
this.blockId = blockId;
String condensedVolPath = vol == null ? null :
getCondensedPath(vol.getBasePath());
this.blockSuffix = blockFile == null ? null :
getSuffix(blockFile, condensedVolPath);
this.blockFileLength = (blockFile != null) ? blockFile.length() : 0;
if (metaFile == null) {
this.metaSuffix = null;
} else if (blockFile == null) {
this.metaSuffix = getSuffix(metaFile, condensedVolPath);
} else {
this.metaSuffix = getSuffix(metaFile,
condensedVolPath + blockSuffix);
}
this.volume = vol;
}
File getBlockFile() {
return (blockSuffix == null) ? null :
new File(volume.getBasePath(), blockSuffix);
}
long getBlockFileLength() {
return blockFileLength;
}
File getMetaFile() {
if (metaSuffix == null) {
return null;
} else if (blockSuffix == null) {
return new File(volume.getBasePath(), metaSuffix);
} else {
return new File(volume.getBasePath(), blockSuffix + metaSuffix);
}
}
long getBlockId() {
return blockId;
}
FsVolumeSpi getVolume() {
return volume;
}
@Override // Comparable
public int compareTo(ScanInfo b) {
if (blockId < b.blockId) {
return -1;
} else if (blockId == b.blockId) {
return 0;
} else {
return 1;
}
}
@Override // Object
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof ScanInfo)) {
return false;
}
return blockId == ((ScanInfo) o).blockId;
}
@Override // Object
public int hashCode() {
return (int)(blockId^(blockId>>>32));
}
public long getGenStamp() {
return metaSuffix != null ? Block.getGenerationStamp(
getMetaFile().getName()) :
HdfsConstants.GRANDFATHER_GENERATION_STAMP;
}
}
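  /*
   * Worked example for the suffix scheme described on ScanInfo (illustrative
   * paths): with a volume base path of /data/dn and a block file
   * /data/dn/current/BP-1/current/finalized/subdir0/blk_123, blockSuffix stores
   * only "/current/BP-1/current/finalized/subdir0/blk_123". For the matching
   * metadata file .../blk_123_1001.meta, metaSuffix stores only "_1001.meta",
   * since the rest of the path is shared with the block file. getBlockFile()
   * and getMetaFile() rebuild the absolute paths on demand.
   */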
DirectoryScanner(DataNode datanode, FsDatasetSpi<?> dataset, Configuration conf) {
this.datanode = datanode;
this.dataset = dataset;
int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
scanPeriodMsecs = interval * 1000L; //msec
int threads =
conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT);
reportCompileThreadPool = Executors.newFixedThreadPool(threads,
new Daemon.DaemonFactory());
masterThread = new ScheduledThreadPoolExecutor(1,
new Daemon.DaemonFactory());
}
void start() {
shouldRun = true;
long offset = ThreadLocalRandom.current().nextInt(
(int) (scanPeriodMsecs/1000L)) * 1000L; //msec
long firstScanTime = Time.now() + offset;
LOG.info("Periodic Directory Tree Verification scan starting at "
+ firstScanTime + " with interval " + scanPeriodMsecs);
masterThread.scheduleAtFixedRate(this, offset, scanPeriodMsecs,
TimeUnit.MILLISECONDS);
}
// for unit test
boolean getRunStatus() {
return shouldRun;
}
private void clear() {
diffs.clear();
stats.clear();
}
/**
* Main program loop for DirectoryScanner
* Runs "reconcile()" periodically under the masterThread.
*/
@Override
public void run() {
try {
if (!shouldRun) {
//shutdown has been activated
LOG.warn("this cycle terminating immediately because 'shouldRun' has been deactivated");
return;
}
      //We're okay to run - do it
reconcile();
} catch (Exception e) {
//Log and continue - allows Executor to run again next cycle
LOG.error("Exception during DirectoryScanner execution - will continue next cycle", e);
} catch (Error er) {
//Non-recoverable error - re-throw after logging the problem
LOG.error("System Error during DirectoryScanner execution - permanently terminating periodic scanner", er);
throw er;
}
}
void shutdown() {
if (!shouldRun) {
LOG.warn("DirectoryScanner: shutdown has been called, but periodic scanner not started");
} else {
LOG.warn("DirectoryScanner: shutdown has been called");
}
shouldRun = false;
if (masterThread != null) masterThread.shutdown();
if (reportCompileThreadPool != null) reportCompileThreadPool.shutdown();
if (masterThread != null) {
try {
masterThread.awaitTermination(1, TimeUnit.MINUTES);
} catch (InterruptedException e) {
LOG.error("interrupted while waiting for masterThread to " +
"terminate", e);
}
}
if (reportCompileThreadPool != null) {
try {
reportCompileThreadPool.awaitTermination(1, TimeUnit.MINUTES);
} catch (InterruptedException e) {
LOG.error("interrupted while waiting for reportCompileThreadPool to " +
"terminate", e);
}
}
if (!retainDiffs) clear();
}
/**
* Reconcile differences between disk and in-memory blocks
*/
void reconcile() throws IOException {
scan();
for (Entry<String, LinkedList<ScanInfo>> entry : diffs.entrySet()) {
String bpid = entry.getKey();
LinkedList<ScanInfo> diff = entry.getValue();
for (ScanInfo info : diff) {
dataset.checkAndUpdate(bpid, info.getBlockId(), info.getBlockFile(),
info.getMetaFile(), info.getVolume());
}
}
if (!retainDiffs) clear();
}
/**
   * Scan for the differences between disk and in-memory blocks.
* Scan only the "finalized blocks" lists of both disk and memory.
*/
void scan() {
clear();
Map<String, ScanInfo[]> diskReport = getDiskReport();
// Hold FSDataset lock to prevent further changes to the block map
synchronized(dataset) {
for (Entry<String, ScanInfo[]> entry : diskReport.entrySet()) {
String bpid = entry.getKey();
ScanInfo[] blockpoolReport = entry.getValue();
Stats statsRecord = new Stats(bpid);
stats.put(bpid, statsRecord);
LinkedList<ScanInfo> diffRecord = new LinkedList<ScanInfo>();
diffs.put(bpid, diffRecord);
statsRecord.totalBlocks = blockpoolReport.length;
List<FinalizedReplica> bl = dataset.getFinalizedBlocks(bpid);
FinalizedReplica[] memReport = bl.toArray(new FinalizedReplica[bl.size()]);
Arrays.sort(memReport); // Sort based on blockId
int d = 0; // index for blockpoolReport
        int m = 0; // index for memReport
while (m < memReport.length && d < blockpoolReport.length) {
FinalizedReplica memBlock = memReport[m];
ScanInfo info = blockpoolReport[d];
if (info.getBlockId() < memBlock.getBlockId()) {
if (!dataset.isDeletingBlock(bpid, info.getBlockId())) {
// Block is missing in memory
statsRecord.missingMemoryBlocks++;
addDifference(diffRecord, statsRecord, info);
}
d++;
continue;
}
if (info.getBlockId() > memBlock.getBlockId()) {
// Block is missing on the disk
addDifference(diffRecord, statsRecord,
memBlock.getBlockId(), info.getVolume());
m++;
continue;
}
// Block file and/or metadata file exists on the disk
// Block exists in memory
if (info.getBlockFile() == null) {
          // Block metadata file exists and block file is missing
addDifference(diffRecord, statsRecord, info);
} else if (info.getGenStamp() != memBlock.getGenerationStamp()
|| info.getBlockFileLength() != memBlock.getNumBytes()) {
// Block metadata file is missing or has wrong generation stamp,
// or block file length is different than expected
statsRecord.mismatchBlocks++;
addDifference(diffRecord, statsRecord, info);
} else if (info.getBlockFile().compareTo(memBlock.getBlockFile()) != 0) {
// volumeMap record and on-disk files don't match.
statsRecord.duplicateBlocks++;
addDifference(diffRecord, statsRecord, info);
}
d++;
if (d < blockpoolReport.length) {
// There may be multiple on-disk records for the same block, don't increment
// the memory record pointer if so.
ScanInfo nextInfo = blockpoolReport[Math.min(d, blockpoolReport.length - 1)];
if (nextInfo.getBlockId() != info.blockId) {
++m;
}
} else {
++m;
}
}
while (m < memReport.length) {
FinalizedReplica current = memReport[m++];
addDifference(diffRecord, statsRecord,
current.getBlockId(), current.getVolume());
}
while (d < blockpoolReport.length) {
if (!dataset.isDeletingBlock(bpid, blockpoolReport[d].getBlockId())) {
statsRecord.missingMemoryBlocks++;
addDifference(diffRecord, statsRecord, blockpoolReport[d]);
}
d++;
}
LOG.info(statsRecord.toString());
} //end for
} //end synchronized
}
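  /*
   * Worked example for the merge above (illustrative block IDs): with a sorted
   * disk report of {101, 103, 104} and sorted in-memory replicas {101, 102, 104},
   * the two cursors advance together: 101 and 104 are compared field by field,
   * 103 is recorded as missing in memory (unless it is being deleted), and 102
   * is recorded as missing on disk.
   */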
/**
* Block is found on the disk. In-memory block is missing or does not match
* the block on the disk
*/
private void addDifference(LinkedList<ScanInfo> diffRecord,
Stats statsRecord, ScanInfo info) {
statsRecord.missingMetaFile += info.getMetaFile() == null ? 1 : 0;
statsRecord.missingBlockFile += info.getBlockFile() == null ? 1 : 0;
diffRecord.add(info);
}
/** Block is not found on the disk */
private void addDifference(LinkedList<ScanInfo> diffRecord,
Stats statsRecord, long blockId,
FsVolumeSpi vol) {
statsRecord.missingBlockFile++;
statsRecord.missingMetaFile++;
diffRecord.add(new ScanInfo(blockId, null, null, vol));
}
/** Get lists of blocks on the disk sorted by blockId, per blockpool */
private Map<String, ScanInfo[]> getDiskReport() {
ScanInfoPerBlockPool list = new ScanInfoPerBlockPool();
ScanInfoPerBlockPool[] dirReports = null;
// First get list of data directories
try (FsDatasetSpi.FsVolumeReferences volumes =
dataset.getFsVolumeReferences()) {
// Use an array since the threads may return out of order and
// compilersInProgress#keySet may return out of order as well.
dirReports = new ScanInfoPerBlockPool[volumes.size()];
Map<Integer, Future<ScanInfoPerBlockPool>> compilersInProgress =
new HashMap<Integer, Future<ScanInfoPerBlockPool>>();
for (int i = 0; i < volumes.size(); i++) {
ReportCompiler reportCompiler =
new ReportCompiler(datanode, volumes.get(i));
Future<ScanInfoPerBlockPool> result =
reportCompileThreadPool.submit(reportCompiler);
compilersInProgress.put(i, result);
}
for (Entry<Integer, Future<ScanInfoPerBlockPool>> report :
compilersInProgress.entrySet()) {
try {
dirReports[report.getKey()] = report.getValue().get();
} catch (Exception ex) {
LOG.error("Error compiling report", ex);
// Propagate ex to the caller to deal with
throw new RuntimeException(ex);
}
}
} catch (IOException e) {
LOG.error("Unexpected IOException by closing FsVolumeReference", e);
}
if (dirReports != null) {
// Compile consolidated report for all the volumes
for (ScanInfoPerBlockPool report : dirReports) {
list.addAll(report);
}
}
return list.toSortedArrays();
}
private static boolean isBlockMetaFile(String blockId, String metaFile) {
return metaFile.startsWith(blockId)
&& metaFile.endsWith(Block.METADATA_EXTENSION);
}
private static class ReportCompiler
implements Callable<ScanInfoPerBlockPool> {
private final FsVolumeSpi volume;
private final DataNode datanode;
public ReportCompiler(DataNode datanode, FsVolumeSpi volume) {
this.datanode = datanode;
this.volume = volume;
}
@Override
public ScanInfoPerBlockPool call() throws Exception {
String[] bpList = volume.getBlockPoolList();
ScanInfoPerBlockPool result = new ScanInfoPerBlockPool(bpList.length);
for (String bpid : bpList) {
LinkedList<ScanInfo> report = new LinkedList<ScanInfo>();
File bpFinalizedDir = volume.getFinalizedDir(bpid);
result.put(bpid,
compileReport(volume, bpFinalizedDir, bpFinalizedDir, report));
}
return result;
}
/** Compile a list of {@link ScanInfo} for the blocks in the directory <dir> */
private LinkedList<ScanInfo> compileReport(FsVolumeSpi vol,
File bpFinalizedDir, File dir, LinkedList<ScanInfo> report) {
File[] files;
try {
files = FileUtil.listFiles(dir);
} catch (IOException ioe) {
LOG.warn("Exception occured while compiling report: ", ioe);
// Initiate a check on disk failure.
datanode.checkDiskErrorAsync();
// Ignore this directory and proceed.
return report;
}
Arrays.sort(files);
/*
* Assumption: In the sorted list of files block file appears immediately
* before block metadata file. This is true for the current naming
* convention for block file blk_<blockid> and meta file
* blk_<blockid>_<genstamp>.meta
*/
for (int i = 0; i < files.length; i++) {
if (files[i].isDirectory()) {
compileReport(vol, bpFinalizedDir, files[i], report);
continue;
}
if (!Block.isBlockFilename(files[i])) {
if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, files[i].getName())) {
long blockId = Block.getBlockId(files[i].getName());
verifyFileLocation(files[i].getParentFile(), bpFinalizedDir,
blockId);
report.add(new ScanInfo(blockId, null, files[i], vol));
}
continue;
}
File blockFile = files[i];
long blockId = Block.filename2id(blockFile.getName());
File metaFile = null;
// Skip all the files that start with block name until
// getting to the metafile for the block
while (i + 1 < files.length && files[i + 1].isFile()
&& files[i + 1].getName().startsWith(blockFile.getName())) {
i++;
if (isBlockMetaFile(blockFile.getName(), files[i].getName())) {
metaFile = files[i];
break;
}
}
verifyFileLocation(blockFile.getParentFile(), bpFinalizedDir,
blockId);
report.add(new ScanInfo(blockId, blockFile, metaFile, vol));
}
return report;
}
/**
* Verify whether the actual directory location of block file has the
* expected directory path computed using its block ID.
*/
private void verifyFileLocation(File actualBlockDir,
File bpFinalizedDir, long blockId) {
File blockDir = DatanodeUtil.idToBlockDir(bpFinalizedDir, blockId);
if (actualBlockDir.compareTo(blockDir) != 0) {
LOG.warn("Block: " + blockId
+ " has to be upgraded to block ID-based layout");
}
}
}
}
| 23,748 | 34.288262 | 113 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
/**
* This class describes a replica that has been finalized.
*/
public class FinalizedReplica extends ReplicaInfo {
private boolean unlinked; // copy-on-write done for block
/**
* Constructor
* @param blockId block id
* @param len replica length
* @param genStamp replica generation stamp
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
public FinalizedReplica(long blockId, long len, long genStamp,
FsVolumeSpi vol, File dir) {
super(blockId, len, genStamp, vol, dir);
}
/**
* Constructor
* @param block a block
* @param vol volume where replica is located
* @param dir directory path where block and meta files are located
*/
public FinalizedReplica(Block block, FsVolumeSpi vol, File dir) {
super(block, vol, dir);
}
/**
* Copy constructor.
* @param from where to copy construct from
*/
public FinalizedReplica(FinalizedReplica from) {
super(from);
this.unlinked = from.isUnlinked();
}
@Override // ReplicaInfo
public ReplicaState getState() {
return ReplicaState.FINALIZED;
}
@Override // ReplicaInfo
public boolean isUnlinked() {
return unlinked;
}
@Override // ReplicaInfo
public void setUnlinked() {
unlinked = true;
}
@Override
public long getVisibleLength() {
return getNumBytes(); // all bytes are visible
}
@Override
public long getBytesOnDisk() {
return getNumBytes();
}
@Override // Object
public boolean equals(Object o) {
return super.equals(o);
}
@Override // Object
public int hashCode() {
return super.hashCode();
}
@Override
public String toString() {
return super.toString()
+ "\n unlinked =" + unlinked;
}
}
| 2,892 | 26.552381 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import java.io.IOException;
/**
* Used for injecting faults in DFSClient and DFSOutputStream tests.
* Calls into this are a no-op in production code.
*/
@VisibleForTesting
@InterfaceAudience.Private
public class DataNodeFaultInjector {
public static DataNodeFaultInjector instance = new DataNodeFaultInjector();
public static DataNodeFaultInjector get() {
return instance;
}
public void getHdfsBlocksMetadata() {}
public void writeBlockAfterFlush() throws IOException {}
public void sendShortCircuitShmResponse() throws IOException {}
}
| 1,535 | 33.133333 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.util.Daemon;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.Collections;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Manages storage for the set of BlockPoolSlices which share a particular
* block pool id, on this DataNode.
*
* This class supports the following functionality:
* <ol>
* <li> Formatting a new block pool storage</li>
* <li> Recovering a storage state to a consistent state (if possible)</li>
* <li> Taking a snapshot of the block pool during upgrade</li>
* <li> Rolling back a block pool to a previous snapshot</li>
* <li> Finalizing block storage by deletion of a snapshot</li>
* </ol>
*
* @see Storage
*/
@InterfaceAudience.Private
public class BlockPoolSliceStorage extends Storage {
static final String TRASH_ROOT_DIR = "trash";
/**
* A marker file that is created on each root directory if a rolling upgrade
* is in progress. The NN does not inform the DN when a rolling upgrade is
* finalized. All the DN can infer is whether or not a rolling upgrade is
* currently in progress. When the rolling upgrade is not in progress:
* 1. If the marker file is present, then a rolling upgrade just completed.
* If a 'previous' directory exists, it can be deleted now.
* 2. If the marker file is absent, then a regular upgrade may be in
* progress. Do not delete the 'previous' directory.
*/
static final String ROLLING_UPGRADE_MARKER_FILE = "RollingUpgradeInProgress";
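// Matches a single block pool directory component of the form
// "/BP-<random>-<namenode IP>-<creation time>/" within a storage path.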
private static final String BLOCK_POOL_ID_PATTERN_BASE =
Pattern.quote(File.separator) +
"BP-\\d+-\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}-\\d+" +
Pattern.quote(File.separator);
private static final Pattern BLOCK_POOL_PATH_PATTERN = Pattern.compile(
"^(.*)(" + BLOCK_POOL_ID_PATTERN_BASE + ")(.*)$");
private static final Pattern BLOCK_POOL_CURRENT_PATH_PATTERN = Pattern.compile(
"^(.*)(" + BLOCK_POOL_ID_PATTERN_BASE + ")(" + STORAGE_DIR_CURRENT + ")(.*)$");
private static final Pattern BLOCK_POOL_TRASH_PATH_PATTERN = Pattern.compile(
"^(.*)(" + BLOCK_POOL_ID_PATTERN_BASE + ")(" + TRASH_ROOT_DIR + ")(.*)$");
private String blockpoolID = ""; // id of the blockpool
public BlockPoolSliceStorage(StorageInfo storageInfo, String bpid) {
super(storageInfo);
blockpoolID = bpid;
}
/**
* These maps are used as an optimization to avoid one filesystem operation
* per storage on each heartbeat response.
*/
private static Set<String> storagesWithRollingUpgradeMarker;
private static Set<String> storagesWithoutRollingUpgradeMarker;
BlockPoolSliceStorage(int namespaceID, String bpID, long cTime,
String clusterId) {
super(NodeType.DATA_NODE);
this.namespaceID = namespaceID;
this.blockpoolID = bpID;
this.cTime = cTime;
this.clusterID = clusterId;
storagesWithRollingUpgradeMarker = Collections.newSetFromMap(
new ConcurrentHashMap<String, Boolean>());
storagesWithoutRollingUpgradeMarker = Collections.newSetFromMap(
new ConcurrentHashMap<String, Boolean>());
}
private BlockPoolSliceStorage() {
super(NodeType.DATA_NODE);
storagesWithRollingUpgradeMarker = Collections.newSetFromMap(
new ConcurrentHashMap<String, Boolean>());
storagesWithoutRollingUpgradeMarker = Collections.newSetFromMap(
new ConcurrentHashMap<String, Boolean>());
}
// Expose visibility for VolumeBuilder#commit().
public void addStorageDir(StorageDirectory sd) {
super.addStorageDir(sd);
}
/**
* Load one storage directory. Recover from previous transitions if required.
*
* @param datanode datanode instance
* @param nsInfo namespace information
* @param dataDir the root path of the storage directory
* @param startOpt startup option
* @return the StorageDirectory successfully loaded.
* @throws IOException
*/
private StorageDirectory loadStorageDirectory(DataNode datanode,
NamespaceInfo nsInfo, File dataDir, StartupOption startOpt) throws IOException {
StorageDirectory sd = new StorageDirectory(dataDir, null, true);
try {
StorageState curState = sd.analyzeStorage(startOpt, this);
// sd is locked but not opened
switch (curState) {
case NORMAL:
break;
case NON_EXISTENT:
LOG.info("Block pool storage directory " + dataDir + " does not exist");
throw new IOException("Storage directory " + dataDir
+ " does not exist");
case NOT_FORMATTED: // format
LOG.info("Block pool storage directory " + dataDir
+ " is not formatted for " + nsInfo.getBlockPoolID());
LOG.info("Formatting ...");
format(sd, nsInfo);
break;
default: // recovery part is common
sd.doRecover(curState);
}
// 2. Do transitions
// Each storage directory is treated individually.
// During startup some of them can upgrade or roll back
// while others could be up-to-date for the regular startup.
doTransition(datanode, sd, nsInfo, startOpt);
if (getCTime() != nsInfo.getCTime()) {
throw new IOException(
"Data-node and name-node CTimes must be the same.");
}
// 3. Update successfully loaded storage.
setServiceLayoutVersion(getServiceLayoutVersion());
writeProperties(sd);
return sd;
} catch (IOException ioe) {
sd.unlock();
throw ioe;
}
}
/**
* Analyze and load storage directories. Recover from previous transitions if
* required.
*
* The block pool storages are either all analyzed or none of them is loaded.
* Therefore, a failure on loading any block pool storage results in a faulty
* data volume.
*
* @param datanode Datanode to which this storage belongs
* @param nsInfo namespace information
* @param dataDirs storage directories of block pool
* @param startOpt startup option
* @return an array of loaded block pool directories.
* @throws IOException on error
*/
List<StorageDirectory> loadBpStorageDirectories(
DataNode datanode, NamespaceInfo nsInfo,
Collection<File> dataDirs, StartupOption startOpt) throws IOException {
List<StorageDirectory> succeedDirs = Lists.newArrayList();
try {
for (File dataDir : dataDirs) {
if (containsStorageDir(dataDir)) {
throw new IOException(
"BlockPoolSliceStorage.recoverTransitionRead: " +
"attempt to load an used block storage: " + dataDir);
}
StorageDirectory sd =
loadStorageDirectory(datanode, nsInfo, dataDir, startOpt);
succeedDirs.add(sd);
}
} catch (IOException e) {
LOG.warn("Failed to analyze storage directories for block pool "
+ nsInfo.getBlockPoolID(), e);
throw e;
}
return succeedDirs;
}
/**
* Analyze storage directories. Recover from previous transitions if required.
*
* The block pool storages are either all analyzed or none of them is loaded.
* Therefore, a failure on loading any block pool storage results in a faulty
* data volume.
*
* @param datanode Datanode to which this storage belongs
* @param nsInfo namespace information
* @param dataDirs storage directories of block pool
* @param startOpt startup option
* @throws IOException on error
*/
void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
Collection<File> dataDirs, StartupOption startOpt) throws IOException {
LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID());
for (StorageDirectory sd : loadBpStorageDirectories(
datanode, nsInfo, dataDirs, startOpt)) {
addStorageDir(sd);
}
}
/**
* Format a block pool slice storage.
* @param dnCurDir DataStorage current directory
* @param nsInfo the name space info
* @throws IOException Signals that an I/O exception has occurred.
*/
void format(File dnCurDir, NamespaceInfo nsInfo) throws IOException {
File curBpDir = getBpRoot(nsInfo.getBlockPoolID(), dnCurDir);
StorageDirectory bpSdir = new StorageDirectory(curBpDir);
format(bpSdir, nsInfo);
}
/**
* Format a block pool slice storage.
* @param bpSdir the block pool storage
* @param nsInfo the name space info
* @throws IOException Signals that an I/O exception has occurred.
*/
private void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOException {
LOG.info("Formatting block pool " + blockpoolID + " directory "
+ bpSdir.getCurrentDir());
bpSdir.clearDirectory(); // create directory
this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
this.cTime = nsInfo.getCTime();
this.namespaceID = nsInfo.getNamespaceID();
this.blockpoolID = nsInfo.getBlockPoolID();
writeProperties(bpSdir);
}
/**
* Remove block pool level storage directory.
* @param absPathToRemove the absolute path of the root for the block pool
* level storage to remove.
*/
void remove(File absPathToRemove) {
Preconditions.checkArgument(absPathToRemove.isAbsolute());
LOG.info("Removing block level storage: " + absPathToRemove);
for (Iterator<StorageDirectory> it = this.storageDirs.iterator();
it.hasNext(); ) {
StorageDirectory sd = it.next();
if (sd.getRoot().getAbsoluteFile().equals(absPathToRemove)) {
it.remove();
break;
}
}
}
/**
* Set layoutVersion, namespaceID and blockpoolID into block pool storage
* VERSION file
*/
@Override
protected void setPropertiesFromFields(Properties props, StorageDirectory sd)
throws IOException {
props.setProperty("layoutVersion", String.valueOf(layoutVersion));
props.setProperty("namespaceID", String.valueOf(namespaceID));
props.setProperty("blockpoolID", blockpoolID);
props.setProperty("cTime", String.valueOf(cTime));
}
/** Validate and set block pool ID */
private void setBlockPoolID(File storage, String bpid)
throws InconsistentFSStateException {
if (bpid == null || bpid.equals("")) {
throw new InconsistentFSStateException(storage, "file "
+ STORAGE_FILE_VERSION + " is invalid.");
}
if (!blockpoolID.equals("") && !blockpoolID.equals(bpid)) {
throw new InconsistentFSStateException(storage,
"Unexpected blockpoolID " + bpid + ". Expected " + blockpoolID);
}
blockpoolID = bpid;
}
@Override
protected void setFieldsFromProperties(Properties props, StorageDirectory sd)
throws IOException {
setLayoutVersion(props, sd);
setNamespaceID(props, sd);
setcTime(props, sd);
String sbpid = props.getProperty("blockpoolID");
setBlockPoolID(sd.getRoot(), sbpid);
}
/**
* Analyze whether a transition of the BP state is required and
* perform it if necessary.
* <br>
* Rollback if previousLV >= LAYOUT_VERSION && prevCTime <= namenode.cTime.
* Upgrade if this.LV > LAYOUT_VERSION || this.cTime < namenode.cTime.
* Regular startup if this.LV = LAYOUT_VERSION && this.cTime = namenode.cTime.
*
* @param sd storage directory <SD>/current/<bpid>
* @param nsInfo namespace info
* @param startOpt startup option
* @throws IOException
*/
private void doTransition(DataNode datanode, StorageDirectory sd,
NamespaceInfo nsInfo, StartupOption startOpt) throws IOException {
if (startOpt == StartupOption.ROLLBACK && sd.getPreviousDir().exists()) {
Preconditions.checkState(!getTrashRootDir(sd).exists(),
sd.getPreviousDir() + " and " + getTrashRootDir(sd) + " should not " +
" both be present.");
doRollback(sd, nsInfo); // rollback if applicable
} else if (startOpt == StartupOption.ROLLBACK &&
!sd.getPreviousDir().exists()) {
// Restore all the files in the trash. The restored files are retained
// during rolling upgrade rollback. They are deleted during rolling
// upgrade downgrade.
int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd));
LOG.info("Restored " + restored + " block files from trash.");
}
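// Load layoutVersion, namespaceID, cTime and blockpoolID from the VERSION
// file before deciding between regular startup, upgrade and rollback.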
readProperties(sd);
checkVersionUpgradable(this.layoutVersion);
assert this.layoutVersion >= HdfsServerConstants.DATANODE_LAYOUT_VERSION
: "Future version is not allowed";
if (getNamespaceID() != nsInfo.getNamespaceID()) {
throw new IOException("Incompatible namespaceIDs in "
+ sd.getRoot().getCanonicalPath() + ": namenode namespaceID = "
+ nsInfo.getNamespaceID() + "; datanode namespaceID = "
+ getNamespaceID());
}
if (!blockpoolID.equals(nsInfo.getBlockPoolID())) {
throw new IOException("Incompatible blockpoolIDs in "
+ sd.getRoot().getCanonicalPath() + ": namenode blockpoolID = "
+ nsInfo.getBlockPoolID() + "; datanode blockpoolID = "
+ blockpoolID);
}
if (this.layoutVersion == HdfsServerConstants.DATANODE_LAYOUT_VERSION
&& this.cTime == nsInfo.getCTime()) {
return; // regular startup
}
if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION) {
int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd));
LOG.info("Restored " + restored + " block files from trash " +
"before the layout upgrade. These blocks will be moved to " +
"the previous directory during the upgrade");
}
if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION
|| this.cTime < nsInfo.getCTime()) {
doUpgrade(datanode, sd, nsInfo); // upgrade
return;
}
// layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
// must shutdown
throw new IOException("Datanode state: LV = " + this.getLayoutVersion()
+ " CTime = " + this.getCTime()
+ " is newer than the namespace state: LV = "
+ nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime());
}
/**
* Upgrade to any release after 0.22 (0.22 included), e.g. 0.22 => 0.23.
* Upgrade procedure is as follows:
* <ol>
* <li>If <SD>/current/<bpid>/previous exists then delete it</li>
* <li>Rename <SD>/current/<bpid>/current to
* <SD>/current/bpid/current/previous.tmp</li>
* <li>Create new <SD>current/<bpid>/current directory</li>
* <ol>
* <li>Hard links for block files are created from previous.tmp to current</li>
* <li>Save new version file in current directory</li>
* </ol>
* <li>Rename previous.tmp to previous</li> </ol>
*
* @param bpSd storage directory <SD>/current/<bpid>
* @param nsInfo Namespace Info from the namenode
* @throws IOException on error
*/
void doUpgrade(DataNode datanode, StorageDirectory bpSd, NamespaceInfo nsInfo)
throws IOException {
// Upgrading is applicable only to releases with federation support or later
if (!DataNodeLayoutVersion.supports(
LayoutVersion.Feature.FEDERATION, layoutVersion)) {
return;
}
LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
+ ".\n old LV = " + this.getLayoutVersion() + "; old CTime = "
+ this.getCTime() + ".\n new LV = " + HdfsServerConstants.DATANODE_LAYOUT_VERSION
+ "; new CTime = " + nsInfo.getCTime());
// get <SD>/previous directory
String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath());
StorageDirectory dnSdStorage = new StorageDirectory(new File(dnRoot));
File dnPrevDir = dnSdStorage.getPreviousDir();
// If <SD>/previous directory exists delete it
if (dnPrevDir.exists()) {
deleteDir(dnPrevDir);
}
File bpCurDir = bpSd.getCurrentDir();
File bpPrevDir = bpSd.getPreviousDir();
assert bpCurDir.exists() : "BP level current directory must exist.";
cleanupDetachDir(new File(bpCurDir, DataStorage.STORAGE_DIR_DETACHED));
// 1. Delete <SD>/current/<bpid>/previous dir before upgrading
if (bpPrevDir.exists()) {
deleteDir(bpPrevDir);
}
File bpTmpDir = bpSd.getPreviousTmp();
assert !bpTmpDir.exists() : "previous.tmp directory must not exist.";
// 2. Rename <SD>/current/<bpid>/current to
// <SD>/current/<bpid>/previous.tmp
rename(bpCurDir, bpTmpDir);
// 3. Create new <SD>/current with block files hardlinks and VERSION
linkAllBlocks(datanode, bpTmpDir, bpCurDir);
this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
assert this.namespaceID == nsInfo.getNamespaceID()
: "Data-node and name-node layout versions must be the same.";
this.cTime = nsInfo.getCTime();
writeProperties(bpSd);
// 4.rename <SD>/current/<bpid>/previous.tmp to
// <SD>/current/<bpid>/previous
rename(bpTmpDir, bpPrevDir);
LOG.info("Upgrade of block pool " + blockpoolID + " at " + bpSd.getRoot()
+ " is complete");
}
/**
* Cleanup the detachDir.
*
* If the directory is not empty, report an error; otherwise remove the
* directory.
*
* @param detachDir detach directory
* @throws IOException if the directory is not empty or it can not be removed
*/
private void cleanupDetachDir(File detachDir) throws IOException {
if (!DataNodeLayoutVersion.supports(
LayoutVersion.Feature.APPEND_RBW_DIR, layoutVersion)
&& detachDir.exists() && detachDir.isDirectory()) {
if (FileUtil.list(detachDir).length != 0) {
throw new IOException("Detached directory " + detachDir
+ " is not empty. Please manually move each file under this "
+ "directory to the finalized directory if the finalized "
+ "directory tree does not have the file.");
} else if (!detachDir.delete()) {
throw new IOException("Cannot remove directory " + detachDir);
}
}
}
/**
* Restore all files from the trash directory to their corresponding
* locations under current/
*/
private int restoreBlockFilesFromTrash(File trashRoot)
throws IOException {
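// Walk the trash tree recursively, moving every file back to its
// corresponding directory under current/ and counting the restores.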
int filesRestored = 0;
File[] children = trashRoot.exists() ? trashRoot.listFiles() : null;
if (children == null) {
return 0;
}
File restoreDirectory = null;
for (File child : children) {
if (child.isDirectory()) {
// Recurse to process subdirectories.
filesRestored += restoreBlockFilesFromTrash(child);
continue;
}
if (restoreDirectory == null) {
restoreDirectory = new File(getRestoreDirectory(child));
if (!restoreDirectory.exists() && !restoreDirectory.mkdirs()) {
throw new IOException("Failed to create directory " + restoreDirectory);
}
}
final File newChild = new File(restoreDirectory, child.getName());
if (newChild.exists() && newChild.length() >= child.length()) {
// Failsafe - we should not hit this case but let's make sure
// we never overwrite a newer version of a block file with an
// older version.
LOG.info("Not overwriting " + newChild + " with smaller file from " +
"trash directory. This message can be safely ignored.");
} else if (!child.renameTo(newChild)) {
throw new IOException("Failed to rename " + child + " to " + newChild);
} else {
++filesRestored;
}
}
FileUtil.fullyDelete(trashRoot);
return filesRestored;
}
/*
* Roll back to old snapshot at the block pool level
* If previous directory exists:
* <ol>
* <li>Rename <SD>/current/<bpid>/current to removed.tmp</li>
* <li>Rename * <SD>/current/<bpid>/previous to current</li>
* <li>Remove removed.tmp</li>
* </ol>
*
* Do nothing if previous directory does not exist.
* @param bpSd Block pool storage directory at <SD>/current/<bpid>
*/
void doRollback(StorageDirectory bpSd, NamespaceInfo nsInfo)
throws IOException {
File prevDir = bpSd.getPreviousDir();
// regular startup if previous dir does not exist
if (!prevDir.exists())
return;
// read attributes out of the VERSION file of previous directory
BlockPoolSliceStorage prevInfo = new BlockPoolSliceStorage();
prevInfo.readPreviousVersionProperties(bpSd);
// We allow rollback to a state, which is either consistent with
// the namespace state or can be further upgraded to it.
// In other words, we can only roll back when ( storedLV >= software LV)
// && ( DN.previousCTime <= NN.ctime)
if (!(prevInfo.getLayoutVersion() >= HdfsServerConstants.DATANODE_LAYOUT_VERSION &&
prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback
throw new InconsistentFSStateException(bpSd.getRoot(),
"Cannot rollback to a newer state.\nDatanode previous state: LV = "
+ prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
+ " is newer than the namespace state: LV = "
+ HdfsServerConstants.DATANODE_LAYOUT_VERSION + " CTime = " + nsInfo.getCTime());
}
LOG.info("Rolling back storage directory " + bpSd.getRoot()
+ ".\n target LV = " + nsInfo.getLayoutVersion()
+ "; target CTime = " + nsInfo.getCTime());
File tmpDir = bpSd.getRemovedTmp();
assert !tmpDir.exists() : "removed.tmp directory must not exist.";
// 1. rename current to tmp
File curDir = bpSd.getCurrentDir();
assert curDir.exists() : "Current directory must exist.";
rename(curDir, tmpDir);
// 2. rename previous to current
rename(prevDir, curDir);
// 3. delete removed.tmp dir
deleteDir(tmpDir);
LOG.info("Rollback of " + bpSd.getRoot() + " is complete");
}
/*
* Finalize the block pool storage by deleting <BP>/previous directory
* that holds the snapshot.
*/
void doFinalize(File dnCurDir) throws IOException {
File bpRoot = getBpRoot(blockpoolID, dnCurDir);
StorageDirectory bpSd = new StorageDirectory(bpRoot);
// block pool level previous directory
File prevDir = bpSd.getPreviousDir();
if (!prevDir.exists()) {
return; // already finalized
}
final String dataDirPath = bpSd.getRoot().getCanonicalPath();
LOG.info("Finalizing upgrade for storage directory " + dataDirPath
+ ".\n cur LV = " + this.getLayoutVersion() + "; cur CTime = "
+ this.getCTime());
assert bpSd.getCurrentDir().exists() : "Current directory must exist.";
// rename previous to finalized.tmp
final File tmpDir = bpSd.getFinalizedTmp();
rename(prevDir, tmpDir);
// delete finalized.tmp dir in a separate thread
new Daemon(new Runnable() {
@Override
public void run() {
try {
deleteDir(tmpDir);
} catch (IOException ex) {
LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
}
LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
}
@Override
public String toString() {
return "Finalize " + dataDirPath;
}
}).start();
}
/**
* Hardlink all finalized and RBW blocks in fromDir to toDir
*
* @param fromDir directory where the snapshot is stored
* @param toDir the current data directory
* @throws IOException if error occurs during hardlink
*/
private void linkAllBlocks(DataNode datanode, File fromDir, File toDir)
throws IOException {
// do the link
int diskLayoutVersion = this.getLayoutVersion();
// hardlink finalized blocks in tmpDir
HardLink hardLink = new HardLink();
DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED),
new File(toDir,DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_RBW),
new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
LOG.info( hardLink.linkStats.report() );
}
/**
* gets the data node storage directory based on block pool storage
*/
private static String getDataNodeStorageRoot(String bpRoot) {
Matcher matcher = BLOCK_POOL_PATH_PATTERN.matcher(bpRoot);
if (matcher.matches()) {
// return the data node root directory
return matcher.group(1);
}
return bpRoot;
}
@Override
public String toString() {
return super.toString() + ";bpid=" + blockpoolID;
}
/**
* Get a block pool storage root based on data node storage root
* @param bpID block pool ID
* @param dnCurDir data node storage root directory
* @return root directory for block pool storage
*/
public static File getBpRoot(String bpID, File dnCurDir) {
return new File(dnCurDir, bpID);
}
@Override
public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
return false;
}
private File getTrashRootDir(StorageDirectory sd) {
return new File(sd.getRoot(), TRASH_ROOT_DIR);
}
/**
* Determine whether we can use trash for the given blockFile. Trash
* is disallowed if a 'previous' directory exists for the
* storage directory containing the block.
*/
@VisibleForTesting
public boolean isTrashAllowed(File blockFile) {
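// Rewrite the block file's parent path into the block pool's 'previous'
// directory and check whether it exists; trash is disallowed if it does.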
Matcher matcher = BLOCK_POOL_CURRENT_PATH_PATTERN.matcher(blockFile.getParent());
String previousDir = matcher.replaceFirst("$1$2" + STORAGE_DIR_PREVIOUS);
return !(new File(previousDir)).exists();
}
/**
* Get a target subdirectory under trash/ for a given block file that is being
* deleted.
*
* The subdirectory structure under trash/ mirrors that under current/ to keep
* implicit memory of where the files are to be restored (if necessary).
*
* @return the trash directory for a given block file that is being deleted.
*/
public String getTrashDirectory(File blockFile) {
if (isTrashAllowed(blockFile)) {
Matcher matcher = BLOCK_POOL_CURRENT_PATH_PATTERN.matcher(blockFile.getParent());
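// Replace the 'current' path component with 'trash', preserving the
// subdirectory structure that follows it ($4).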
String trashDirectory = matcher.replaceFirst("$1$2" + TRASH_ROOT_DIR + "$4");
return trashDirectory;
}
return null;
}
/**
* Get a target subdirectory under current/ for a given block file that is being
* restored from trash.
*
* The subdirectory structure under trash/ mirrors that under current/ to keep
* implicit memory of where the files are to be restored.
*
* @return the target directory to restore a previously deleted block file.
*/
@VisibleForTesting
String getRestoreDirectory(File blockFile) {
Matcher matcher = BLOCK_POOL_TRASH_PATH_PATTERN.matcher(blockFile.getParent());
String restoreDirectory = matcher.replaceFirst("$1$2" + STORAGE_DIR_CURRENT + "$4");
LOG.info("Restoring " + blockFile + " to " + restoreDirectory);
return restoreDirectory;
}
/**
* Delete all files and directories in the trash directories.
*/
public void clearTrash() {
for (StorageDirectory sd : storageDirs) {
File trashRoot = getTrashRootDir(sd);
Preconditions.checkState(!(trashRoot.exists() && sd.getPreviousDir().exists()));
FileUtil.fullyDelete(trashRoot);
LOG.info("Cleared trash for storage directory " + sd);
}
}
/** trash is enabled if at least one storage directory contains trash root */
@VisibleForTesting
public boolean trashEnabled() {
for (StorageDirectory sd : storageDirs) {
if (getTrashRootDir(sd).exists()) {
return true;
}
}
return false;
}
/**
* Create a rolling upgrade marker file for each BP storage root, if it
* does not exist already.
*/
public void setRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
throws IOException {
for (StorageDirectory sd : dnStorageDirs) {
File bpRoot = getBpRoot(blockpoolID, sd.getCurrentDir());
File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE);
if (!storagesWithRollingUpgradeMarker.contains(bpRoot.toString())) {
if (!markerFile.exists() && markerFile.createNewFile()) {
LOG.info("Created " + markerFile);
} else {
LOG.info(markerFile + " already exists.");
}
storagesWithRollingUpgradeMarker.add(bpRoot.toString());
storagesWithoutRollingUpgradeMarker.remove(bpRoot.toString());
}
}
}
/**
* Check whether the rolling upgrade marker file exists for each BP storage
* root. If it does exist, then the marker file is cleared and more
* importantly the layout upgrade is finalized.
*/
public void clearRollingUpgradeMarkers(List<StorageDirectory> dnStorageDirs)
throws IOException {
for (StorageDirectory sd : dnStorageDirs) {
File bpRoot = getBpRoot(blockpoolID, sd.getCurrentDir());
File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE);
if (!storagesWithoutRollingUpgradeMarker.contains(bpRoot.toString())) {
if (markerFile.exists()) {
LOG.info("Deleting " + markerFile);
doFinalize(sd.getCurrentDir());
if (!markerFile.delete()) {
LOG.warn("Failed to delete " + markerFile);
}
}
storagesWithoutRollingUpgradeMarker.add(bpRoot.toString());
storagesWithRollingUpgradeMarker.remove(bpRoot.toString());
}
}
}
}
| 31,088 | 37.619876 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.net.SocketOutputStream;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_SEQUENTIAL;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* Reads a block from the disk and sends it to a recipient.
*
* Data is sent from the BlockSender in the following format:
* <br><b>Data format:</b> <pre>
* +--------------------------------------------------+
* | ChecksumHeader | Sequence of data PACKETS... |
* +--------------------------------------------------+
* </pre>
* <b>ChecksumHeader format:</b> <pre>
* +--------------------------------------------------+
* | 1 byte CHECKSUM_TYPE | 4 byte BYTES_PER_CHECKSUM |
* +--------------------------------------------------+
* </pre>
* An empty packet is sent to mark the end of block and read completion.
*
* A PACKET contains a packet header, checksums and data. The amount of
* data carried is set by BUFFER_SIZE.
* <pre>
* +-----------------------------------------------------+
* | Variable length header. See {@link PacketHeader} |
* +-----------------------------------------------------+
* | x byte checksum data. x is defined below |
* +-----------------------------------------------------+
* | actual data ...... |
* +-----------------------------------------------------+
*
* Data is made of Chunks. Each chunk is of length <= BYTES_PER_CHECKSUM.
* A checksum is calculated for each chunk.
*
* x = (length of data + BYTES_PER_CHECKSUM - 1)/BYTES_PER_CHECKSUM *
* CHECKSUM_SIZE
*
* CHECKSUM_SIZE depends on CHECKSUM_TYPE (usually, 4 for CRC32)
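*
* For example, with BYTES_PER_CHECKSUM = 512 and CRC32 checksums
* (CHECKSUM_SIZE = 4), a packet carrying 64KB of data holds 128 chunks
* and x = 512 bytes of checksum data.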
* </pre>
*
* The client reads data until it receives a packet with
* "LastPacketInBlock" set to true or with a zero length. If there is
* no checksum error, it replies to DataNode with OP_STATUS_CHECKSUM_OK.
*/
class BlockSender implements java.io.Closeable {
static final Log LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;
private static final boolean is32Bit =
System.getProperty("sun.arch.data.model").equals("32");
/**
* Minimum buffer used while sending data to clients. Used only if
* transferTo() is enabled. 64KB is not that large; it could be larger, but
* it is unclear whether a larger buffer would yield much more improvement.
*/
private static final int MIN_BUFFER_WITH_TRANSFERTO = 64*1024;
private static final int IO_FILE_BUFFER_SIZE;
static {
HdfsConfiguration conf = new HdfsConfiguration();
IO_FILE_BUFFER_SIZE = DFSUtil.getIoFileBufferSize(conf);
}
private static final int TRANSFERTO_BUFFER_SIZE = Math.max(
IO_FILE_BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO);
/** the block to read from */
private final ExtendedBlock block;
/** Stream to read block data from */
private InputStream blockIn;
/** updated while using transferTo() */
private long blockInPosition = -1;
/** Stream to read checksum */
private DataInputStream checksumIn;
/** Checksum utility */
private final DataChecksum checksum;
/** Initial position to read */
private long initialOffset;
/** Current position of read */
private long offset;
/** Position of last byte to read from block file */
private final long endOffset;
/** Number of bytes in chunk used for computing checksum */
private final int chunkSize;
/** Number of bytes of checksum computed for a chunk */
private final int checksumSize;
/** If true, failure to read checksum is ignored */
private final boolean corruptChecksumOk;
/** Sequence number of packet being sent */
private long seqno;
/** Set to true if transferTo is allowed for sending data to the client */
private final boolean transferToAllowed;
/** Set to true once the entire requested byte range has been sent to the client */
private boolean sentEntireByteRange;
/** When true, verify checksum while reading from checksum file */
private final boolean verifyChecksum;
/** Format used to print client trace log messages */
private final String clientTraceFmt;
private volatile ChunkChecksum lastChunkChecksum = null;
private DataNode datanode;
/** The file descriptor of the block being sent */
private FileDescriptor blockInFd;
/** The reference to the volume where the block is located */
private FsVolumeReference volumeRef;
// Cache-management related fields
private final long readaheadLength;
private ReadaheadRequest curReadahead;
private final boolean alwaysReadahead;
private final boolean dropCacheBehindLargeReads;
private final boolean dropCacheBehindAllReads;
private long lastCacheDropOffset;
@VisibleForTesting
static long CACHE_DROP_INTERVAL_BYTES = 1024 * 1024; // 1MB
/**
* See {@link BlockSender#isLongRead()}
*/
private static final long LONG_READ_THRESHOLD_BYTES = 256 * 1024;
/**
* Constructor
*
* @param block Block that is being read
* @param startOffset starting offset to read from
* @param length length of data to read
* @param corruptChecksumOk if true, corrupt checksum is okay
* @param verifyChecksum verify checksum while reading the data
* @param sendChecksum send checksum to client.
* @param datanode datanode from which the block is being read
* @param clientTraceFmt format string used to print client trace logs
* @throws IOException
*/
BlockSender(ExtendedBlock block, long startOffset, long length,
boolean corruptChecksumOk, boolean verifyChecksum,
boolean sendChecksum, DataNode datanode, String clientTraceFmt,
CachingStrategy cachingStrategy)
throws IOException {
try {
this.block = block;
this.corruptChecksumOk = corruptChecksumOk;
this.verifyChecksum = verifyChecksum;
this.clientTraceFmt = clientTraceFmt;
/*
* If the client asked for the cache to be dropped behind all reads,
* we honor that. Otherwise, we use the DataNode defaults.
* When using DataNode defaults, we use a heuristic where we only
* drop the cache for large reads.
*/
if (cachingStrategy.getDropBehind() == null) {
this.dropCacheBehindAllReads = false;
this.dropCacheBehindLargeReads =
datanode.getDnConf().dropCacheBehindReads;
} else {
this.dropCacheBehindAllReads =
this.dropCacheBehindLargeReads =
cachingStrategy.getDropBehind().booleanValue();
}
/*
* Similarly, if readahead was explicitly requested, we always do it.
* Otherwise, we read ahead based on the DataNode settings, and only
* when the reads are large.
*/
if (cachingStrategy.getReadahead() == null) {
this.alwaysReadahead = false;
this.readaheadLength = datanode.getDnConf().readaheadLength;
} else {
this.alwaysReadahead = true;
this.readaheadLength = cachingStrategy.getReadahead().longValue();
}
this.datanode = datanode;
if (verifyChecksum) {
// To simplify implementation, callers may not specify verification
// without sending.
Preconditions.checkArgument(sendChecksum,
"If verifying checksum, currently must also send it.");
}
final Replica replica;
final long replicaVisibleLength;
synchronized(datanode.data) {
replica = getReplica(block, datanode);
replicaVisibleLength = replica.getVisibleLength();
}
// if there is a write in progress
ChunkChecksum chunkChecksum = null;
if (replica instanceof ReplicaBeingWritten) {
final ReplicaBeingWritten rbw = (ReplicaBeingWritten)replica;
waitForMinLength(rbw, startOffset + length);
chunkChecksum = rbw.getLastChecksumAndDataLen();
}
if (replica.getGenerationStamp() < block.getGenerationStamp()) {
throw new IOException("Replica gen stamp < block genstamp, block="
+ block + ", replica=" + replica);
} else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
if (DataNode.LOG.isDebugEnabled()) {
DataNode.LOG.debug("Bumping up the client provided"
+ " block's genstamp to latest " + replica.getGenerationStamp()
+ " for block " + block);
}
block.setGenerationStamp(replica.getGenerationStamp());
}
if (replicaVisibleLength < 0) {
throw new IOException("Replica is not readable, block="
+ block + ", replica=" + replica);
}
if (DataNode.LOG.isDebugEnabled()) {
DataNode.LOG.debug("block=" + block + ", replica=" + replica);
}
// transferToFully() fails on 32 bit platforms for block sizes >= 2GB,
// use normal transfer in those cases
this.transferToAllowed = datanode.getDnConf().transferToAllowed &&
(!is32Bit || length <= Integer.MAX_VALUE);
// Obtain a reference before reading data
this.volumeRef = datanode.data.getVolume(block).obtainReference();
/*
* (corruptChecksumOK, meta_file_exist): operation
* True, True: will verify checksum
* True, False: No verify, e.g., need to read data from a corrupted file
* False, True: will verify checksum
* False, False: throws IOException file not found
*/
DataChecksum csum = null;
if (verifyChecksum || sendChecksum) {
LengthInputStream metaIn = null;
boolean keepMetaInOpen = false;
try {
metaIn = datanode.data.getMetaDataInputStream(block);
if (!corruptChecksumOk || metaIn != null) {
if (metaIn == null) {
//need checksum but meta-data not found
throw new FileNotFoundException("Meta-data not found for " +
block);
}
// The meta file will contain only the header if the NULL checksum
// type was used, or if the replica was written to transient storage.
// Checksum verification is not performed for replicas on transient
// storage. The header is important for determining the checksum
// type later when lazy persistence copies the block to non-transient
// storage and computes the checksum.
if (metaIn.getLength() > BlockMetadataHeader.getHeaderSize()) {
checksumIn = new DataInputStream(new BufferedInputStream(
metaIn, IO_FILE_BUFFER_SIZE));
csum = BlockMetadataHeader.readDataChecksum(checksumIn, block);
keepMetaInOpen = true;
}
} else {
LOG.warn("Could not find metadata file for " + block);
}
} finally {
if (!keepMetaInOpen) {
IOUtils.closeStream(metaIn);
}
}
}
if (csum == null) {
// The number of bytes per checksum here determines the alignment
// of reads: we always start reading at a checksum chunk boundary,
// even if the checksum type is NULL. So, choosing too big of a value
// would risk sending too much unnecessary data. 512 (1 disk sector)
// is likely to result in minimal extra IO.
csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512);
}
/*
* If chunkSize is very large, then the metadata file is most likely
* corrupted. For now just truncate bytesPerChecksum to blockLength.
*/
int size = csum.getBytesPerChecksum();
if (size > 10*1024*1024 && size > replicaVisibleLength) {
csum = DataChecksum.newDataChecksum(csum.getChecksumType(),
Math.max((int)replicaVisibleLength, 10*1024*1024));
size = csum.getBytesPerChecksum();
}
chunkSize = size;
checksum = csum;
checksumSize = checksum.getChecksumSize();
length = length < 0 ? replicaVisibleLength : length;
// end is either last byte on disk or the length for which we have a
// checksum
long end = chunkChecksum != null ? chunkChecksum.getDataLength()
: replica.getBytesOnDisk();
if (startOffset < 0 || startOffset > end
|| (length + startOffset) > end) {
String msg = " Offset " + startOffset + " and length " + length
+ " don't match block " + block + " ( blockLen " + end + " )";
LOG.warn(datanode.getDNRegistrationForBP(block.getBlockPoolId()) +
":sendBlock() : " + msg);
throw new IOException(msg);
}
// Ensure the read offset is positioned at the beginning of a chunk
offset = startOffset - (startOffset % chunkSize);
if (length >= 0) {
// Ensure endOffset points to end of chunk.
long tmpLen = startOffset + length;
if (tmpLen % chunkSize != 0) {
tmpLen += (chunkSize - tmpLen % chunkSize);
}
if (tmpLen < end) {
// will use on-disk checksum here since the end is a stable chunk
end = tmpLen;
} else if (chunkChecksum != null) {
// last chunk is changing. flag that we need to use in-memory checksum
this.lastChunkChecksum = chunkChecksum;
}
}
endOffset = end;
// seek to the right offsets
if (offset > 0 && checksumIn != null) {
long checksumSkip = (offset / chunkSize) * checksumSize;
// note blockInStream is seeked when created below
if (checksumSkip > 0) {
// Should we use seek() for checksum file as well?
IOUtils.skipFully(checksumIn, checksumSkip);
}
}
seqno = 0;
if (DataNode.LOG.isDebugEnabled()) {
DataNode.LOG.debug("replica=" + replica);
}
blockIn = datanode.data.getBlockInputStream(block, offset); // seek to offset
if (blockIn instanceof FileInputStream) {
blockInFd = ((FileInputStream)blockIn).getFD();
} else {
blockInFd = null;
}
} catch (IOException ioe) {
IOUtils.closeStream(this);
IOUtils.closeStream(blockIn);
throw ioe;
}
}
/**
* close opened files.
*/
@Override
public void close() throws IOException {
if (blockInFd != null &&
((dropCacheBehindAllReads) ||
(dropCacheBehindLargeReads && isLongRead()))) {
try {
NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
block.getBlockName(), blockInFd, lastCacheDropOffset,
offset - lastCacheDropOffset, POSIX_FADV_DONTNEED);
} catch (Exception e) {
LOG.warn("Unable to drop cache on file close", e);
}
}
if (curReadahead != null) {
curReadahead.cancel();
}
IOException ioe = null;
if(checksumIn!=null) {
try {
checksumIn.close(); // close checksum file
} catch (IOException e) {
ioe = e;
}
checksumIn = null;
}
if(blockIn!=null) {
try {
blockIn.close(); // close data file
} catch (IOException e) {
ioe = e;
}
blockIn = null;
blockInFd = null;
}
if (volumeRef != null) {
IOUtils.cleanup(null, volumeRef);
volumeRef = null;
}
// throw IOException if there is any
if(ioe!= null) {
throw ioe;
}
}
private static Replica getReplica(ExtendedBlock block, DataNode datanode)
throws ReplicaNotFoundException {
Replica replica = datanode.data.getReplica(block.getBlockPoolId(),
block.getBlockId());
if (replica == null) {
throw new ReplicaNotFoundException(block);
}
return replica;
}
/**
* Wait for rbw replica to reach the length
* @param rbw replica that is being written to
* @param len minimum length to reach
* @throws IOException on failing to reach the len in given wait time
*/
private static void waitForMinLength(ReplicaBeingWritten rbw, long len)
throws IOException {
// Wait for 3 seconds for rbw replica to reach the minimum length
for (int i = 0; i < 30 && rbw.getBytesOnDisk() < len; i++) {
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
long bytesOnDisk = rbw.getBytesOnDisk();
if (bytesOnDisk < len) {
throw new IOException(
String.format("Need %d bytes, but only %d bytes available", len,
bytesOnDisk));
}
}
/**
* Converts an IOException (not subclasses) to SocketException.
* This is typically done to indicate to upper layers that the error
* was a socket error rather than often more serious exceptions like
* disk errors.
*/
private static IOException ioeToSocketException(IOException ioe) {
if (ioe.getClass().equals(IOException.class)) {
// "se" could be a new class in stead of SocketException.
IOException se = new SocketException("Original Exception : " + ioe);
se.initCause(ioe);
/* Change the stacktrace so that original trace is not truncated
* when printed.*/
se.setStackTrace(ioe.getStackTrace());
return se;
}
// otherwise just return the same exception.
return ioe;
}
/**
* @param datalen Length of data
* @return number of chunks for data of given size
*/
private int numberOfChunks(long datalen) {
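// Ceiling division: e.g. datalen = 1025 with chunkSize = 512 yields 3 chunks.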
return (int) ((datalen + chunkSize - 1)/chunkSize);
}
/**
* Sends a packet with up to maxChunks chunks of data.
*
* @param pkt buffer used for writing packet data
* @param maxChunks maximum number of chunks to send
* @param out stream to send data to
* @param transferTo use transferTo to send data
* @param throttler used for throttling data transfer bandwidth
*/
private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out,
boolean transferTo, DataTransferThrottler throttler) throws IOException {
int dataLen = (int) Math.min(endOffset - offset,
(chunkSize * (long) maxChunks));
int numChunks = numberOfChunks(dataLen); // Number of chunks to be sent in the packet
int checksumDataLen = numChunks * checksumSize;
int packetLen = dataLen + checksumDataLen + 4;
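// The extra 4 bytes account for the payload-length field written ahead of
// the checksums and data, which is counted as part of the packet length.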
boolean lastDataPacket = offset + dataLen == endOffset && dataLen > 0;
// The packet buffer is organized as follows:
// _______HHHHCCCCD?D?D?D?
// ^ ^
// | \ checksumOff
// \ headerOff
// _ padding, since the header is variable-length
// H = header and length prefixes
// C = checksums
// D? = data, if transferTo is false.
int headerLen = writePacketHeader(pkt, dataLen, packetLen);
// Per above, the header doesn't start at the beginning of the
// buffer
int headerOff = pkt.position() - headerLen;
int checksumOff = pkt.position();
byte[] buf = pkt.array();
if (checksumSize > 0 && checksumIn != null) {
readChecksum(buf, checksumOff, checksumDataLen);
// If a write is in progress, use the in-memory checksum for the last (partial) chunk
if (lastDataPacket && lastChunkChecksum != null) {
int start = checksumOff + checksumDataLen - checksumSize;
byte[] updatedChecksum = lastChunkChecksum.getChecksum();
if (updatedChecksum != null) {
System.arraycopy(updatedChecksum, 0, buf, start, checksumSize);
}
}
}
int dataOff = checksumOff + checksumDataLen;
if (!transferTo) { // normal transfer
IOUtils.readFully(blockIn, buf, dataOff, dataLen);
if (verifyChecksum) {
verifyChecksum(buf, dataOff, dataLen, numChunks, checksumOff);
}
}
try {
if (transferTo) {
SocketOutputStream sockOut = (SocketOutputStream)out;
// First write header and checksums
sockOut.write(buf, headerOff, dataOff - headerOff);
// no need to flush since we know out is not a buffered stream
FileChannel fileCh = ((FileInputStream)blockIn).getChannel();
LongWritable waitTime = new LongWritable();
LongWritable transferTime = new LongWritable();
sockOut.transferToFully(fileCh, blockInPosition, dataLen,
waitTime, transferTime);
datanode.metrics.addSendDataPacketBlockedOnNetworkNanos(waitTime.get());
datanode.metrics.addSendDataPacketTransferNanos(transferTime.get());
blockInPosition += dataLen;
} else {
// normal transfer
out.write(buf, headerOff, dataOff + dataLen - headerOff);
}
} catch (IOException e) {
if (e instanceof SocketTimeoutException) {
/*
* writing to client timed out. This happens if the client reads
* part of a block and then decides not to read the rest (but leaves
* the socket open).
*
* Reporting of this case is done in DataXceiver#run
*/
} else {
/* Exception while writing to the client. Connection closure from
* the other end is mostly the case and we do not care much about
* it. But other things can go wrong, especially in transferTo(),
* which we do not want to ignore.
*
* The message parsing below should not be considered as a good
* coding example. NEVER do it to drive a program logic. NEVER.
* It was done here because the NIO throws an IOException for EPIPE.
*/
String ioem = e.getMessage();
if (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset")) {
LOG.error("BlockSender.sendChunks() exception: ", e);
}
datanode.getBlockScanner().markSuspectBlock(
volumeRef.getVolume().getStorageID(),
block);
}
throw ioeToSocketException(e);
}
if (throttler != null) { // rebalancing so throttle
throttler.throttle(packetLen);
}
return dataLen;
}
/**
* Read checksum into given buffer
* @param buf buffer to read the checksum into
* @param checksumOffset offset at which to write the checksum into buf
* @param checksumLen length of checksum to write
* @throws IOException on error
*/
private void readChecksum(byte[] buf, final int checksumOffset,
final int checksumLen) throws IOException {
if (checksumSize <= 0 && checksumIn == null) {
return;
}
try {
checksumIn.readFully(buf, checksumOffset, checksumLen);
} catch (IOException e) {
LOG.warn(" Could not read or failed to veirfy checksum for data"
+ " at offset " + offset + " for block " + block, e);
IOUtils.closeStream(checksumIn);
checksumIn = null;
if (corruptChecksumOk) {
if (checksumOffset < checksumLen) {
// Just fill the array with zeros.
Arrays.fill(buf, checksumOffset, checksumLen, (byte) 0);
}
} else {
throw e;
}
}
}
/**
* Compute checksum for chunks and verify the checksum that is read from
* the metadata file is correct.
*
* @param buf buffer that has checksum and data
* @param dataOffset position where data is written in the buf
* @param datalen length of data
* @param numChunks number of chunks corresponding to data
* @param checksumOffset offset where checksum is written in the buf
* @throws ChecksumException on failed checksum verification
*/
public void verifyChecksum(final byte[] buf, final int dataOffset,
final int datalen, final int numChunks, final int checksumOffset)
throws ChecksumException {
int dOff = dataOffset;
int cOff = checksumOffset;
int dLeft = datalen;
for (int i = 0; i < numChunks; i++) {
checksum.reset();
int dLen = Math.min(dLeft, chunkSize);
checksum.update(buf, dOff, dLen);
if (!checksum.compare(buf, cOff)) {
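        // Report the failure position as an absolute offset within the block, not within the buffer.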
long failedPos = offset + datalen - dLeft;
throw new ChecksumException("Checksum failed at " + failedPos,
failedPos);
}
dLeft -= dLen;
dOff += dLen;
cOff += checksumSize;
}
}
/**
* sendBlock() is used to read block and its metadata and stream the data to
* either a client or to another datanode.
*
* @param out stream to which the block is written to
* @param baseStream optional. if non-null, <code>out</code> is assumed to
* be a wrapper over this stream. This enables optimizations for
* sending the data, e.g.
* {@link SocketOutputStream#transferToFully(FileChannel,
* long, int)}.
* @param throttler for sending data.
* @return total bytes read, including checksum data.
*/
long sendBlock(DataOutputStream out, OutputStream baseStream,
DataTransferThrottler throttler) throws IOException {
TraceScope scope =
Trace.startSpan("sendBlock_" + block.getBlockId(), Sampler.NEVER);
try {
return doSendBlock(out, baseStream, throttler);
} finally {
scope.close();
}
}
private long doSendBlock(DataOutputStream out, OutputStream baseStream,
DataTransferThrottler throttler) throws IOException {
if (out == null) {
throw new IOException( "out stream is null" );
}
initialOffset = offset;
long totalRead = 0;
OutputStream streamForSendChunks = out;
lastCacheDropOffset = initialOffset;
if (isLongRead() && blockInFd != null) {
// Advise that this file descriptor will be accessed sequentially.
NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
block.getBlockName(), blockInFd, 0, 0, POSIX_FADV_SEQUENTIAL);
}
// Trigger readahead of beginning of file if configured.
manageOsCache();
final long startTime = ClientTraceLog.isDebugEnabled() ? System.nanoTime() : 0;
try {
int maxChunksPerPacket;
int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN;
boolean transferTo = transferToAllowed && !verifyChecksum
&& baseStream instanceof SocketOutputStream
&& blockIn instanceof FileInputStream;
if (transferTo) {
FileChannel fileChannel = ((FileInputStream)blockIn).getChannel();
blockInPosition = fileChannel.position();
streamForSendChunks = baseStream;
maxChunksPerPacket = numberOfChunks(TRANSFERTO_BUFFER_SIZE);
// Smaller packet size to only hold checksum when doing transferTo
pktBufSize += checksumSize * maxChunksPerPacket;
} else {
maxChunksPerPacket = Math.max(1,
numberOfChunks(IO_FILE_BUFFER_SIZE));
// Packet size includes both checksum and data
pktBufSize += (chunkSize + checksumSize) * maxChunksPerPacket;
}
ByteBuffer pktBuf = ByteBuffer.allocate(pktBufSize);
while (endOffset > offset && !Thread.currentThread().isInterrupted()) {
manageOsCache();
long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks,
transferTo, throttler);
offset += len;
totalRead += len + (numberOfChunks(len) * checksumSize);
seqno++;
}
// If this thread was interrupted, then it did not send the full block.
if (!Thread.currentThread().isInterrupted()) {
try {
// send an empty packet to mark the end of the block
sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
throttler);
out.flush();
} catch (IOException e) { //socket error
throw ioeToSocketException(e);
}
sentEntireByteRange = true;
}
} finally {
if ((clientTraceFmt != null) && ClientTraceLog.isDebugEnabled()) {
final long endTime = System.nanoTime();
ClientTraceLog.debug(String.format(clientTraceFmt, totalRead,
initialOffset, endTime - startTime));
}
close();
}
return totalRead;
}
/**
* Manage the OS buffer cache by performing read-ahead
* and drop-behind.
*/
private void manageOsCache() throws IOException {
// We can't manage the cache for this block if we don't have a file
// descriptor to work with.
if (blockInFd == null) return;
// Perform readahead if necessary
if ((readaheadLength > 0) && (datanode.readaheadPool != null) &&
(alwaysReadahead || isLongRead())) {
curReadahead = datanode.readaheadPool.readaheadStream(
clientTraceFmt, blockInFd, offset, readaheadLength, Long.MAX_VALUE,
curReadahead);
}
// Drop what we've just read from cache, since we aren't
// likely to need it again
if (dropCacheBehindAllReads ||
(dropCacheBehindLargeReads && isLongRead())) {
long nextCacheDropOffset = lastCacheDropOffset + CACHE_DROP_INTERVAL_BYTES;
if (offset >= nextCacheDropOffset) {
long dropLength = offset - lastCacheDropOffset;
NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
block.getBlockName(), blockInFd, lastCacheDropOffset,
dropLength, POSIX_FADV_DONTNEED);
lastCacheDropOffset = offset;
}
}
}
/**
* Returns true if we have done a long enough read for this block to qualify
* for the DataNode-wide cache management defaults. We avoid applying the
* cache management defaults to smaller reads because the overhead would be
* too high.
*
* Note that if the client explicitly asked for dropBehind, we will do it
* even on short reads.
*
* This is also used to determine when to invoke
* posix_fadvise(POSIX_FADV_SEQUENTIAL).
*/
private boolean isLongRead() {
return (endOffset - initialOffset) > LONG_READ_THRESHOLD_BYTES;
}
/**
* Write packet header into {@code pkt},
* return the length of the header written.
*/
private int writePacketHeader(ByteBuffer pkt, int dataLen, int packetLen) {
pkt.clear();
// both syncBlock and syncPacket are false
PacketHeader header = new PacketHeader(packetLen, offset, seqno,
(dataLen == 0), dataLen, false);
int size = header.getSerializedSize();
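    // Right-align the header so it ends exactly at PKT_MAX_HEADER_LEN;
    // sendPacket() derives headerOff from the resulting buffer position.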
pkt.position(PacketHeader.PKT_MAX_HEADER_LEN - size);
header.putInBuffer(pkt);
return size;
}
boolean didSendEntireByteRange() {
return sentEntireByteRange;
}
/**
* @return the checksum type that will be used with this block transfer.
*/
DataChecksum getChecksum() {
return checksum;
}
/**
* @return the offset into the block file where the sender is currently
* reading.
*/
long getOffset() {
return offset;
}
}
| 32,825 | 36.34471 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.hdfs.server.datanode.DataNode.DN_CLIENTTRACE_FORMAT;
import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FileDescriptor;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.zip.Checksum;
import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSOutputSummer;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver;
import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED;
import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.SYNC_FILE_RANGE_WRITE;
import com.google.common.annotations.VisibleForTesting;
/** A class that receives a block and writes to its own disk, meanwhile
 * may copy it to another site. If a throttler is provided,
* streaming throttling is also supported.
**/
class BlockReceiver implements Closeable {
public static final Log LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;
@VisibleForTesting
static long CACHE_DROP_LAG_BYTES = 8 * 1024 * 1024;
private final long datanodeSlowLogThresholdMs;
private DataInputStream in = null; // from where data are read
private DataChecksum clientChecksum; // checksum used by client
private DataChecksum diskChecksum; // checksum we write to disk
/**
* In the case that the client is writing with a different
* checksum polynomial than the block is stored with on disk,
* the DataNode needs to recalculate checksums before writing.
*/
private final boolean needsChecksumTranslation;
private OutputStream out = null; // to block file at local disk
private FileDescriptor outFd;
private DataOutputStream checksumOut = null; // to crc file at local disk
private final int bytesPerChecksum;
private final int checksumSize;
private final PacketReceiver packetReceiver = new PacketReceiver(false);
protected final String inAddr;
protected final String myAddr;
private String mirrorAddr;
private DataOutputStream mirrorOut;
private Daemon responder = null;
private DataTransferThrottler throttler;
private ReplicaOutputStreams streams;
private DatanodeInfo srcDataNode = null;
private final DataNode datanode;
volatile private boolean mirrorError;
// Cache management state
private boolean dropCacheBehindWrites;
private long lastCacheManagementOffset = 0;
private boolean syncBehindWrites;
private boolean syncBehindWritesInBackground;
/** The client name. It is empty if a datanode is the client */
private final String clientname;
private final boolean isClient;
private final boolean isDatanode;
/** the block to receive */
private final ExtendedBlock block;
/** the replica to write */
private final ReplicaInPipelineInterface replicaInfo;
/** pipeline stage */
private final BlockConstructionStage stage;
private final boolean isTransfer;
private boolean syncOnClose;
private long restartBudget;
/** the reference of the volume where the block receiver writes to */
private ReplicaHandler replicaHandler;
/**
* for replaceBlock response
*/
private final long responseInterval;
private long lastResponseTime = 0;
private boolean isReplaceBlock = false;
private DataOutputStream replyOut = null;
private boolean pinning;
BlockReceiver(final ExtendedBlock block, final StorageType storageType,
final DataInputStream in,
final String inAddr, final String myAddr,
final BlockConstructionStage stage,
final long newGs, final long minBytesRcvd, final long maxBytesRcvd,
final String clientname, final DatanodeInfo srcDataNode,
final DataNode datanode, DataChecksum requestedChecksum,
CachingStrategy cachingStrategy,
final boolean allowLazyPersist,
final boolean pinning) throws IOException {
try{
this.block = block;
this.in = in;
this.inAddr = inAddr;
this.myAddr = myAddr;
this.srcDataNode = srcDataNode;
this.datanode = datanode;
this.clientname = clientname;
this.isDatanode = clientname.length() == 0;
this.isClient = !this.isDatanode;
this.restartBudget = datanode.getDnConf().restartReplicaExpiry;
this.datanodeSlowLogThresholdMs = datanode.getDnConf().datanodeSlowIoWarningThresholdMs;
      // For replaceBlock() calls a response should be sent periodically to avoid
      // socket timeouts at clients, so send at intervals of 0.5 * socketTimeout.
this.responseInterval = (long) (datanode.getDnConf().socketTimeout * 0.5);
//for datanode, we have
//1: clientName.length() == 0, and
//2: stage == null or PIPELINE_SETUP_CREATE
this.stage = stage;
this.isTransfer = stage == BlockConstructionStage.TRANSFER_RBW
|| stage == BlockConstructionStage.TRANSFER_FINALIZED;
this.pinning = pinning;
if (LOG.isDebugEnabled()) {
LOG.debug(getClass().getSimpleName() + ": " + block
+ "\n isClient =" + isClient + ", clientname=" + clientname
+ "\n isDatanode=" + isDatanode + ", srcDataNode=" + srcDataNode
+ "\n inAddr=" + inAddr + ", myAddr=" + myAddr
+ "\n cachingStrategy = " + cachingStrategy
+ "\n pinning=" + pinning
);
}
//
// Open local disk out
//
if (isDatanode) { //replication or move
replicaHandler = datanode.data.createTemporary(storageType, block);
} else {
switch (stage) {
case PIPELINE_SETUP_CREATE:
replicaHandler = datanode.data.createRbw(storageType, block, allowLazyPersist);
datanode.notifyNamenodeReceivingBlock(
block, replicaHandler.getReplica().getStorageUuid());
break;
case PIPELINE_SETUP_STREAMING_RECOVERY:
replicaHandler = datanode.data.recoverRbw(
block, newGs, minBytesRcvd, maxBytesRcvd);
block.setGenerationStamp(newGs);
break;
case PIPELINE_SETUP_APPEND:
replicaHandler = datanode.data.append(block, newGs, minBytesRcvd);
block.setGenerationStamp(newGs);
datanode.notifyNamenodeReceivingBlock(
block, replicaHandler.getReplica().getStorageUuid());
break;
case PIPELINE_SETUP_APPEND_RECOVERY:
replicaHandler = datanode.data.recoverAppend(block, newGs, minBytesRcvd);
block.setGenerationStamp(newGs);
datanode.notifyNamenodeReceivingBlock(
block, replicaHandler.getReplica().getStorageUuid());
break;
case TRANSFER_RBW:
case TRANSFER_FINALIZED:
// this is a transfer destination
replicaHandler =
datanode.data.createTemporary(storageType, block);
break;
default: throw new IOException("Unsupported stage " + stage +
" while receiving block " + block + " from " + inAddr);
}
}
replicaInfo = replicaHandler.getReplica();
this.dropCacheBehindWrites = (cachingStrategy.getDropBehind() == null) ?
datanode.getDnConf().dropCacheBehindWrites :
cachingStrategy.getDropBehind();
this.syncBehindWrites = datanode.getDnConf().syncBehindWrites;
this.syncBehindWritesInBackground = datanode.getDnConf().
syncBehindWritesInBackground;
final boolean isCreate = isDatanode || isTransfer
|| stage == BlockConstructionStage.PIPELINE_SETUP_CREATE;
streams = replicaInfo.createStreams(isCreate, requestedChecksum);
assert streams != null : "null streams!";
// read checksum meta information
this.clientChecksum = requestedChecksum;
this.diskChecksum = streams.getChecksum();
this.needsChecksumTranslation = !clientChecksum.equals(diskChecksum);
this.bytesPerChecksum = diskChecksum.getBytesPerChecksum();
this.checksumSize = diskChecksum.getChecksumSize();
this.out = streams.getDataOut();
if (out instanceof FileOutputStream) {
this.outFd = ((FileOutputStream)out).getFD();
} else {
LOG.warn("Could not get file descriptor for outputstream of class " +
out.getClass());
}
this.checksumOut = new DataOutputStream(new BufferedOutputStream(
streams.getChecksumOut(), DFSUtil.getSmallBufferSize(
datanode.getConf())));
// write data chunk header if creating a new replica
if (isCreate) {
BlockMetadataHeader.writeHeader(checksumOut, diskChecksum);
}
} catch (ReplicaAlreadyExistsException bae) {
throw bae;
} catch (ReplicaNotFoundException bne) {
throw bne;
} catch(IOException ioe) {
IOUtils.closeStream(this);
cleanupBlock();
// check if there is a disk error
IOException cause = DatanodeUtil.getCauseIfDiskError(ioe);
DataNode.LOG.warn("IOException in BlockReceiver constructor"
+ (cause == null ? "" : ". Cause is "), cause);
if (cause != null) { // possible disk error
ioe = cause;
datanode.checkDiskErrorAsync();
}
throw ioe;
}
}
/** Return the datanode object. */
DataNode getDataNode() {return datanode;}
String getStorageUuid() {
return replicaInfo.getStorageUuid();
}
/**
* close files and release volume reference.
*/
@Override
public void close() throws IOException {
packetReceiver.close();
IOException ioe = null;
if (syncOnClose && (out != null || checksumOut != null)) {
datanode.metrics.incrFsyncCount();
}
long flushTotalNanos = 0;
boolean measuredFlushTime = false;
// close checksum file
try {
if (checksumOut != null) {
long flushStartNanos = System.nanoTime();
checksumOut.flush();
long flushEndNanos = System.nanoTime();
if (syncOnClose) {
long fsyncStartNanos = flushEndNanos;
streams.syncChecksumOut();
datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
}
flushTotalNanos += flushEndNanos - flushStartNanos;
measuredFlushTime = true;
checksumOut.close();
checksumOut = null;
}
} catch(IOException e) {
ioe = e;
}
finally {
IOUtils.closeStream(checksumOut);
}
// close block file
try {
if (out != null) {
long flushStartNanos = System.nanoTime();
out.flush();
long flushEndNanos = System.nanoTime();
if (syncOnClose) {
long fsyncStartNanos = flushEndNanos;
streams.syncDataOut();
datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
}
flushTotalNanos += flushEndNanos - flushStartNanos;
measuredFlushTime = true;
out.close();
out = null;
}
} catch (IOException e) {
ioe = e;
}
finally{
IOUtils.closeStream(out);
}
if (replicaHandler != null) {
IOUtils.cleanup(null, replicaHandler);
replicaHandler = null;
}
if (measuredFlushTime) {
datanode.metrics.addFlushNanos(flushTotalNanos);
}
// disk check
if(ioe != null) {
datanode.checkDiskErrorAsync();
throw ioe;
}
}
/**
* Flush block data and metadata files to disk.
* @throws IOException
*/
void flushOrSync(boolean isSync) throws IOException {
long flushTotalNanos = 0;
long begin = Time.monotonicNow();
if (checksumOut != null) {
long flushStartNanos = System.nanoTime();
checksumOut.flush();
long flushEndNanos = System.nanoTime();
if (isSync) {
long fsyncStartNanos = flushEndNanos;
streams.syncChecksumOut();
datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
}
flushTotalNanos += flushEndNanos - flushStartNanos;
}
if (out != null) {
long flushStartNanos = System.nanoTime();
out.flush();
long flushEndNanos = System.nanoTime();
if (isSync) {
long fsyncStartNanos = flushEndNanos;
streams.syncDataOut();
datanode.metrics.addFsyncNanos(System.nanoTime() - fsyncStartNanos);
}
flushTotalNanos += flushEndNanos - flushStartNanos;
}
if (checksumOut != null || out != null) {
datanode.metrics.addFlushNanos(flushTotalNanos);
if (isSync) {
datanode.metrics.incrFsyncCount();
}
}
long duration = Time.monotonicNow() - begin;
if (duration > datanodeSlowLogThresholdMs) {
LOG.warn("Slow flushOrSync took " + duration + "ms (threshold="
+ datanodeSlowLogThresholdMs + "ms), isSync:" + isSync + ", flushTotalNanos="
+ flushTotalNanos + "ns");
}
}
/**
* While writing to mirrorOut, failure to write to mirror should not
* affect this datanode unless it is caused by interruption.
*/
private void handleMirrorOutError(IOException ioe) throws IOException {
String bpid = block.getBlockPoolId();
LOG.info(datanode.getDNRegistrationForBP(bpid)
+ ":Exception writing " + block + " to mirror " + mirrorAddr, ioe);
if (Thread.interrupted()) { // shut down if the thread is interrupted
throw ioe;
} else { // encounter an error while writing to mirror
// continue to run even if can not write to mirror
// notify client of the error
// and wait for the client to shut down the pipeline
mirrorError = true;
}
}
/**
* Verify multiple CRC chunks.
*/
private void verifyChunks(ByteBuffer dataBuf, ByteBuffer checksumBuf)
throws IOException {
try {
clientChecksum.verifyChunkedSums(dataBuf, checksumBuf, clientname, 0);
} catch (ChecksumException ce) {
LOG.warn("Checksum error in block " + block + " from " + inAddr, ce);
// No need to report to namenode when client is writing.
if (srcDataNode != null && isDatanode) {
try {
LOG.info("report corrupt " + block + " from datanode " +
srcDataNode + " to namenode");
datanode.reportRemoteBadBlock(srcDataNode, block);
} catch (IOException e) {
LOG.warn("Failed to report bad " + block +
" from datanode " + srcDataNode + " to namenode");
}
}
throw new IOException("Unexpected checksum mismatch while writing "
+ block + " from " + inAddr);
}
}
/**
* Translate CRC chunks from the client's checksum implementation
* to the disk checksum implementation.
*
* This does not verify the original checksums, under the assumption
* that they have already been validated.
*/
private void translateChunks(ByteBuffer dataBuf, ByteBuffer checksumBuf) {
diskChecksum.calculateChunkedSums(dataBuf, checksumBuf);
}
/**
* Check whether checksum needs to be verified.
* Skip verifying checksum iff this is not the last one in the
* pipeline and clientName is non-null. i.e. Checksum is verified
* on all the datanodes when the data is being written by a
   * datanode rather than a client. When the client is writing the data,
* protocol includes acks and only the last datanode needs to verify
* checksum.
* @return true if checksum verification is needed, otherwise false.
*/
private boolean shouldVerifyChecksum() {
return (mirrorOut == null || isDatanode || needsChecksumTranslation);
}
/**
* Receives and processes a packet. It can contain many chunks.
   * Returns the number of data bytes in the packet, or -1 if this was the
   * last packet in the block.
*/
private int receivePacket() throws IOException {
// read the next packet
packetReceiver.receiveNextPacket(in);
PacketHeader header = packetReceiver.getHeader();
if (LOG.isDebugEnabled()){
LOG.debug("Receiving one packet for block " + block +
": " + header);
}
// Sanity check the header
if (header.getOffsetInBlock() > replicaInfo.getNumBytes()) {
throw new IOException("Received an out-of-sequence packet for " + block +
"from " + inAddr + " at offset " + header.getOffsetInBlock() +
". Expecting packet starting at " + replicaInfo.getNumBytes());
}
if (header.getDataLen() < 0) {
throw new IOException("Got wrong length during writeBlock(" + block +
") from " + inAddr + " at offset " +
header.getOffsetInBlock() + ": " +
header.getDataLen());
}
long offsetInBlock = header.getOffsetInBlock();
long seqno = header.getSeqno();
boolean lastPacketInBlock = header.isLastPacketInBlock();
final int len = header.getDataLen();
boolean syncBlock = header.getSyncBlock();
// avoid double sync'ing on close
if (syncBlock && lastPacketInBlock) {
this.syncOnClose = false;
}
// update received bytes
final long firstByteInBlock = offsetInBlock;
offsetInBlock += len;
if (replicaInfo.getNumBytes() < offsetInBlock) {
replicaInfo.setNumBytes(offsetInBlock);
}
// put in queue for pending acks, unless sync was requested
if (responder != null && !syncBlock && !shouldVerifyChecksum()) {
((PacketResponder) responder.getRunnable()).enqueue(seqno,
lastPacketInBlock, offsetInBlock, Status.SUCCESS);
}
//First write the packet to the mirror:
if (mirrorOut != null && !mirrorError) {
try {
long begin = Time.monotonicNow();
packetReceiver.mirrorPacketTo(mirrorOut);
mirrorOut.flush();
long duration = Time.monotonicNow() - begin;
if (duration > datanodeSlowLogThresholdMs) {
LOG.warn("Slow BlockReceiver write packet to mirror took " + duration
+ "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");
}
} catch (IOException e) {
handleMirrorOutError(e);
}
}
ByteBuffer dataBuf = packetReceiver.getDataSlice();
ByteBuffer checksumBuf = packetReceiver.getChecksumSlice();
if (lastPacketInBlock || len == 0) {
if(LOG.isDebugEnabled()) {
LOG.debug("Receiving an empty packet or the end of the block " + block);
}
// sync block if requested
if (syncBlock) {
flushOrSync(true);
}
} else {
final int checksumLen = diskChecksum.getChecksumSize(len);
final int checksumReceivedLen = checksumBuf.capacity();
if (checksumReceivedLen > 0 && checksumReceivedLen != checksumLen) {
throw new IOException("Invalid checksum length: received length is "
+ checksumReceivedLen + " but expected length is " + checksumLen);
}
if (checksumReceivedLen > 0 && shouldVerifyChecksum()) {
try {
verifyChunks(dataBuf, checksumBuf);
} catch (IOException ioe) {
// checksum error detected locally. there is no reason to continue.
if (responder != null) {
try {
((PacketResponder) responder.getRunnable()).enqueue(seqno,
lastPacketInBlock, offsetInBlock,
Status.ERROR_CHECKSUM);
// Wait until the responder sends back the response
// and interrupt this thread.
Thread.sleep(3000);
} catch (InterruptedException e) { }
}
throw new IOException("Terminating due to a checksum error." + ioe);
}
if (needsChecksumTranslation) {
// overwrite the checksums in the packet buffer with the
// appropriate polynomial for the disk storage.
translateChunks(dataBuf, checksumBuf);
}
}
if (checksumReceivedLen == 0 && !streams.isTransientStorage()) {
// checksum is missing, need to calculate it
checksumBuf = ByteBuffer.allocate(checksumLen);
diskChecksum.calculateChunkedSums(dataBuf, checksumBuf);
}
// by this point, the data in the buffer uses the disk checksum
final boolean shouldNotWriteChecksum = checksumReceivedLen == 0
&& streams.isTransientStorage();
try {
long onDiskLen = replicaInfo.getBytesOnDisk();
if (onDiskLen<offsetInBlock) {
// Normally the beginning of an incoming packet is aligned with the
// existing data on disk. If the beginning packet data offset is not
// checksum chunk aligned, the end of packet will not go beyond the
// next chunk boundary.
          // When a failure-recovery is involved, the client state and
          // the datanode state may not exactly agree. I.e. the client may
// resend part of data that is already on disk. Correct number of
// bytes should be skipped when writing the data and checksum
// buffers out to disk.
long partialChunkSizeOnDisk = onDiskLen % bytesPerChecksum;
long lastChunkBoundary = onDiskLen - partialChunkSizeOnDisk;
boolean alignedOnDisk = partialChunkSizeOnDisk == 0;
boolean alignedInPacket = firstByteInBlock % bytesPerChecksum == 0;
// If the end of the on-disk data is not chunk-aligned, the last
// checksum needs to be overwritten.
boolean overwriteLastCrc = !alignedOnDisk && !shouldNotWriteChecksum;
          // If the starting offset of the packet data is at the last chunk
// boundary of the data on disk, the partial checksum recalculation
// can be skipped and the checksum supplied by the client can be used
// instead. This reduces disk reads and cpu load.
boolean doCrcRecalc = overwriteLastCrc &&
(lastChunkBoundary != firstByteInBlock);
// If this is a partial chunk, then verify that this is the only
// chunk in the packet. If the starting offset is not chunk
// aligned, the packet should terminate at or before the next
// chunk boundary.
if (!alignedInPacket && len > bytesPerChecksum) {
throw new IOException("Unexpected packet data length for "
+ block + " from " + inAddr + ": a partial chunk must be "
+ " sent in an individual packet (data length = " + len
+ " > bytesPerChecksum = " + bytesPerChecksum + ")");
}
// If the last portion of the block file is not a full chunk,
// then read in pre-existing partial data chunk and recalculate
// the checksum so that the checksum calculation can continue
// from the right state. If the client provided the checksum for
// the whole chunk, this is not necessary.
Checksum partialCrc = null;
if (doCrcRecalc) {
if (LOG.isDebugEnabled()) {
LOG.debug("receivePacket for " + block
+ ": previous write did not end at the chunk boundary."
+ " onDiskLen=" + onDiskLen);
}
long offsetInChecksum = BlockMetadataHeader.getHeaderSize() +
onDiskLen / bytesPerChecksum * checksumSize;
partialCrc = computePartialChunkCrc(onDiskLen, offsetInChecksum);
}
// The data buffer position where write will begin. If the packet
// data and on-disk data have no overlap, this will not be at the
// beginning of the buffer.
int startByteToDisk = (int)(onDiskLen-firstByteInBlock)
+ dataBuf.arrayOffset() + dataBuf.position();
// Actual number of data bytes to write.
int numBytesToDisk = (int)(offsetInBlock-onDiskLen);
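          // Illustrative example: with bytesPerChecksum=512, onDiskLen=1000,
          // firstByteInBlock=512 and offsetInBlock=1536, the first 488 bytes of
          // the packet data are already on disk, so startByteToDisk skips them
          // and numBytesToDisk is 536.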
// Write data to disk.
long begin = Time.monotonicNow();
out.write(dataBuf.array(), startByteToDisk, numBytesToDisk);
long duration = Time.monotonicNow() - begin;
if (duration > datanodeSlowLogThresholdMs) {
LOG.warn("Slow BlockReceiver write data to disk cost:" + duration
+ "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");
}
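          // lastCrc: checksum of the final (possibly partial) chunk written by this packet.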
final byte[] lastCrc;
if (shouldNotWriteChecksum) {
lastCrc = null;
} else {
int skip = 0;
byte[] crcBytes = null;
// First, prepare to overwrite the partial crc at the end.
if (overwriteLastCrc) { // not chunk-aligned on disk
// prepare to overwrite last checksum
adjustCrcFilePosition();
}
// The CRC was recalculated for the last partial chunk. Update the
// CRC by reading the rest of the chunk, then write it out.
if (doCrcRecalc) {
// Calculate new crc for this chunk.
int bytesToReadForRecalc =
(int)(bytesPerChecksum - partialChunkSizeOnDisk);
if (numBytesToDisk < bytesToReadForRecalc) {
bytesToReadForRecalc = numBytesToDisk;
}
partialCrc.update(dataBuf.array(), startByteToDisk,
bytesToReadForRecalc);
byte[] buf = FSOutputSummer.convertToByteStream(partialCrc,
checksumSize);
crcBytes = copyLastChunkChecksum(buf, checksumSize, buf.length);
checksumOut.write(buf);
if(LOG.isDebugEnabled()) {
LOG.debug("Writing out partial crc for data len " + len +
", skip=" + skip);
}
skip++; // For the partial chunk that was just read.
}
// Determine how many checksums need to be skipped up to the last
// boundary. The checksum after the boundary was already counted
// above. Only count the number of checksums skipped up to the
// boundary here.
long skippedDataBytes = lastChunkBoundary - firstByteInBlock;
if (skippedDataBytes > 0) {
skip += (int)(skippedDataBytes / bytesPerChecksum) +
((skippedDataBytes % bytesPerChecksum == 0) ? 0 : 1);
}
skip *= checksumSize; // Convert to number of bytes
// write the rest of checksum
final int offset = checksumBuf.arrayOffset() +
checksumBuf.position() + skip;
final int end = offset + checksumLen - skip;
// If offset > end, there is no more checksum to write.
// I.e. a partial chunk checksum rewrite happened and there is no
// more to write after that.
if (offset > end) {
assert crcBytes != null;
lastCrc = crcBytes;
} else {
final int remainingBytes = checksumLen - skip;
lastCrc = copyLastChunkChecksum(checksumBuf.array(),
checksumSize, end);
checksumOut.write(checksumBuf.array(), offset, remainingBytes);
}
}
/// flush entire packet, sync if requested
flushOrSync(syncBlock);
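          // Record the last chunk checksum and new length so a concurrent reader
          // or a later append can resume mid-chunk.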
replicaInfo.setLastChecksumAndDataLen(offsetInBlock, lastCrc);
datanode.metrics.incrBytesWritten(len);
datanode.metrics.incrTotalWriteTime(duration);
manageWriterOsCache(offsetInBlock);
}
} catch (IOException iex) {
datanode.checkDiskErrorAsync();
throw iex;
}
}
// if sync was requested, put in queue for pending acks here
// (after the fsync finished)
if (responder != null && (syncBlock || shouldVerifyChecksum())) {
((PacketResponder) responder.getRunnable()).enqueue(seqno,
lastPacketInBlock, offsetInBlock, Status.SUCCESS);
}
/*
* Send in-progress responses for the replaceBlock() calls back to caller to
* avoid timeouts due to balancer throttling. HDFS-6247
*/
if (isReplaceBlock
&& (Time.monotonicNow() - lastResponseTime > responseInterval)) {
BlockOpResponseProto.Builder response = BlockOpResponseProto.newBuilder()
.setStatus(Status.IN_PROGRESS);
response.build().writeDelimitedTo(replyOut);
replyOut.flush();
lastResponseTime = Time.monotonicNow();
}
if (throttler != null) { // throttle I/O
throttler.throttle(len);
}
return lastPacketInBlock?-1:len;
}
private static byte[] copyLastChunkChecksum(byte[] array, int size, int end) {
return Arrays.copyOfRange(array, end - size, end);
}
private void manageWriterOsCache(long offsetInBlock) {
try {
if (outFd != null &&
offsetInBlock > lastCacheManagementOffset + CACHE_DROP_LAG_BYTES) {
long begin = Time.monotonicNow();
//
// For SYNC_FILE_RANGE_WRITE, we want to sync from
// lastCacheManagementOffset to a position "two windows ago"
//
// <========= sync ===========>
// +-----------------------O--------------------------X
// start last curPos
// of file
//
if (syncBehindWrites) {
if (syncBehindWritesInBackground) {
this.datanode.getFSDataset().submitBackgroundSyncFileRangeRequest(
block, outFd, lastCacheManagementOffset,
offsetInBlock - lastCacheManagementOffset,
SYNC_FILE_RANGE_WRITE);
} else {
NativeIO.POSIX.syncFileRangeIfPossible(outFd,
lastCacheManagementOffset,
offsetInBlock - lastCacheManagementOffset,
SYNC_FILE_RANGE_WRITE);
}
}
//
// For POSIX_FADV_DONTNEED, we want to drop from the beginning
// of the file to a position prior to the current position.
//
// <=== drop =====>
// <---W--->
// +--------------+--------O--------------------------X
// start dropPos last curPos
// of file
//
long dropPos = lastCacheManagementOffset - CACHE_DROP_LAG_BYTES;
if (dropPos > 0 && dropCacheBehindWrites) {
NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
block.getBlockName(), outFd, 0, dropPos, POSIX_FADV_DONTNEED);
}
lastCacheManagementOffset = offsetInBlock;
long duration = Time.monotonicNow() - begin;
if (duration > datanodeSlowLogThresholdMs) {
LOG.warn("Slow manageWriterOsCache took " + duration
+ "ms (threshold=" + datanodeSlowLogThresholdMs + "ms)");
}
}
} catch (Throwable t) {
LOG.warn("Error managing cache for writer of block " + block, t);
}
}
public void sendOOB() throws IOException, InterruptedException {
((PacketResponder) responder.getRunnable()).sendOOBResponse(PipelineAck
.getRestartOOBStatus());
}
void receiveBlock(
DataOutputStream mirrOut, // output to next datanode
DataInputStream mirrIn, // input from next datanode
DataOutputStream replyOut, // output to previous datanode
String mirrAddr, DataTransferThrottler throttlerArg,
DatanodeInfo[] downstreams,
boolean isReplaceBlock) throws IOException {
syncOnClose = datanode.getDnConf().syncOnClose;
boolean responderClosed = false;
mirrorOut = mirrOut;
mirrorAddr = mirrAddr;
throttler = throttlerArg;
this.replyOut = replyOut;
this.isReplaceBlock = isReplaceBlock;
try {
if (isClient && !isTransfer) {
responder = new Daemon(datanode.threadGroup,
new PacketResponder(replyOut, mirrIn, downstreams));
responder.start(); // start thread to processes responses
}
while (receivePacket() >= 0) { /* Receive until the last packet */ }
      // Wait for all outstanding packet responses, then tell the responder
      // to shut down gracefully and mark it as closed for future processing.
if (responder != null) {
((PacketResponder)responder.getRunnable()).close();
responderClosed = true;
}
// If this write is for a replication or transfer-RBW/Finalized,
// then finalize block or convert temporary to RBW.
// For client-writes, the block is finalized in the PacketResponder.
if (isDatanode || isTransfer) {
// Hold a volume reference to finalize block.
try (ReplicaHandler handler = claimReplicaHandler()) {
// close the block/crc files
close();
block.setNumBytes(replicaInfo.getNumBytes());
if (stage == BlockConstructionStage.TRANSFER_RBW) {
// for TRANSFER_RBW, convert temporary to RBW
datanode.data.convertTemporaryToRbw(block);
} else {
            // for isDatanode or TRANSFER_FINALIZED
// Finalize the block.
datanode.data.finalizeBlock(block);
}
}
datanode.metrics.incrBlocksWritten();
}
} catch (IOException ioe) {
replicaInfo.releaseAllBytesReserved();
if (datanode.isRestarting()) {
// Do not throw if shutting down for restart. Otherwise, it will cause
// premature termination of responder.
LOG.info("Shutting down for restart (" + block + ").");
} else {
LOG.info("Exception for " + block, ioe);
throw ioe;
}
} finally {
// Clear the previous interrupt state of this thread.
Thread.interrupted();
// If a shutdown for restart was initiated, upstream needs to be notified.
// There is no need to do anything special if the responder was closed
// normally.
if (!responderClosed) { // Data transfer was not complete.
if (responder != null) {
// In case this datanode is shutting down for quick restart,
// send a special ack upstream.
if (datanode.isRestarting() && isClient && !isTransfer) {
try (Writer out = new OutputStreamWriter(
replicaInfo.createRestartMetaStream(), "UTF-8")) {
// write out the current time.
out.write(Long.toString(Time.now() + restartBudget));
out.flush();
} catch (IOException ioe) {
// The worst case is not recovering this RBW replica.
// Client will fall back to regular pipeline recovery.
} finally {
IOUtils.cleanup(LOG, out);
}
try {
// Even if the connection is closed after the ack packet is
// flushed, the client can react to the connection closure
// first. Insert a delay to lower the chance of client
// missing the OOB ack.
Thread.sleep(1000);
} catch (InterruptedException ie) {
// It is already going down. Ignore this.
}
}
responder.interrupt();
}
IOUtils.closeStream(this);
cleanupBlock();
}
if (responder != null) {
try {
responder.interrupt();
// join() on the responder should timeout a bit earlier than the
// configured deadline. Otherwise, the join() on this thread will
// likely timeout as well.
long joinTimeout = datanode.getDnConf().getXceiverStopTimeout();
joinTimeout = joinTimeout > 1 ? joinTimeout*8/10 : joinTimeout;
responder.join(joinTimeout);
if (responder.isAlive()) {
String msg = "Join on responder thread " + responder
+ " timed out";
LOG.warn(msg + "\n" + StringUtils.getStackTrace(responder));
throw new IOException(msg);
}
} catch (InterruptedException e) {
responder.interrupt();
// do not throw if shutting down for restart.
if (!datanode.isRestarting()) {
throw new IOException("Interrupted receiveBlock");
}
}
responder = null;
}
}
}
/** Cleanup a partial block
* if this write is for a replication request (and not from a client)
*/
private void cleanupBlock() throws IOException {
if (isDatanode) {
datanode.data.unfinalizeBlock(block);
}
}
/**
* Adjust the file pointer in the local meta file so that the last checksum
* will be overwritten.
*/
private void adjustCrcFilePosition() throws IOException {
if (out != null) {
out.flush();
}
if (checksumOut != null) {
checksumOut.flush();
}
// rollback the position of the meta file
datanode.data.adjustCrcChannelPosition(block, streams, checksumSize);
}
/**
* Convert a checksum byte array to a long
*/
static private long checksum2long(byte[] checksum) {
long crc = 0L;
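    // Big-endian interpretation: e.g. {0x12, 0x34, 0x56, 0x78} becomes 0x12345678L.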
for(int i=0; i<checksum.length; i++) {
crc |= (0xffL&checksum[i])<<((checksum.length-i-1)*8);
}
return crc;
}
/**
* reads in the partial crc chunk and computes checksum
* of pre-existing data in partial chunk.
*/
private Checksum computePartialChunkCrc(long blkoff, long ckoff)
throws IOException {
// find offset of the beginning of partial chunk.
//
int sizePartialChunk = (int) (blkoff % bytesPerChecksum);
blkoff = blkoff - sizePartialChunk;
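    // Illustrative: blkoff=1000 with bytesPerChecksum=512 gives sizePartialChunk=488
    // and rewinds blkoff to the chunk boundary at 512.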
if (LOG.isDebugEnabled()) {
LOG.debug("computePartialChunkCrc for " + block
+ ": sizePartialChunk=" + sizePartialChunk
+ ", block offset=" + blkoff
+ ", metafile offset=" + ckoff);
}
// create an input stream from the block file
// and read in partial crc chunk into temporary buffer
//
byte[] buf = new byte[sizePartialChunk];
byte[] crcbuf = new byte[checksumSize];
try (ReplicaInputStreams instr =
datanode.data.getTmpInputStreams(block, blkoff, ckoff)) {
IOUtils.readFully(instr.getDataIn(), buf, 0, sizePartialChunk);
      // open meta file and read in crc value computed earlier
IOUtils.readFully(instr.getChecksumIn(), crcbuf, 0, crcbuf.length);
}
// compute crc of partial chunk from data read in the block file.
final Checksum partialCrc = DataChecksum.newDataChecksum(
diskChecksum.getChecksumType(), diskChecksum.getBytesPerChecksum());
partialCrc.update(buf, 0, sizePartialChunk);
if (LOG.isDebugEnabled()) {
LOG.debug("Read in partial CRC chunk from disk for " + block);
}
// paranoia! verify that the pre-computed crc matches what we
// recalculated just now
if (partialCrc.getValue() != checksum2long(crcbuf)) {
String msg = "Partial CRC " + partialCrc.getValue() +
" does not match value computed the " +
" last time file was closed " +
checksum2long(crcbuf);
throw new IOException(msg);
}
return partialCrc;
}
/** The caller claims the ownership of the replica handler. */
private ReplicaHandler claimReplicaHandler() {
ReplicaHandler handler = replicaHandler;
replicaHandler = null;
return handler;
}
private static enum PacketResponderType {
NON_PIPELINE, LAST_IN_PIPELINE, HAS_DOWNSTREAM_IN_PIPELINE
}
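  // NON_PIPELINE: no downstream targets were supplied; LAST_IN_PIPELINE: this
  // datanode is the tail of the write pipeline; HAS_DOWNSTREAM_IN_PIPELINE:
  // acks from a downstream datanode must be read and relayed upstream.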
/**
* Processes responses from downstream datanodes in the pipeline
* and sends back replies to the originator.
*/
class PacketResponder implements Runnable, Closeable {
/** queue for packets waiting for ack - synchronization using monitor lock */
private final LinkedList<Packet> ackQueue = new LinkedList<Packet>();
/** the thread that spawns this responder */
private final Thread receiverThread = Thread.currentThread();
/** is this responder running? - synchronization using monitor lock */
private volatile boolean running = true;
/** input from the next downstream datanode */
private final DataInputStream downstreamIn;
/** output to upstream datanode/client */
private final DataOutputStream upstreamOut;
/** The type of this responder */
private final PacketResponderType type;
/** for log and error messages */
private final String myString;
private boolean sending = false;
@Override
public String toString() {
return myString;
}
PacketResponder(final DataOutputStream upstreamOut,
final DataInputStream downstreamIn, final DatanodeInfo[] downstreams) {
this.downstreamIn = downstreamIn;
this.upstreamOut = upstreamOut;
this.type = downstreams == null? PacketResponderType.NON_PIPELINE
: downstreams.length == 0? PacketResponderType.LAST_IN_PIPELINE
: PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE;
final StringBuilder b = new StringBuilder(getClass().getSimpleName())
.append(": ").append(block).append(", type=").append(type);
if (type != PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE) {
b.append(", downstreams=").append(downstreams.length)
.append(":").append(Arrays.asList(downstreams));
}
this.myString = b.toString();
}
private boolean isRunning() {
// When preparing for a restart, it should continue to run until
// interrupted by the receiver thread.
return running && (datanode.shouldRun || datanode.isRestarting());
}
/**
     * enqueue the seqno that is still to be acked by the downstream datanode.
* @param seqno sequence number of the packet
* @param lastPacketInBlock if true, this is the last packet in block
* @param offsetInBlock offset of this packet in block
*/
void enqueue(final long seqno, final boolean lastPacketInBlock,
final long offsetInBlock, final Status ackStatus) {
final Packet p = new Packet(seqno, lastPacketInBlock, offsetInBlock,
System.nanoTime(), ackStatus);
if(LOG.isDebugEnabled()) {
LOG.debug(myString + ": enqueue " + p);
}
synchronized(ackQueue) {
if (running) {
ackQueue.addLast(p);
ackQueue.notifyAll();
}
}
}
/**
* Send an OOB response. If all acks have been sent already for the block
* and the responder is about to close, the delivery is not guaranteed.
* This is because the other end can close the connection independently.
* An OOB coming from downstream will be automatically relayed upstream
* by the responder. This method is used only by originating datanode.
*
* @param ackStatus the type of ack to be sent
*/
void sendOOBResponse(final Status ackStatus) throws IOException,
InterruptedException {
if (!running) {
LOG.info("Cannot send OOB response " + ackStatus +
". Responder not running.");
return;
}
synchronized(this) {
if (sending) {
wait(PipelineAck.getOOBTimeout(ackStatus));
// Didn't get my turn in time. Give up.
if (sending) {
throw new IOException("Could not send OOB reponse in time: "
+ ackStatus);
}
}
sending = true;
}
LOG.info("Sending an out of band ack of type " + ackStatus);
try {
sendAckUpstreamUnprotected(null, PipelineAck.UNKOWN_SEQNO, 0L, 0L,
PipelineAck.combineHeader(datanode.getECN(), ackStatus));
} finally {
        // Let others send ack. Unless there are multiple OOB send
// calls, there can be only one waiter, the responder thread.
// In any case, only one needs to be notified.
synchronized(this) {
sending = false;
notify();
}
}
}
/** Wait for a packet with given {@code seqno} to be enqueued to ackQueue */
Packet waitForAckHead(long seqno) throws InterruptedException {
synchronized(ackQueue) {
while (isRunning() && ackQueue.size() == 0) {
if (LOG.isDebugEnabled()) {
LOG.debug(myString + ": seqno=" + seqno +
" waiting for local datanode to finish write.");
}
ackQueue.wait();
}
return isRunning() ? ackQueue.getFirst() : null;
}
}
/**
* wait for all pending packets to be acked. Then shutdown thread.
*/
@Override
public void close() {
synchronized(ackQueue) {
while (isRunning() && ackQueue.size() != 0) {
try {
ackQueue.wait();
} catch (InterruptedException e) {
running = false;
Thread.currentThread().interrupt();
}
}
if(LOG.isDebugEnabled()) {
LOG.debug(myString + ": closing");
}
running = false;
ackQueue.notifyAll();
}
synchronized(this) {
running = false;
notifyAll();
}
}
/**
* Thread to process incoming acks.
* @see java.lang.Runnable#run()
*/
@Override
public void run() {
boolean lastPacketInBlock = false;
final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
while (isRunning() && !lastPacketInBlock) {
long totalAckTimeNanos = 0;
boolean isInterrupted = false;
try {
Packet pkt = null;
long expected = -2;
PipelineAck ack = new PipelineAck();
long seqno = PipelineAck.UNKOWN_SEQNO;
long ackRecvNanoTime = 0;
try {
if (type != PacketResponderType.LAST_IN_PIPELINE && !mirrorError) {
// read an ack from downstream datanode
ack.readFields(downstreamIn);
ackRecvNanoTime = System.nanoTime();
if (LOG.isDebugEnabled()) {
LOG.debug(myString + " got " + ack);
}
// Process an OOB ACK.
Status oobStatus = ack.getOOBStatus();
if (oobStatus != null) {
LOG.info("Relaying an out of band ack of type " + oobStatus);
sendAckUpstream(ack, PipelineAck.UNKOWN_SEQNO, 0L, 0L,
PipelineAck.combineHeader(datanode.getECN(),
Status.SUCCESS));
continue;
}
seqno = ack.getSeqno();
}
if (seqno != PipelineAck.UNKOWN_SEQNO
|| type == PacketResponderType.LAST_IN_PIPELINE) {
pkt = waitForAckHead(seqno);
if (!isRunning()) {
break;
}
expected = pkt.seqno;
if (type == PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE
&& seqno != expected) {
throw new IOException(myString + "seqno: expected=" + expected
+ ", received=" + seqno);
}
if (type == PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE) {
// The total ack time includes the ack times of downstream
// nodes.
// The value is 0 if this responder doesn't have a downstream
// DN in the pipeline.
totalAckTimeNanos = ackRecvNanoTime - pkt.ackEnqueueNanoTime;
// Report the elapsed time from ack send to ack receive minus
// the downstream ack time.
long ackTimeNanos = totalAckTimeNanos
- ack.getDownstreamAckTimeNanos();
if (ackTimeNanos < 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Calculated invalid ack time: " + ackTimeNanos
+ "ns.");
}
} else {
datanode.metrics.addPacketAckRoundTripTimeNanos(ackTimeNanos);
}
}
lastPacketInBlock = pkt.lastPacketInBlock;
}
} catch (InterruptedException ine) {
isInterrupted = true;
} catch (IOException ioe) {
if (Thread.interrupted()) {
isInterrupted = true;
} else {
// continue to run even if can not read from mirror
// notify client of the error
// and wait for the client to shut down the pipeline
mirrorError = true;
LOG.info(myString, ioe);
}
}
if (Thread.interrupted() || isInterrupted) {
/*
* The receiver thread cancelled this thread. We could also check
* any other status updates from the receiver thread (e.g. if it is
* ok to write to replyOut). It is prudent to not send any more
* status back to the client because this datanode has a problem.
* The upstream datanode will detect that this datanode is bad, and
* rightly so.
*
* The receiver thread can also interrupt this thread for sending
* an out-of-band response upstream.
*/
LOG.info(myString + ": Thread is interrupted.");
running = false;
continue;
}
if (lastPacketInBlock) {
// Finalize the block and close the block file
finalizeBlock(startTime);
}
Status myStatus = pkt != null ? pkt.ackStatus : Status.SUCCESS;
sendAckUpstream(ack, expected, totalAckTimeNanos,
(pkt != null ? pkt.offsetInBlock : 0),
PipelineAck.combineHeader(datanode.getECN(), myStatus));
if (pkt != null) {
// remove the packet from the ack queue
removeAckHead();
}
} catch (IOException e) {
LOG.warn("IOException in BlockReceiver.run(): ", e);
if (running) {
datanode.checkDiskErrorAsync();
LOG.info(myString, e);
running = false;
if (!Thread.interrupted()) { // failure not caused by interruption
receiverThread.interrupt();
}
}
} catch (Throwable e) {
if (running) {
LOG.info(myString, e);
running = false;
receiverThread.interrupt();
}
}
}
LOG.info(myString + " terminating");
}
/**
* Finalize the block and close the block file
* @param startTime time when BlockReceiver started receiving the block
*/
private void finalizeBlock(long startTime) throws IOException {
long endTime = 0;
// Hold a volume reference to finalize block.
try (ReplicaHandler handler = BlockReceiver.this.claimReplicaHandler()) {
BlockReceiver.this.close();
endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
block.setNumBytes(replicaInfo.getNumBytes());
datanode.data.finalizeBlock(block);
}
if (pinning) {
datanode.data.setPinning(block);
}
datanode.closeBlock(
block, DataNode.EMPTY_DEL_HINT, replicaInfo.getStorageUuid());
if (ClientTraceLog.isInfoEnabled() && isClient) {
long offset = 0;
DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block
.getBlockPoolId());
ClientTraceLog.info(String.format(DN_CLIENTTRACE_FORMAT, inAddr,
myAddr, block.getNumBytes(), "HDFS_WRITE", clientname, offset,
dnR.getDatanodeUuid(), block, endTime - startTime));
} else {
LOG.info("Received " + block + " size " + block.getNumBytes()
+ " from " + inAddr);
}
}
/**
* The wrapper for the unprotected version. This is only called by
* the responder's run() method.
*
* @param ack Ack received from downstream
* @param seqno sequence number of ack to be sent upstream
* @param totalAckTimeNanos total ack time including all the downstream
* nodes
* @param offsetInBlock offset in block for the data in packet
* @param myHeader the local ack header
*/
private void sendAckUpstream(PipelineAck ack, long seqno,
long totalAckTimeNanos, long offsetInBlock,
int myHeader) throws IOException {
try {
// Wait for other sender to finish. Unless there is an OOB being sent,
// the responder won't have to wait.
synchronized(this) {
while(sending) {
wait();
}
sending = true;
}
try {
if (!running) return;
sendAckUpstreamUnprotected(ack, seqno, totalAckTimeNanos,
offsetInBlock, myHeader);
} finally {
synchronized(this) {
sending = false;
notify();
}
}
} catch (InterruptedException ie) {
// The responder was interrupted. Make it go down without
// interrupting the receiver(writer) thread.
running = false;
}
}
/**
* @param ack Ack received from downstream
* @param seqno sequence number of ack to be sent upstream
* @param totalAckTimeNanos total ack time including all the downstream
* nodes
* @param offsetInBlock offset in block for the data in packet
* @param myHeader the local ack header
*/
private void sendAckUpstreamUnprotected(PipelineAck ack, long seqno,
long totalAckTimeNanos, long offsetInBlock, int myHeader)
throws IOException {
final int[] replies;
if (ack == null) {
// A new OOB response is being sent from this node. Regardless of
// downstream nodes, reply should contain one reply.
replies = new int[] { myHeader };
} else if (mirrorError) { // ack read error
int h = PipelineAck.combineHeader(datanode.getECN(), Status.SUCCESS);
int h1 = PipelineAck.combineHeader(datanode.getECN(), Status.ERROR);
replies = new int[] {h, h1};
} else {
short ackLen = type == PacketResponderType.LAST_IN_PIPELINE ? 0 : ack
.getNumOfReplies();
replies = new int[ackLen + 1];
replies[0] = myHeader;
for (int i = 0; i < ackLen; ++i) {
replies[i + 1] = ack.getHeaderFlag(i);
}
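        // replies[0] is this datanode's header; downstream headers follow in pipeline order.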
// If the mirror has reported that it received a corrupt packet,
// do self-destruct to mark myself bad, instead of making the
// mirror node bad. The mirror is guaranteed to be good without
// corrupt data on disk.
if (ackLen > 0 && PipelineAck.getStatusFromHeader(replies[1]) ==
Status.ERROR_CHECKSUM) {
throw new IOException("Shutting down writer and responder "
+ "since the down streams reported the data sent by this "
+ "thread is corrupt");
}
}
PipelineAck replyAck = new PipelineAck(seqno, replies,
totalAckTimeNanos);
if (replyAck.isSuccess()
&& offsetInBlock > replicaInfo.getBytesAcked()) {
replicaInfo.setBytesAcked(offsetInBlock);
}
// send my ack back to upstream datanode
long begin = Time.monotonicNow();
replyAck.write(upstreamOut);
upstreamOut.flush();
long duration = Time.monotonicNow() - begin;
if (duration > datanodeSlowLogThresholdMs) {
LOG.warn("Slow PacketResponder send ack to upstream took " + duration
+ "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), " + myString
+ ", replyAck=" + replyAck);
} else if (LOG.isDebugEnabled()) {
LOG.debug(myString + ", replyAck=" + replyAck);
}
// If a corruption was detected in the received data, terminate after
// sending ERROR_CHECKSUM back.
Status myStatus = PipelineAck.getStatusFromHeader(myHeader);
if (myStatus == Status.ERROR_CHECKSUM) {
throw new IOException("Shutting down writer and responder "
+ "due to a checksum error in received data. The error "
+ "response has been sent upstream.");
}
}
/**
* Remove a packet from the head of the ack queue
*
* This should be called only when the ack queue is not empty
*/
private void removeAckHead() {
synchronized(ackQueue) {
ackQueue.removeFirst();
ackQueue.notifyAll();
}
}
}
/**
* This information is cached by the Datanode in the ackQueue.
*/
private static class Packet {
final long seqno;
final boolean lastPacketInBlock;
final long offsetInBlock;
final long ackEnqueueNanoTime;
final Status ackStatus;
Packet(long seqno, boolean lastPacketInBlock, long offsetInBlock,
long ackEnqueueNanoTime, Status ackStatus) {
this.seqno = seqno;
this.lastPacketInBlock = lastPacketInBlock;
this.offsetInBlock = offsetInBlock;
this.ackEnqueueNanoTime = ackEnqueueNanoTime;
this.ackStatus = ackStatus;
}
@Override
public String toString() {
return getClass().getSimpleName() + "(seqno=" + seqno
+ ", lastPacketInBlock=" + lastPacketInBlock
+ ", offsetInBlock=" + offsetInBlock
+ ", ackEnqueueNanoTime=" + ackEnqueueNanoTime
+ ", ackStatus=" + ackStatus
+ ")";
}
}
}
| 59,282 | 37.470474 | 94 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ErrorReportAction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.ipc.RemoteException;
/**
 * An ErrorReportAction is an instruction issued by BPOfferService to
 * BPServiceActor to report an error, identified by errorCode and
 * errorMessage, to the namenode.
*/
public class ErrorReportAction implements BPServiceActorAction {
final int errorCode;
final String errorMessage;
public ErrorReportAction(int errorCode, String errorMessage) {
this.errorCode = errorCode;
this.errorMessage = errorMessage;
}
@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode,
DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
try {
bpNamenode.errorReport(bpRegistration, errorCode, errorMessage);
} catch (RemoteException re) {
DataNode.LOG.info("trySendErrorReport encountered RemoteException "
+ "errorMessage: " + errorMessage + " errorCode: " + errorCode, re);
} catch(IOException e) {
      throw new BPServiceActorActionException("Error reporting "
          + "an error to namenode: " + errorMessage);
}
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + errorCode;
result = prime * result
+ ((errorMessage == null) ? 0 : errorMessage.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || !(obj instanceof ErrorReportAction)) {
return false;
}
ErrorReportAction other = (ErrorReportAction) obj;
if (errorCode != other.errorCode) {
return false;
}
if (errorMessage == null) {
if (other.errorMessage != null) {
return false;
}
} else if (!errorMessage.equals(other.errorMessage)) {
return false;
}
return true;
}
}
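// A hedged usage sketch: BPOfferService#trySendErrorReport builds one of these
// actions per BPServiceActor and queues it, so the actor thread reports the
// error to its namenode on its next pass through the queue, e.g.:
//
//   ErrorReportAction action =
//       new ErrorReportAction(DatanodeProtocol.DISK_ERROR, "volume failure");
//   actor.bpThreadEnqueue(action);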
| 2,839 | 31.272727 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.*;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* One instance per block-pool/namespace on the DN, which handles the
* heartbeats to the active and standby NNs for that namespace.
* This class manages an instance of {@link BPServiceActor} for each NN,
* and delegates calls to both NNs.
* It also maintains the state about which of the NNs is considered active.
*/
@InterfaceAudience.Private
class BPOfferService {
static final Log LOG = DataNode.LOG;
/**
* Information about the namespace that this service
* is registering with. This is assigned after
* the first phase of the handshake.
*/
NamespaceInfo bpNSInfo;
/**
* The registration information for this block pool.
* This is assigned after the second phase of the
* handshake.
*/
volatile DatanodeRegistration bpRegistration;
private final DataNode dn;
/**
* A reference to the BPServiceActor associated with the currently
* ACTIVE NN. In the case that all NameNodes are in STANDBY mode,
* this can be null. If non-null, this must always refer to a member
* of the {@link #bpServices} list.
*/
private BPServiceActor bpServiceToActive = null;
/**
* The list of all actors for namenodes in this nameservice, regardless
* of their active or standby states.
*/
private final List<BPServiceActor> bpServices =
new CopyOnWriteArrayList<BPServiceActor>();
/**
* Each time we receive a heartbeat from a NN claiming to be ACTIVE,
* we record that NN's most recent transaction ID here, so long as it
* is more recent than the previous value. This allows us to detect
* split-brain scenarios in which a prior NN is still asserting its
* ACTIVE state but with a too-low transaction ID. See HDFS-2627
* for details.
*/
private long lastActiveClaimTxId = -1;
private final ReentrantReadWriteLock mReadWriteLock =
new ReentrantReadWriteLock();
private final Lock mReadLock = mReadWriteLock.readLock();
private final Lock mWriteLock = mReadWriteLock.writeLock();
// utility methods to acquire and release read lock and write lock
void readLock() {
mReadLock.lock();
}
void readUnlock() {
mReadLock.unlock();
}
void writeLock() {
mWriteLock.lock();
}
void writeUnlock() {
mWriteLock.unlock();
}
BPOfferService(List<InetSocketAddress> nnAddrs, DataNode dn) {
Preconditions.checkArgument(!nnAddrs.isEmpty(),
"Must pass at least one NN.");
this.dn = dn;
for (InetSocketAddress addr : nnAddrs) {
this.bpServices.add(new BPServiceActor(addr, this));
}
}
void refreshNNList(ArrayList<InetSocketAddress> addrs) throws IOException {
Set<InetSocketAddress> oldAddrs = Sets.newHashSet();
for (BPServiceActor actor : bpServices) {
oldAddrs.add(actor.getNNSocketAddress());
}
Set<InetSocketAddress> newAddrs = Sets.newHashSet(addrs);
if (!Sets.symmetricDifference(oldAddrs, newAddrs).isEmpty()) {
// Keep things simple for now -- we can implement this at a later date.
throw new IOException(
"HA does not currently support adding a new standby to a running DN. " +
"Please do a rolling restart of DNs to reconfigure the list of NNs.");
}
}
/**
* @return true if the service has registered with at least one NameNode.
*/
boolean isInitialized() {
return bpRegistration != null;
}
/**
* @return true if there is at least one actor thread running which is
* talking to a NameNode.
*/
boolean isAlive() {
for (BPServiceActor actor : bpServices) {
if (actor.isAlive()) {
return true;
}
}
return false;
}
String getBlockPoolId() {
readLock();
try {
if (bpNSInfo != null) {
return bpNSInfo.getBlockPoolID();
} else {
LOG.warn("Block pool ID needed, but service not yet registered with NN",
new Exception("trace"));
return null;
}
} finally {
readUnlock();
}
}
boolean hasBlockPoolId() {
return getNamespaceInfo() != null;
}
NamespaceInfo getNamespaceInfo() {
readLock();
try {
return bpNSInfo;
} finally {
readUnlock();
}
}
@Override
public String toString() {
readLock();
try {
if (bpNSInfo == null) {
// If we haven't yet connected to our NN, we don't yet know our
// own block pool ID.
// If _none_ of the block pools have connected yet, we don't even
// know the DatanodeID ID of this DN.
String datanodeUuid = dn.getDatanodeUuid();
if (datanodeUuid == null || datanodeUuid.isEmpty()) {
datanodeUuid = "unassigned";
}
return "Block pool <registering> (Datanode Uuid " + datanodeUuid + ")";
} else {
return "Block pool " + getBlockPoolId() +
" (Datanode Uuid " + dn.getDatanodeUuid() +
")";
}
} finally {
readUnlock();
}
}
void reportBadBlocks(ExtendedBlock block,
String storageUuid, StorageType storageType) {
checkBlock(block);
for (BPServiceActor actor : bpServices) {
ReportBadBlockAction rbbAction = new ReportBadBlockAction
(block, storageUuid, storageType);
actor.bpThreadEnqueue(rbbAction);
}
}
/*
* Informing the name node could take a long long time! Should we wait
* till namenode is informed before responding with success to the
* client? For now we don't.
*/
void notifyNamenodeReceivedBlock(
ExtendedBlock block, String delHint, String storageUuid) {
checkBlock(block);
ReceivedDeletedBlockInfo bInfo = new ReceivedDeletedBlockInfo(
block.getLocalBlock(),
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
delHint);
for (BPServiceActor actor : bpServices) {
actor.notifyNamenodeBlock(bInfo, storageUuid, true);
}
}
private void checkBlock(ExtendedBlock block) {
Preconditions.checkArgument(block != null,
"block is null");
Preconditions.checkArgument(block.getBlockPoolId().equals(getBlockPoolId()),
"block belongs to BP %s instead of BP %s",
block.getBlockPoolId(), getBlockPoolId());
}
void notifyNamenodeDeletedBlock(ExtendedBlock block, String storageUuid) {
checkBlock(block);
ReceivedDeletedBlockInfo bInfo = new ReceivedDeletedBlockInfo(
block.getLocalBlock(), BlockStatus.DELETED_BLOCK, null);
for (BPServiceActor actor : bpServices) {
actor.notifyNamenodeDeletedBlock(bInfo, storageUuid);
}
}
void notifyNamenodeReceivingBlock(ExtendedBlock block, String storageUuid) {
checkBlock(block);
ReceivedDeletedBlockInfo bInfo = new ReceivedDeletedBlockInfo(
block.getLocalBlock(), BlockStatus.RECEIVING_BLOCK, null);
for (BPServiceActor actor : bpServices) {
actor.notifyNamenodeBlock(bInfo, storageUuid, false);
}
}
//This must be called only by blockPoolManager
void start() {
for (BPServiceActor actor : bpServices) {
actor.start();
}
}
//This must be called only by blockPoolManager.
void stop() {
for (BPServiceActor actor : bpServices) {
actor.stop();
}
}
//This must be called only by blockPoolManager
void join() {
for (BPServiceActor actor : bpServices) {
actor.join();
}
}
DataNode getDataNode() {
return dn;
}
/**
* Called by the BPServiceActors when they handshake to a NN.
* If this is the first NN connection, this sets the namespace info
* for this BPOfferService. If it's a connection to a new NN, it
* verifies that this namespace matches (eg to prevent a misconfiguration
* where a StandbyNode from a different cluster is specified)
*/
void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOException {
writeLock();
try {
if (this.bpNSInfo == null) {
this.bpNSInfo = nsInfo;
boolean success = false;
// Now that we know the namespace ID, etc, we can pass this to the DN.
// The DN can now initialize its local storage if we are the
// first BP to handshake, etc.
try {
dn.initBlockPool(this);
success = true;
} finally {
if (!success) {
// The datanode failed to initialize the BP. We need to reset
// the namespace info so that other BPService actors still have
// a chance to set it, and re-initialize the datanode.
this.bpNSInfo = null;
}
}
} else {
checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(),
"Blockpool ID");
checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(),
"Namespace ID");
checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(),
"Cluster ID");
}
} finally {
writeUnlock();
}
}
/**
* After one of the BPServiceActors registers successfully with the
* NN, it calls this function to verify that the NN it connected to
* is consistent with other NNs serving the block-pool.
*/
void registrationSucceeded(BPServiceActor bpServiceActor,
DatanodeRegistration reg) throws IOException {
writeLock();
try {
if (bpRegistration != null) {
checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(),
reg.getStorageInfo().getNamespaceID(), "namespace ID");
checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
reg.getStorageInfo().getClusterID(), "cluster ID");
} else {
bpRegistration = reg;
}
dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
// Add the initial block token secret keys to the DN's secret manager.
if (dn.isBlockTokenEnabled) {
dn.blockPoolTokenSecretManager.addKeys(getBlockPoolId(),
reg.getExportedKeys());
}
} finally {
writeUnlock();
}
}
/**
* Verify equality of two namespace-related fields, throwing
* an exception if they are unequal.
*/
private static void checkNSEquality(
Object ourID, Object theirID,
String idHelpText) throws IOException {
if (!ourID.equals(theirID)) {
throw new IOException(idHelpText + " mismatch: " +
"previously connected to " + idHelpText + " " + ourID +
" but now connected to " + idHelpText + " " + theirID);
}
}
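  // Example of the misconfiguration this guards against: if a DN that first
  // handshook with cluster "CID-aaaa" is also pointed at an NN from cluster
  // "CID-bbbb" (hypothetical IDs), the second handshake throws
  //   "Cluster ID mismatch: previously connected to Cluster ID CID-aaaa
  //    but now connected to Cluster ID CID-bbbb"
  // so the mismatched NN connection never completes registration.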
DatanodeRegistration createRegistration() {
writeLock();
try {
Preconditions.checkState(bpNSInfo != null,
"getRegistration() can only be called after initial handshake");
return dn.createBPRegistration(bpNSInfo);
} finally {
writeUnlock();
}
}
/**
* Called when an actor shuts down. If this is the last actor
* to shut down, shuts down the whole blockpool in the DN.
*/
void shutdownActor(BPServiceActor actor) {
writeLock();
try {
if (bpServiceToActive == actor) {
bpServiceToActive = null;
}
bpServices.remove(actor);
if (bpServices.isEmpty()) {
dn.shutdownBlockPool(this);
}
} finally {
writeUnlock();
}
}
/**
* Called by the DN to report an error to the NNs.
*/
void trySendErrorReport(int errCode, String errMsg) {
for (BPServiceActor actor : bpServices) {
ErrorReportAction errorReportAction = new ErrorReportAction
(errCode, errMsg);
actor.bpThreadEnqueue(errorReportAction);
}
}
/**
* Ask each of the actors to schedule a block report after
* the specified delay.
*/
void scheduleBlockReport(long delay) {
for (BPServiceActor actor : bpServices) {
actor.getScheduler().scheduleBlockReport(delay);
}
}
/**
* Ask each of the actors to report a bad block hosted on another DN.
*/
void reportRemoteBadBlock(DatanodeInfo dnInfo, ExtendedBlock block) {
for (BPServiceActor actor : bpServices) {
try {
actor.reportRemoteBadBlock(dnInfo, block);
} catch (IOException e) {
LOG.warn("Couldn't report bad block " + block + " to " + actor,
e);
}
}
}
/**
* @return a proxy to the active NN, or null if the BPOS has not
* acknowledged any NN as active yet.
*/
DatanodeProtocolClientSideTranslatorPB getActiveNN() {
readLock();
try {
if (bpServiceToActive != null) {
return bpServiceToActive.bpNamenode;
} else {
return null;
}
} finally {
readUnlock();
}
}
@VisibleForTesting
List<BPServiceActor> getBPServiceActors() {
return Lists.newArrayList(bpServices);
}
/**
* Signal the current rolling upgrade status as indicated by the NN.
* @param rollingUpgradeStatus rolling upgrade status
*/
void signalRollingUpgrade(RollingUpgradeStatus rollingUpgradeStatus)
throws IOException {
if (rollingUpgradeStatus == null) {
return;
}
String bpid = getBlockPoolId();
if (!rollingUpgradeStatus.isFinalized()) {
dn.getFSDataset().enableTrash(bpid);
dn.getFSDataset().setRollingUpgradeMarker(bpid);
} else {
dn.getFSDataset().clearTrash(bpid);
dn.getFSDataset().clearRollingUpgradeMarker(bpid);
}
}
/**
* Update the BPOS's view of which NN is active, based on a heartbeat
* response from one of the actors.
*
* @param actor the actor which received the heartbeat
* @param nnHaState the HA-related heartbeat contents
*/
void updateActorStatesFromHeartbeat(
BPServiceActor actor,
NNHAStatusHeartbeat nnHaState) {
writeLock();
try {
final long txid = nnHaState.getTxId();
final boolean nnClaimsActive =
nnHaState.getState() == HAServiceState.ACTIVE;
final boolean bposThinksActive = bpServiceToActive == actor;
final boolean isMoreRecentClaim = txid > lastActiveClaimTxId;
if (nnClaimsActive && !bposThinksActive) {
LOG.info("Namenode " + actor + " trying to claim ACTIVE state with " +
"txid=" + txid);
if (!isMoreRecentClaim) {
// Split-brain scenario - an NN is trying to claim active
// state when a different NN has already claimed it with a higher
// txid.
LOG.warn("NN " + actor + " tried to claim ACTIVE state at txid=" +
txid + " but there was already a more recent claim at txid=" +
lastActiveClaimTxId);
return;
} else {
if (bpServiceToActive == null) {
LOG.info("Acknowledging ACTIVE Namenode " + actor);
} else {
LOG.info("Namenode " + actor + " taking over ACTIVE state from " +
bpServiceToActive + " at higher txid=" + txid);
}
bpServiceToActive = actor;
}
} else if (!nnClaimsActive && bposThinksActive) {
LOG.info("Namenode " + actor + " relinquishing ACTIVE state with " +
"txid=" + nnHaState.getTxId());
bpServiceToActive = null;
}
if (bpServiceToActive == actor) {
assert txid >= lastActiveClaimTxId;
lastActiveClaimTxId = txid;
}
} finally {
writeUnlock();
}
}
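  // Worked example of the txid fencing above: with NN1 acknowledged as active
  // and lastActiveClaimTxId = 200,
  //   - a heartbeat from NN2 claiming ACTIVE at txid 150 is logged and ignored
  //     (a stale claim, i.e. the split-brain case from HDFS-2627), while
  //   - a heartbeat from NN2 claiming ACTIVE at txid 250 makes NN2 the new
  //     bpServiceToActive and advances lastActiveClaimTxId to 250.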
/**
* @return true if the given NN address is one of the NNs for this
* block pool
*/
boolean containsNN(InetSocketAddress addr) {
for (BPServiceActor actor : bpServices) {
if (actor.getNNSocketAddress().equals(addr)) {
return true;
}
}
return false;
}
@VisibleForTesting
int countNameNodes() {
return bpServices.size();
}
/**
* Run an immediate block report on this thread. Used by tests.
*/
@VisibleForTesting
void triggerBlockReportForTests() throws IOException {
for (BPServiceActor actor : bpServices) {
actor.triggerBlockReportForTests();
}
}
/**
* Run an immediate deletion report on this thread. Used by tests.
*/
@VisibleForTesting
void triggerDeletionReportForTests() throws IOException {
for (BPServiceActor actor : bpServices) {
actor.triggerDeletionReportForTests();
}
}
/**
* Run an immediate heartbeat from all actors. Used by tests.
*/
@VisibleForTesting
void triggerHeartbeatForTests() throws IOException {
for (BPServiceActor actor : bpServices) {
actor.triggerHeartbeatForTests();
}
}
boolean processCommandFromActor(DatanodeCommand cmd,
BPServiceActor actor) throws IOException {
assert bpServices.contains(actor);
if (cmd == null) {
return true;
}
/*
* Datanode Registration can be done asynchronously here. No need to hold
     * the lock. For more information, refer to HDFS-5014.
*/
if (DatanodeProtocol.DNA_REGISTER == cmd.getAction()) {
// namenode requested a registration - at start or if NN lost contact
// Just logging the claiming state is OK here instead of checking the
// actor state by obtaining the lock
LOG.info("DatanodeCommand action : DNA_REGISTER from " + actor.nnAddr
+ " with " + actor.state + " state");
actor.reRegister();
return false;
}
writeLock();
try {
if (actor == bpServiceToActive) {
return processCommandFromActive(cmd, actor);
} else {
return processCommandFromStandby(cmd, actor);
}
} finally {
writeUnlock();
}
}
private String blockIdArrayToString(long ids[]) {
long maxNumberOfBlocksToLog = dn.getMaxNumberOfBlocksToLog();
StringBuilder bld = new StringBuilder();
String prefix = "";
for (int i = 0; i < ids.length; i++) {
if (i >= maxNumberOfBlocksToLog) {
bld.append("...");
break;
}
bld.append(prefix).append(ids[i]);
prefix = ", ";
}
return bld.toString();
}
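  // For example, with dn.getMaxNumberOfBlocksToLog() == 3, the block IDs
  // {1, 2, 3, 4, 5} are rendered as "1, 2, 3..." so very large DNA_CACHE or
  // DNA_UNCACHE commands do not flood the log.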
/**
   * This method handles all commands from the active namenode except
   * DNA_REGISTER, which is handled earlier in processCommandFromActor.
   *
   * @param cmd the command to process
* @return true if further processing may be required or false otherwise.
* @throws IOException
*/
private boolean processCommandFromActive(DatanodeCommand cmd,
BPServiceActor actor) throws IOException {
final BlockCommand bcmd =
cmd instanceof BlockCommand? (BlockCommand)cmd: null;
final BlockIdCommand blockIdCmd =
cmd instanceof BlockIdCommand ? (BlockIdCommand)cmd: null;
switch(cmd.getAction()) {
case DatanodeProtocol.DNA_TRANSFER:
// Send a copy of a block to another datanode
dn.transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(),
bcmd.getTargets(), bcmd.getTargetStorageTypes());
break;
case DatanodeProtocol.DNA_INVALIDATE:
//
// Some local block(s) are obsolete and can be
// safely garbage-collected.
//
Block toDelete[] = bcmd.getBlocks();
try {
// using global fsdataset
dn.getFSDataset().invalidate(bcmd.getBlockPoolId(), toDelete);
} catch(IOException e) {
// Exceptions caught here are not expected to be disk-related.
throw e;
}
dn.metrics.incrBlocksRemoved(toDelete.length);
break;
case DatanodeProtocol.DNA_CACHE:
LOG.info("DatanodeCommand action: DNA_CACHE for " +
blockIdCmd.getBlockPoolId() + " of [" +
blockIdArrayToString(blockIdCmd.getBlockIds()) + "]");
dn.getFSDataset().cache(blockIdCmd.getBlockPoolId(), blockIdCmd.getBlockIds());
break;
case DatanodeProtocol.DNA_UNCACHE:
LOG.info("DatanodeCommand action: DNA_UNCACHE for " +
blockIdCmd.getBlockPoolId() + " of [" +
blockIdArrayToString(blockIdCmd.getBlockIds()) + "]");
dn.getFSDataset().uncache(blockIdCmd.getBlockPoolId(), blockIdCmd.getBlockIds());
break;
case DatanodeProtocol.DNA_SHUTDOWN:
// TODO: DNA_SHUTDOWN appears to be unused - the NN never sends this command
// See HDFS-2987.
throw new UnsupportedOperationException("Received unimplemented DNA_SHUTDOWN");
case DatanodeProtocol.DNA_FINALIZE:
String bp = ((FinalizeCommand) cmd).getBlockPoolId();
LOG.info("Got finalize command for block pool " + bp);
assert getBlockPoolId().equals(bp) :
"BP " + getBlockPoolId() + " received DNA_FINALIZE " +
"for other block pool " + bp;
dn.finalizeUpgradeForPool(bp);
break;
case DatanodeProtocol.DNA_RECOVERBLOCK:
String who = "NameNode at " + actor.getNNSocketAddress();
dn.recoverBlocks(who, ((BlockRecoveryCommand)cmd).getRecoveringBlocks());
break;
case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
LOG.info("DatanodeCommand action: DNA_ACCESSKEYUPDATE");
if (dn.isBlockTokenEnabled) {
dn.blockPoolTokenSecretManager.addKeys(
getBlockPoolId(),
((KeyUpdateCommand) cmd).getExportedKeys());
}
break;
case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
LOG.info("DatanodeCommand action: DNA_BALANCERBANDWIDTHUPDATE");
long bandwidth =
((BalancerBandwidthCommand) cmd).getBalancerBandwidthValue();
if (bandwidth > 0) {
DataXceiverServer dxcs =
(DataXceiverServer) dn.dataXceiverServer.getRunnable();
LOG.info("Updating balance throttler bandwidth from "
+ dxcs.balanceThrottler.getBandwidth() + " bytes/s "
+ "to: " + bandwidth + " bytes/s.");
dxcs.balanceThrottler.setBandwidth(bandwidth);
}
break;
default:
LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
}
return true;
}
/**
   * This method handles commands from the standby namenode except
   * DNA_REGISTER, which is handled earlier in processCommandFromActor.
*/
private boolean processCommandFromStandby(DatanodeCommand cmd,
BPServiceActor actor) throws IOException {
switch(cmd.getAction()) {
case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
LOG.info("DatanodeCommand action from standby: DNA_ACCESSKEYUPDATE");
if (dn.isBlockTokenEnabled) {
dn.blockPoolTokenSecretManager.addKeys(
getBlockPoolId(),
((KeyUpdateCommand) cmd).getExportedKeys());
}
break;
case DatanodeProtocol.DNA_TRANSFER:
case DatanodeProtocol.DNA_INVALIDATE:
case DatanodeProtocol.DNA_SHUTDOWN:
case DatanodeProtocol.DNA_FINALIZE:
case DatanodeProtocol.DNA_RECOVERBLOCK:
case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
case DatanodeProtocol.DNA_CACHE:
case DatanodeProtocol.DNA_UNCACHE:
LOG.warn("Got a command from standby NN - ignoring command:" + cmd.getAction());
break;
default:
LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
}
return true;
}
/*
* Let the actor retry for initialization until all namenodes of cluster have
* failed.
*/
boolean shouldRetryInit() {
if (hasBlockPoolId()) {
      // One of the namenodes registered successfully. Let's continue retrying
      // for the others.
return true;
}
return isAlive();
}
}
| 24,962 | 31.127413 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.util.Time.monotonicNow;
import java.io.EOFException;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.base.Joiner;
import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.util.VersionUtil;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
/**
* A thread per active or standby namenode to perform:
* <ul>
* <li> Pre-registration handshake with namenode</li>
* <li> Registration with namenode</li>
* <li> Send periodic heartbeats to the namenode</li>
* <li> Handle commands received from the namenode</li>
* </ul>
*/
@InterfaceAudience.Private
class BPServiceActor implements Runnable {
static final Log LOG = DataNode.LOG;
final InetSocketAddress nnAddr;
HAServiceState state;
final BPOfferService bpos;
volatile long lastCacheReport = 0;
private final Scheduler scheduler;
Thread bpThread;
DatanodeProtocolClientSideTranslatorPB bpNamenode;
static enum RunningState {
CONNECTING, INIT_FAILED, RUNNING, EXITED, FAILED;
}
private volatile RunningState runningState = RunningState.CONNECTING;
/**
* Between block reports (which happen on the order of once an hour) the
   * DN reports smaller incremental changes to its block list. This map is
   * keyed by storage; each per-storage entry is in turn keyed by block ID
   * and holds the pending changes which have yet to be reported to the NN.
   * Access should be synchronized on this object.
*/
private final Map<DatanodeStorage, PerStoragePendingIncrementalBR>
pendingIncrementalBRperStorage = Maps.newHashMap();
// IBR = Incremental Block Report. If this flag is set then an IBR will be
// sent immediately by the actor thread without waiting for the IBR timer
// to elapse.
private volatile boolean sendImmediateIBR = false;
private volatile boolean shouldServiceRun = true;
private final DataNode dn;
private final DNConf dnConf;
private long prevBlockReportId;
private DatanodeRegistration bpRegistration;
final LinkedList<BPServiceActorAction> bpThreadQueue
= new LinkedList<BPServiceActorAction>();
BPServiceActor(InetSocketAddress nnAddr, BPOfferService bpos) {
this.bpos = bpos;
this.dn = bpos.getDataNode();
this.nnAddr = nnAddr;
this.dnConf = dn.getDnConf();
prevBlockReportId = ThreadLocalRandom.current().nextLong();
scheduler = new Scheduler(dnConf.heartBeatInterval, dnConf.blockReportInterval);
}
boolean isAlive() {
if (!shouldServiceRun || !bpThread.isAlive()) {
return false;
}
return runningState == BPServiceActor.RunningState.RUNNING
|| runningState == BPServiceActor.RunningState.CONNECTING;
}
@Override
public String toString() {
return bpos.toString() + " service to " + nnAddr;
}
InetSocketAddress getNNSocketAddress() {
return nnAddr;
}
/**
* Used to inject a spy NN in the unit tests.
*/
@VisibleForTesting
void setNameNode(DatanodeProtocolClientSideTranslatorPB dnProtocol) {
bpNamenode = dnProtocol;
}
@VisibleForTesting
DatanodeProtocolClientSideTranslatorPB getNameNodeProxy() {
return bpNamenode;
}
/**
* Perform the first part of the handshake with the NameNode.
* This calls <code>versionRequest</code> to determine the NN's
* namespace and version info. It automatically retries until
* the NN responds or the DN is shutting down.
*
* @return the NamespaceInfo
*/
@VisibleForTesting
NamespaceInfo retrieveNamespaceInfo() throws IOException {
NamespaceInfo nsInfo = null;
while (shouldRun()) {
try {
nsInfo = bpNamenode.versionRequest();
LOG.debug(this + " received versionRequest response: " + nsInfo);
break;
} catch(SocketTimeoutException e) { // namenode is busy
LOG.warn("Problem connecting to server: " + nnAddr);
} catch(IOException e ) { // namenode is not available
LOG.warn("Problem connecting to server: " + nnAddr);
}
// try again in a second
sleepAndLogInterrupts(5000, "requesting version info from NN");
}
if (nsInfo != null) {
checkNNVersion(nsInfo);
} else {
throw new IOException("DN shut down before block pool connected");
}
return nsInfo;
}
private void checkNNVersion(NamespaceInfo nsInfo)
throws IncorrectVersionException {
// build and layout versions should match
String nnVersion = nsInfo.getSoftwareVersion();
String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
IncorrectVersionException ive = new IncorrectVersionException(
minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
LOG.warn(ive.getMessage());
throw ive;
}
String dnVersion = VersionInfo.getVersion();
if (!nnVersion.equals(dnVersion)) {
LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
"DataNode version '" + dnVersion + "' but is within acceptable " +
"limits. Note: This is normal during a rolling upgrade.");
}
}
private void connectToNNAndHandshake() throws IOException {
// get NN proxy
bpNamenode = dn.connectToNN(nnAddr);
// First phase of the handshake with NN - get the namespace
// info.
NamespaceInfo nsInfo = retrieveNamespaceInfo();
// Verify that this matches the other NN in this HA pair.
// This also initializes our block pool in the DN if we are
// the first NN connection for this BP.
bpos.verifyAndSetNamespaceInfo(nsInfo);
// Second phase of the handshake with the NN.
register(nsInfo);
}
/**
* Report received blocks and delete hints to the Namenode for each
* storage.
*
* @throws IOException
*/
private void reportReceivedDeletedBlocks() throws IOException {
// Generate a list of the pending reports for each storage under the lock
ArrayList<StorageReceivedDeletedBlocks> reports =
new ArrayList<StorageReceivedDeletedBlocks>(pendingIncrementalBRperStorage.size());
synchronized (pendingIncrementalBRperStorage) {
for (Map.Entry<DatanodeStorage, PerStoragePendingIncrementalBR> entry :
pendingIncrementalBRperStorage.entrySet()) {
final DatanodeStorage storage = entry.getKey();
final PerStoragePendingIncrementalBR perStorageMap = entry.getValue();
if (perStorageMap.getBlockInfoCount() > 0) {
// Send newly-received and deleted blockids to namenode
ReceivedDeletedBlockInfo[] rdbi = perStorageMap.dequeueBlockInfos();
reports.add(new StorageReceivedDeletedBlocks(storage, rdbi));
}
}
sendImmediateIBR = false;
}
if (reports.size() == 0) {
// Nothing new to report.
return;
}
// Send incremental block reports to the Namenode outside the lock
boolean success = false;
final long startTime = monotonicNow();
try {
bpNamenode.blockReceivedAndDeleted(bpRegistration,
bpos.getBlockPoolId(),
reports.toArray(new StorageReceivedDeletedBlocks[reports.size()]));
success = true;
} finally {
dn.getMetrics().addIncrementalBlockReport(monotonicNow() - startTime);
if (!success) {
synchronized (pendingIncrementalBRperStorage) {
for (StorageReceivedDeletedBlocks report : reports) {
// If we didn't succeed in sending the report, put all of the
// blocks back onto our queue, but only in the case where we
// didn't put something newer in the meantime.
PerStoragePendingIncrementalBR perStorageMap =
pendingIncrementalBRperStorage.get(report.getStorage());
perStorageMap.putMissingBlockInfos(report.getBlocks());
sendImmediateIBR = true;
}
}
}
}
}
/**
* @return pending incremental block report for given {@code storage}
*/
private PerStoragePendingIncrementalBR getIncrementalBRMapForStorage(
DatanodeStorage storage) {
PerStoragePendingIncrementalBR mapForStorage =
pendingIncrementalBRperStorage.get(storage);
if (mapForStorage == null) {
// This is the first time we are adding incremental BR state for
// this storage so create a new map. This is required once per
// storage, per service actor.
mapForStorage = new PerStoragePendingIncrementalBR();
pendingIncrementalBRperStorage.put(storage, mapForStorage);
}
return mapForStorage;
}
/**
* Add a blockInfo for notification to NameNode. If another entry
* exists for the same block it is removed.
*
* Caller must synchronize access using pendingIncrementalBRperStorage.
*/
void addPendingReplicationBlockInfo(ReceivedDeletedBlockInfo bInfo,
DatanodeStorage storage) {
// Make sure another entry for the same block is first removed.
// There may only be one such entry.
for (Map.Entry<DatanodeStorage, PerStoragePendingIncrementalBR> entry :
pendingIncrementalBRperStorage.entrySet()) {
if (entry.getValue().removeBlockInfo(bInfo)) {
break;
}
}
getIncrementalBRMapForStorage(storage).putBlockInfo(bInfo);
}
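  // Example: if a block was already queued as RECEIVING_BLOCK under one
  // storage and is now being reported under another, the loop above drops the
  // stale entry first so the NN only ever sees the latest report for a block.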
/*
* Informing the name node could take a long long time! Should we wait
* till namenode is informed before responding with success to the
* client? For now we don't.
*/
void notifyNamenodeBlock(ReceivedDeletedBlockInfo bInfo,
String storageUuid, boolean now) {
synchronized (pendingIncrementalBRperStorage) {
addPendingReplicationBlockInfo(
bInfo, dn.getFSDataset().getStorage(storageUuid));
sendImmediateIBR = true;
// If now is true, the report is sent right away.
// Otherwise, it will be sent out in the next heartbeat.
if (now) {
pendingIncrementalBRperStorage.notifyAll();
}
}
}
void notifyNamenodeDeletedBlock(
ReceivedDeletedBlockInfo bInfo, String storageUuid) {
synchronized (pendingIncrementalBRperStorage) {
addPendingReplicationBlockInfo(
bInfo, dn.getFSDataset().getStorage(storageUuid));
}
}
/**
* Run an immediate block report on this thread. Used by tests.
*/
@VisibleForTesting
void triggerBlockReportForTests() {
synchronized (pendingIncrementalBRperStorage) {
scheduler.scheduleHeartbeat();
long oldBlockReportTime = scheduler.nextBlockReportTime;
scheduler.forceFullBlockReportNow();
pendingIncrementalBRperStorage.notifyAll();
while (oldBlockReportTime == scheduler.nextBlockReportTime) {
try {
pendingIncrementalBRperStorage.wait(100);
} catch (InterruptedException e) {
return;
}
}
}
}
@VisibleForTesting
void triggerHeartbeatForTests() {
synchronized (pendingIncrementalBRperStorage) {
final long nextHeartbeatTime = scheduler.scheduleHeartbeat();
pendingIncrementalBRperStorage.notifyAll();
while (nextHeartbeatTime - scheduler.nextHeartbeatTime >= 0) {
try {
pendingIncrementalBRperStorage.wait(100);
} catch (InterruptedException e) {
return;
}
}
}
}
@VisibleForTesting
void triggerDeletionReportForTests() {
synchronized (pendingIncrementalBRperStorage) {
sendImmediateIBR = true;
pendingIncrementalBRperStorage.notifyAll();
while (sendImmediateIBR) {
try {
pendingIncrementalBRperStorage.wait(100);
} catch (InterruptedException e) {
return;
}
}
}
}
@VisibleForTesting
boolean hasPendingIBR() {
return sendImmediateIBR;
}
private long generateUniqueBlockReportId() {
// Initialize the block report ID the first time through.
// Note that 0 is used on the NN to indicate "uninitialized", so we should
// not send a 0 value ourselves.
prevBlockReportId++;
while (prevBlockReportId == 0) {
prevBlockReportId = ThreadLocalRandom.current().nextLong();
}
return prevBlockReportId;
}
/**
   * Report the list of blocks to the Namenode.
* @return DatanodeCommands returned by the NN. May be null.
* @throws IOException
*/
List<DatanodeCommand> blockReport(long fullBrLeaseId) throws IOException {
final ArrayList<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
// Flush any block information that precedes the block report. Otherwise
    // there is a chance that we will miss the delHint information
// or we will report an RBW replica after the BlockReport already reports
// a FINALIZED one.
reportReceivedDeletedBlocks();
long brCreateStartTime = monotonicNow();
Map<DatanodeStorage, BlockListAsLongs> perVolumeBlockLists =
dn.getFSDataset().getBlockReports(bpos.getBlockPoolId());
// Convert the reports to the format expected by the NN.
int i = 0;
int totalBlockCount = 0;
StorageBlockReport reports[] =
new StorageBlockReport[perVolumeBlockLists.size()];
for(Map.Entry<DatanodeStorage, BlockListAsLongs> kvPair : perVolumeBlockLists.entrySet()) {
BlockListAsLongs blockList = kvPair.getValue();
reports[i++] = new StorageBlockReport(kvPair.getKey(), blockList);
totalBlockCount += blockList.getNumberOfBlocks();
}
// Send the reports to the NN.
int numReportsSent = 0;
int numRPCs = 0;
boolean success = false;
long brSendStartTime = monotonicNow();
long reportId = generateUniqueBlockReportId();
try {
if (totalBlockCount < dnConf.blockReportSplitThreshold) {
// Below split threshold, send all reports in a single message.
DatanodeCommand cmd = bpNamenode.blockReport(
bpRegistration, bpos.getBlockPoolId(), reports,
new BlockReportContext(1, 0, reportId, fullBrLeaseId));
numRPCs = 1;
numReportsSent = reports.length;
if (cmd != null) {
cmds.add(cmd);
}
} else {
// Send one block report per message.
for (int r = 0; r < reports.length; r++) {
StorageBlockReport singleReport[] = { reports[r] };
DatanodeCommand cmd = bpNamenode.blockReport(
bpRegistration, bpos.getBlockPoolId(), singleReport,
new BlockReportContext(reports.length, r, reportId,
fullBrLeaseId));
numReportsSent++;
numRPCs++;
if (cmd != null) {
cmds.add(cmd);
}
}
}
success = true;
} finally {
// Log the block report processing stats from Datanode perspective
long brSendCost = monotonicNow() - brSendStartTime;
long brCreateCost = brSendStartTime - brCreateStartTime;
dn.getMetrics().addBlockReport(brSendCost);
final int nCmds = cmds.size();
LOG.info((success ? "S" : "Uns") +
"uccessfully sent block report 0x" +
Long.toHexString(reportId) + ", containing " + reports.length +
" storage report(s), of which we sent " + numReportsSent + "." +
" The reports had " + totalBlockCount +
" total blocks and used " + numRPCs +
" RPC(s). This took " + brCreateCost +
" msec to generate and " + brSendCost +
" msecs for RPC and NN processing." +
" Got back " +
((nCmds == 0) ? "no commands" :
((nCmds == 1) ? "one command: " + cmds.get(0) :
(nCmds + " commands: " + Joiner.on("; ").join(cmds)))) +
".");
}
scheduler.scheduleNextBlockReport();
return cmds.size() == 0 ? null : cmds;
}
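  /*
   * A rough illustration of the split logic above, using the threshold from
   * dnConf.blockReportSplitThreshold: with 4 storages holding 400k blocks in
   * total and a threshold of 1,000,000, all 4 storage reports are sent in a
   * single blockReport RPC; if the total were 2,000,000 blocks instead, each
   * storage report would go out as its own RPC so no single message becomes
   * too large for the NN to process in one call.
   */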
DatanodeCommand cacheReport() throws IOException {
// If caching is disabled, do not send a cache report
if (dn.getFSDataset().getCacheCapacity() == 0) {
return null;
}
// send cache report if timer has expired.
DatanodeCommand cmd = null;
final long startTime = monotonicNow();
if (startTime - lastCacheReport > dnConf.cacheReportInterval) {
if (LOG.isDebugEnabled()) {
LOG.debug("Sending cacheReport from service actor: " + this);
}
lastCacheReport = startTime;
String bpid = bpos.getBlockPoolId();
List<Long> blockIds = dn.getFSDataset().getCacheReport(bpid);
long createTime = monotonicNow();
cmd = bpNamenode.cacheReport(bpRegistration, bpid, blockIds);
long sendTime = monotonicNow();
long createCost = createTime - startTime;
long sendCost = sendTime - createTime;
dn.getMetrics().addCacheReport(sendCost);
if (LOG.isDebugEnabled()) {
LOG.debug("CacheReport of " + blockIds.size()
+ " block(s) took " + createCost + " msec to generate and "
+ sendCost + " msecs for RPC and NN processing");
}
}
return cmd;
}
HeartbeatResponse sendHeartBeat(boolean requestBlockReportLease)
throws IOException {
StorageReport[] reports =
dn.getFSDataset().getStorageReports(bpos.getBlockPoolId());
if (LOG.isDebugEnabled()) {
LOG.debug("Sending heartbeat with " + reports.length +
" storage reports from service actor: " + this);
}
VolumeFailureSummary volumeFailureSummary = dn.getFSDataset()
.getVolumeFailureSummary();
int numFailedVolumes = volumeFailureSummary != null ?
volumeFailureSummary.getFailedStorageLocations().length : 0;
return bpNamenode.sendHeartbeat(bpRegistration,
reports,
dn.getFSDataset().getCacheCapacity(),
dn.getFSDataset().getCacheUsed(),
dn.getXmitsInProgress(),
dn.getXceiverCount(),
numFailedVolumes,
volumeFailureSummary,
requestBlockReportLease);
}
//This must be called only by BPOfferService
void start() {
if ((bpThread != null) && (bpThread.isAlive())) {
//Thread is started already
return;
}
bpThread = new Thread(this, formatThreadName());
bpThread.setDaemon(true); // needed for JUnit testing
bpThread.start();
}
private String formatThreadName() {
Collection<StorageLocation> dataDirs =
DataNode.getStorageLocations(dn.getConf());
return "DataNode: [" + dataDirs.toString() + "] " +
" heartbeating to " + nnAddr;
}
//This must be called only by blockPoolManager.
void stop() {
shouldServiceRun = false;
if (bpThread != null) {
bpThread.interrupt();
}
}
//This must be called only by blockPoolManager
void join() {
try {
if (bpThread != null) {
bpThread.join();
}
} catch (InterruptedException ie) { }
}
//Cleanup method to be called by current thread before exiting.
private synchronized void cleanUp() {
shouldServiceRun = false;
IOUtils.cleanup(LOG, bpNamenode);
bpos.shutdownActor(this);
}
private void handleRollingUpgradeStatus(HeartbeatResponse resp) throws IOException {
RollingUpgradeStatus rollingUpgradeStatus = resp.getRollingUpdateStatus();
if (rollingUpgradeStatus != null &&
rollingUpgradeStatus.getBlockPoolId().compareTo(bpos.getBlockPoolId()) != 0) {
// Can this ever occur?
LOG.error("Invalid BlockPoolId " +
rollingUpgradeStatus.getBlockPoolId() +
" in HeartbeatResponse. Expected " +
bpos.getBlockPoolId());
} else {
bpos.signalRollingUpgrade(rollingUpgradeStatus);
}
}
/**
* Main loop for each BP thread. Run until shutdown,
* forever calling remote NameNode functions.
*/
private void offerService() throws Exception {
LOG.info("For namenode " + nnAddr + " using"
+ " BLOCKREPORT_INTERVAL of " + dnConf.blockReportInterval + "msec"
+ " CACHEREPORT_INTERVAL of " + dnConf.cacheReportInterval + "msec"
+ " Initial delay: " + dnConf.initialBlockReportDelayMs + "msec"
+ "; heartBeatInterval=" + dnConf.heartBeatInterval);
long fullBlockReportLeaseId = 0;
//
// Now loop for a long time....
//
while (shouldRun()) {
try {
final long startTime = scheduler.monotonicNow();
//
// Every so often, send heartbeat or block-report
//
final boolean sendHeartbeat = scheduler.isHeartbeatDue(startTime);
HeartbeatResponse resp = null;
if (sendHeartbeat) {
//
// All heartbeat messages include following info:
// -- Datanode name
// -- data transfer port
// -- Total capacity
// -- Bytes remaining
//
boolean requestBlockReportLease = (fullBlockReportLeaseId == 0) &&
scheduler.isBlockReportDue(startTime);
scheduler.scheduleNextHeartbeat();
if (!dn.areHeartbeatsDisabledForTests()) {
resp = sendHeartBeat(requestBlockReportLease);
assert resp != null;
if (resp.getFullBlockReportLeaseId() != 0) {
if (fullBlockReportLeaseId != 0) {
LOG.warn(nnAddr + " sent back a full block report lease " +
"ID of 0x" +
Long.toHexString(resp.getFullBlockReportLeaseId()) +
", but we already have a lease ID of 0x" +
Long.toHexString(fullBlockReportLeaseId) + ". " +
"Overwriting old lease ID.");
}
fullBlockReportLeaseId = resp.getFullBlockReportLeaseId();
}
dn.getMetrics().addHeartbeat(scheduler.monotonicNow() - startTime);
// If the state of this NN has changed (eg STANDBY->ACTIVE)
// then let the BPOfferService update itself.
//
// Important that this happens before processCommand below,
// since the first heartbeat to a new active might have commands
// that we should actually process.
bpos.updateActorStatesFromHeartbeat(
this, resp.getNameNodeHaState());
state = resp.getNameNodeHaState().getState();
if (state == HAServiceState.ACTIVE) {
handleRollingUpgradeStatus(resp);
}
long startProcessCommands = monotonicNow();
if (!processCommand(resp.getCommands()))
continue;
long endProcessCommands = monotonicNow();
if (endProcessCommands - startProcessCommands > 2000) {
LOG.info("Took " + (endProcessCommands - startProcessCommands)
+ "ms to process " + resp.getCommands().length
+ " commands from NN");
}
}
}
if (sendImmediateIBR || sendHeartbeat) {
reportReceivedDeletedBlocks();
}
List<DatanodeCommand> cmds = null;
boolean forceFullBr =
scheduler.forceFullBlockReport.getAndSet(false);
if (forceFullBr) {
LOG.info("Forcing a full block report to " + nnAddr);
}
if ((fullBlockReportLeaseId != 0) || forceFullBr) {
cmds = blockReport(fullBlockReportLeaseId);
fullBlockReportLeaseId = 0;
}
processCommand(cmds == null ? null : cmds.toArray(new DatanodeCommand[cmds.size()]));
if (!dn.areCacheReportsDisabledForTests()) {
DatanodeCommand cmd = cacheReport();
processCommand(new DatanodeCommand[]{ cmd });
}
//
        // There is no work to do; sleep until heartbeat timer elapses,
// or work arrives, and then iterate again.
//
long waitTime = scheduler.getHeartbeatWaitTime();
synchronized(pendingIncrementalBRperStorage) {
if (waitTime > 0 && !sendImmediateIBR) {
try {
pendingIncrementalBRperStorage.wait(waitTime);
} catch (InterruptedException ie) {
LOG.warn("BPOfferService for " + this + " interrupted");
}
}
} // synchronized
} catch(RemoteException re) {
String reClass = re.getClassName();
if (UnregisteredNodeException.class.getName().equals(reClass) ||
DisallowedDatanodeException.class.getName().equals(reClass) ||
IncorrectVersionException.class.getName().equals(reClass)) {
LOG.warn(this + " is shutting down", re);
shouldServiceRun = false;
return;
}
LOG.warn("RemoteException in offerService", re);
try {
long sleepTime = Math.min(1000, dnConf.heartBeatInterval);
Thread.sleep(sleepTime);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
} catch (IOException e) {
LOG.warn("IOException in offerService", e);
}
processQueueMessages();
} // while (shouldRun())
} // offerService
/**
* Register one bp with the corresponding NameNode
* <p>
   * The bpDatanode needs to register with the namenode on startup in order
   * 1) to report which storage it is serving now and
   * 2) to receive a registrationID issued by the namenode to recognize
   *    registered datanodes.
*
* @param nsInfo current NamespaceInfo
* @see FSNamesystem#registerDatanode(DatanodeRegistration)
* @throws IOException
*/
void register(NamespaceInfo nsInfo) throws IOException {
// The handshake() phase loaded the block pool storage
// off disk - so update the bpRegistration object from that info
bpRegistration = bpos.createRegistration();
LOG.info(this + " beginning handshake with NN");
while (shouldRun()) {
try {
// Use returned registration from namenode with updated fields
bpRegistration = bpNamenode.registerDatanode(bpRegistration);
bpRegistration.setNamespaceInfo(nsInfo);
break;
} catch(EOFException e) { // namenode might have just restarted
LOG.info("Problem connecting to server: " + nnAddr + " :"
+ e.getLocalizedMessage());
sleepAndLogInterrupts(1000, "connecting to server");
} catch(SocketTimeoutException e) { // namenode is busy
LOG.info("Problem connecting to server: " + nnAddr);
sleepAndLogInterrupts(1000, "connecting to server");
}
}
LOG.info("Block pool " + this + " successfully registered with NN");
bpos.registrationSucceeded(this, bpRegistration);
// random short delay - helps scatter the BR from all DNs
scheduler.scheduleBlockReport(dnConf.initialBlockReportDelayMs);
}
private void sleepAndLogInterrupts(int millis,
String stateString) {
try {
Thread.sleep(millis);
} catch (InterruptedException ie) {
LOG.info("BPOfferService " + this + " interrupted while " + stateString);
}
}
/**
   * No matter what kind of exception we get, keep retrying offerService().
* That's the loop that connects to the NameNode and provides basic DataNode
* functionality.
*
* Only stop when "shouldRun" or "shouldServiceRun" is turned off, which can
* happen either at shutdown or due to refreshNamenodes.
*/
@Override
public void run() {
LOG.info(this + " starting to offer service");
try {
while (true) {
// init stuff
try {
// setup storage
connectToNNAndHandshake();
break;
} catch (IOException ioe) {
// Initial handshake, storage recovery or registration failed
runningState = RunningState.INIT_FAILED;
if (shouldRetryInit()) {
          // Retry until all namenodes of the BPOS have failed initialization
LOG.error("Initialization failed for " + this + " "
+ ioe.getLocalizedMessage());
sleepAndLogInterrupts(5000, "initializing");
} else {
runningState = RunningState.FAILED;
LOG.fatal("Initialization failed for " + this + ". Exiting. ", ioe);
return;
}
}
}
runningState = RunningState.RUNNING;
while (shouldRun()) {
try {
offerService();
} catch (Exception ex) {
LOG.error("Exception in BPOfferService for " + this, ex);
sleepAndLogInterrupts(5000, "offering service");
}
}
runningState = RunningState.EXITED;
} catch (Throwable ex) {
LOG.warn("Unexpected exception in block pool " + this, ex);
runningState = RunningState.FAILED;
} finally {
LOG.warn("Ending block pool service for: " + this);
cleanUp();
}
}
private boolean shouldRetryInit() {
return shouldRun() && bpos.shouldRetryInit();
}
private boolean shouldRun() {
return shouldServiceRun && dn.shouldRun();
}
/**
* Process an array of datanode commands
*
* @param cmds an array of datanode commands
* @return true if further processing may be required or false otherwise.
*/
boolean processCommand(DatanodeCommand[] cmds) {
if (cmds != null) {
for (DatanodeCommand cmd : cmds) {
try {
if (bpos.processCommandFromActor(cmd, this) == false) {
return false;
}
} catch (IOException ioe) {
LOG.warn("Error processing datanode Command", ioe);
}
}
}
return true;
}
/**
* Report a bad block from another DN in this cluster.
*/
void reportRemoteBadBlock(DatanodeInfo dnInfo, ExtendedBlock block)
throws IOException {
LocatedBlock lb = new LocatedBlock(block,
new DatanodeInfo[] {dnInfo});
bpNamenode.reportBadBlocks(new LocatedBlock[] {lb});
}
void reRegister() throws IOException {
if (shouldRun()) {
// re-retrieve namespace info to make sure that, if the NN
// was restarted, we still match its version (HDFS-2120)
NamespaceInfo nsInfo = retrieveNamespaceInfo();
// and re-register
register(nsInfo);
scheduler.scheduleHeartbeat();
}
}
private static class PerStoragePendingIncrementalBR {
private final Map<Long, ReceivedDeletedBlockInfo> pendingIncrementalBR =
Maps.newHashMap();
/**
* Return the number of blocks on this storage that have pending
* incremental block reports.
     * @return the number of pending incremental block report entries.
*/
int getBlockInfoCount() {
return pendingIncrementalBR.size();
}
/**
* Dequeue and return all pending incremental block report state.
     * @return all pending block infos, which are removed from this map.
*/
ReceivedDeletedBlockInfo[] dequeueBlockInfos() {
ReceivedDeletedBlockInfo[] blockInfos =
pendingIncrementalBR.values().toArray(
new ReceivedDeletedBlockInfo[getBlockInfoCount()]);
pendingIncrementalBR.clear();
return blockInfos;
}
/**
* Add blocks from blockArray to pendingIncrementalBR, unless the
* block already exists in pendingIncrementalBR.
* @param blockArray list of blocks to add.
* @return the number of missing blocks that we added.
*/
int putMissingBlockInfos(ReceivedDeletedBlockInfo[] blockArray) {
int blocksPut = 0;
for (ReceivedDeletedBlockInfo rdbi : blockArray) {
if (!pendingIncrementalBR.containsKey(rdbi.getBlock().getBlockId())) {
pendingIncrementalBR.put(rdbi.getBlock().getBlockId(), rdbi);
++blocksPut;
}
}
return blocksPut;
}
/**
* Add pending incremental block report for a single block.
* @param blockInfo
*/
void putBlockInfo(ReceivedDeletedBlockInfo blockInfo) {
pendingIncrementalBR.put(blockInfo.getBlock().getBlockId(), blockInfo);
}
/**
* Remove pending incremental block report for a single block if it
* exists.
*
* @param blockInfo
* @return true if a report was removed, false if no report existed for
* the given block.
*/
boolean removeBlockInfo(ReceivedDeletedBlockInfo blockInfo) {
return (pendingIncrementalBR.remove(blockInfo.getBlock().getBlockId()) != null);
}
}
void triggerBlockReport(BlockReportOptions options) throws IOException {
if (options.isIncremental()) {
LOG.info(bpos.toString() + ": scheduling an incremental block report.");
synchronized(pendingIncrementalBRperStorage) {
sendImmediateIBR = true;
pendingIncrementalBRperStorage.notifyAll();
}
} else {
LOG.info(bpos.toString() + ": scheduling a full block report.");
synchronized(pendingIncrementalBRperStorage) {
scheduler.forceFullBlockReportNow();
pendingIncrementalBRperStorage.notifyAll();
}
}
}
public void bpThreadEnqueue(BPServiceActorAction action) {
synchronized (bpThreadQueue) {
if (!bpThreadQueue.contains(action)) {
bpThreadQueue.add(action);
}
}
}
private void processQueueMessages() {
LinkedList<BPServiceActorAction> duplicateQueue;
synchronized (bpThreadQueue) {
duplicateQueue = new LinkedList<BPServiceActorAction>(bpThreadQueue);
bpThreadQueue.clear();
}
while (!duplicateQueue.isEmpty()) {
BPServiceActorAction actionItem = duplicateQueue.remove();
try {
actionItem.reportTo(bpNamenode, bpRegistration);
} catch (BPServiceActorActionException baae) {
LOG.warn(baae.getMessage() + nnAddr , baae);
// Adding it back to the queue if not present
bpThreadEnqueue(actionItem);
}
}
}
Scheduler getScheduler() {
return scheduler;
}
/**
* Utility class that wraps the timestamp computations for scheduling
* heartbeats and block reports.
*/
static class Scheduler {
// nextBlockReportTime and nextHeartbeatTime may be assigned/read
// by testing threads (through BPServiceActor#triggerXXX), while also
// assigned/read by the actor thread.
@VisibleForTesting
volatile long nextBlockReportTime = monotonicNow();
@VisibleForTesting
volatile long nextHeartbeatTime = monotonicNow();
@VisibleForTesting
boolean resetBlockReportTime = true;
private final AtomicBoolean forceFullBlockReport =
new AtomicBoolean(false);
private final long heartbeatIntervalMs;
private final long blockReportIntervalMs;
Scheduler(long heartbeatIntervalMs, long blockReportIntervalMs) {
this.heartbeatIntervalMs = heartbeatIntervalMs;
this.blockReportIntervalMs = blockReportIntervalMs;
}
// This is useful to make sure NN gets Heartbeat before Blockreport
    // upon NN restart while DN keeps retrying. Otherwise,
// 1. NN restarts.
// 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
// 3. After reregistration completes, DN will send Blockreport first.
// 4. Given NN receives Blockreport after Heartbeat, it won't mark
// DatanodeStorageInfo#blockContentsStale to false until the next
// Blockreport.
long scheduleHeartbeat() {
nextHeartbeatTime = monotonicNow();
return nextHeartbeatTime;
}
long scheduleNextHeartbeat() {
// Numerical overflow is possible here and is okay.
nextHeartbeatTime += heartbeatIntervalMs;
return nextHeartbeatTime;
}
boolean isHeartbeatDue(long startTime) {
return (nextHeartbeatTime - startTime <= 0);
}
boolean isBlockReportDue(long curTime) {
return nextBlockReportTime - curTime <= 0;
}
void forceFullBlockReportNow() {
forceFullBlockReport.set(true);
resetBlockReportTime = true;
}
/**
     * This method arranges for the data node to send the block report after
     * a random delay of at most the given value, or at the next heartbeat if
     * no delay is given.
*/
long scheduleBlockReport(long delay) {
if (delay > 0) { // send BR after random delay
// Numerical overflow is possible here and is okay.
nextBlockReportTime =
monotonicNow() + ThreadLocalRandom.current().nextInt((int) (delay));
} else { // send at next heartbeat
nextBlockReportTime = monotonicNow();
}
resetBlockReportTime = true; // reset future BRs for randomness
return nextBlockReportTime;
}
/**
* Schedule the next block report after the block report interval. If the
* current block report was delayed then the next block report is sent per
* the original schedule.
* Numerical overflow is possible here.
*/
void scheduleNextBlockReport() {
// If we have sent the first set of block reports, then wait a random
// time before we start the periodic block reports.
if (resetBlockReportTime) {
nextBlockReportTime = monotonicNow() +
ThreadLocalRandom.current().nextInt((int)(blockReportIntervalMs));
resetBlockReportTime = false;
} else {
/* say the last block report was at 8:20:14. The current report
* should have started around 9:20:14 (default 1 hour interval).
* If current time is :
* 1) normal like 9:20:18, next report should be at 10:20:14
* 2) unexpected like 11:35:43, next report should be at 12:20:14
*/
nextBlockReportTime +=
(((monotonicNow() - nextBlockReportTime + blockReportIntervalMs) /
blockReportIntervalMs)) * blockReportIntervalMs;
}
}
long getHeartbeatWaitTime() {
return nextHeartbeatTime - monotonicNow();
}
/**
* Wrapped for testing.
     * @return the current monotonic time in milliseconds
*/
@VisibleForTesting
public long monotonicNow() {
return Time.monotonicNow();
}
}
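  // Illustrative sketch (added comment, not in the original source): the actor's main
  // loop drives the Scheduler roughly as follows (simplified, error handling omitted):
  //
  //   long startTime = scheduler.monotonicNow();
  //   if (scheduler.isHeartbeatDue(startTime)) {
  //     scheduler.scheduleNextHeartbeat();
  //     // ... send heartbeat and process returned commands ...
  //   }
  //   if (scheduler.isBlockReportDue(scheduler.monotonicNow())) {
  //     // ... send full block report(s) ...
  //     scheduler.scheduleNextBlockReport();
  //   }
  //   // then wait roughly scheduler.getHeartbeatWaitTime() milliseconds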
}
| 40,321 | 34.401229 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import java.io.Closeable;
import java.io.IOException;
/**
* This class includes a replica being actively written and the reference to
* the fs volume where this replica is located.
*/
public class ReplicaHandler implements Closeable {
private final ReplicaInPipelineInterface replica;
private final FsVolumeReference volumeReference;
public ReplicaHandler(
ReplicaInPipelineInterface replica, FsVolumeReference reference) {
this.replica = replica;
this.volumeReference = reference;
}
@Override
public void close() throws IOException {
if (this.volumeReference != null) {
volumeReference.close();
}
}
public ReplicaInPipelineInterface getReplica() {
return replica;
}
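  // Illustrative usage sketch (added comment, not in the original source): callers
  // that obtain a ReplicaHandler (e.g. from FsDatasetSpi#createRbw) are expected to
  // close it once the write is finished so the volume reference is released:
  //
  //   try (ReplicaHandler handler = ...) {
  //     ReplicaInPipelineInterface replica = handler.getReplica();
  //     // ... stream block data into the replica ...
  //   } // close() releases the FsVolumeReference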
}
| 1,658 | 32.18 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/CachingStrategy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
/**
* The caching strategy we should use for an HDFS read or write operation.
*/
public class CachingStrategy {
private final Boolean dropBehind; // null = use server defaults
private final Long readahead; // null = use server defaults
public static CachingStrategy newDefaultStrategy() {
return new CachingStrategy(null, null);
}
public static CachingStrategy newDropBehind() {
return new CachingStrategy(true, null);
}
public static class Builder {
private Boolean dropBehind;
private Long readahead;
public Builder(CachingStrategy prev) {
this.dropBehind = prev.dropBehind;
this.readahead = prev.readahead;
}
public Builder setDropBehind(Boolean dropBehind) {
this.dropBehind = dropBehind;
return this;
}
public Builder setReadahead(Long readahead) {
this.readahead = readahead;
return this;
}
public CachingStrategy build() {
return new CachingStrategy(dropBehind, readahead);
}
}
public CachingStrategy(Boolean dropBehind, Long readahead) {
this.dropBehind = dropBehind;
this.readahead = readahead;
}
public Boolean getDropBehind() {
return dropBehind;
}
public Long getReadahead() {
return readahead;
}
public String toString() {
return "CachingStrategy(dropBehind=" + dropBehind +
", readahead=" + readahead + ")";
}
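  // Illustrative sketch only (added, not part of the original file): one way a caller
  // could derive a strategy from the server defaults with drop-behind enabled and a
  // 4 MB readahead. The 4 MB figure is an arbitrary example, not an HDFS default.
  static CachingStrategy exampleDropBehindWithReadahead() {
    return new Builder(newDefaultStrategy())
        .setDropBehind(true)
        .setReadahead(4L * 1024 * 1024)
        .build();
  }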
}
| 2,249 | 28.220779 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UnexpectedReplicaStateException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
/**
* Exception indicating that the replica is in an unexpected state
*/
public class UnexpectedReplicaStateException extends IOException {
private static final long serialVersionUID = 1L;
public UnexpectedReplicaStateException() {
super();
}
public UnexpectedReplicaStateException(ExtendedBlock b,
ReplicaState expectedState) {
super("Replica " + b + " is not in expected state " + expectedState);
}
public UnexpectedReplicaStateException(String msg) {
super(msg);
}
}
| 1,529 | 32.26087 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.daemon.Daemon;
import org.apache.commons.daemon.DaemonContext;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.nio.channels.ServerSocketChannel;
/**
* Utility class to start a datanode in a secure cluster, first obtaining
* privileged resources before main startup and handing them to the datanode.
*/
public class SecureDataNodeStarter implements Daemon {
/**
   * Stash the resources needed for datanode operation in a secure env.
*/
public static class SecureResources {
private final ServerSocket streamingSocket;
private final ServerSocketChannel httpServerSocket;
public SecureResources(ServerSocket streamingSocket, ServerSocketChannel httpServerSocket) {
this.streamingSocket = streamingSocket;
this.httpServerSocket = httpServerSocket;
}
public ServerSocket getStreamingSocket() { return streamingSocket; }
public ServerSocketChannel getHttpServerChannel() {
return httpServerSocket;
}
}
private String [] args;
private SecureResources resources;
@Override
public void init(DaemonContext context) throws Exception {
System.err.println("Initializing secure datanode resources");
// Create a new HdfsConfiguration object to ensure that the configuration in
// hdfs-site.xml is picked up.
Configuration conf = new HdfsConfiguration();
// Stash command-line arguments for regular datanode
args = context.getArguments();
resources = getSecureResources(conf);
}
@Override
public void start() throws Exception {
System.err.println("Starting regular datanode initialization");
DataNode.secureMain(args, resources);
}
@Override public void destroy() {}
@Override public void stop() throws Exception { /* Nothing to do */ }
/**
* Acquire privileged resources (i.e., the privileged ports) for the data
* node. The privileged resources consist of the port of the RPC server and
* the port of HTTP (not HTTPS) server.
*/
@VisibleForTesting
public static SecureResources getSecureResources(Configuration conf)
throws Exception {
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
boolean isSecure = UserGroupInformation.isSecurityEnabled();
// Obtain secure port for data streaming to datanode
InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
int socketWriteTimeout = conf.getInt(
DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
HdfsServerConstants.WRITE_TIMEOUT);
ServerSocket ss = (socketWriteTimeout > 0) ?
ServerSocketChannel.open().socket() : new ServerSocket();
ss.bind(streamingAddr, 0);
// Check that we got the port we need
if (ss.getLocalPort() != streamingAddr.getPort()) {
throw new RuntimeException(
"Unable to bind on specified streaming port in secure "
+ "context. Needed " + streamingAddr.getPort() + ", got "
+ ss.getLocalPort());
}
if (!SecurityUtil.isPrivilegedPort(ss.getLocalPort()) && isSecure) {
throw new RuntimeException(
"Cannot start secure datanode with unprivileged RPC ports");
}
System.err.println("Opened streaming server at " + streamingAddr);
// Bind a port for the web server. The code intends to bind HTTP server to
// privileged port only, as the client can authenticate the server using
// certificates if they are communicating through SSL.
final ServerSocketChannel httpChannel;
if (policy.isHttpEnabled()) {
httpChannel = ServerSocketChannel.open();
InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
httpChannel.socket().bind(infoSocAddr);
InetSocketAddress localAddr = (InetSocketAddress) httpChannel.socket()
.getLocalSocketAddress();
if (localAddr.getPort() != infoSocAddr.getPort()) {
throw new RuntimeException("Unable to bind on specified info port in secure " +
"context. Needed " + streamingAddr.getPort() + ", got " + ss.getLocalPort());
}
System.err.println("Successfully obtained privileged resources (streaming port = "
+ ss + " ) (http listener port = " + localAddr.getPort() +")");
if (localAddr.getPort() > 1023 && isSecure) {
throw new RuntimeException(
"Cannot start secure datanode with unprivileged HTTP ports");
}
System.err.println("Opened info server at " + infoSocAddr);
} else {
httpChannel = null;
}
return new SecureResources(ss, httpChannel);
}
}
| 5,862 | 38.348993 | 96 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY;
import static org.apache.hadoop.util.ExitUtil.terminate;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import javax.management.ObjectName;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurableBase;
import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.conf.ReconfigurationTaskStatus;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.net.DomainPeerServer;
import org.apache.hadoop.hdfs.net.TcpPeerServer;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.ReadaheadPool;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SaslPropertiesResolver;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.apache.hadoop.tracing.SpanReceiverInfo;
import org.apache.hadoop.tracing.TraceAdminPB.TraceAdminService;
import org.apache.hadoop.tracing.TraceAdminProtocol;
import org.apache.hadoop.tracing.TraceAdminProtocolPB;
import org.apache.hadoop.tracing.TraceAdminProtocolServerSideTranslatorPB;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.JvmPauseMonitor;
import org.apache.hadoop.util.ServicePlugin;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;
import org.mortbay.util.ajax.JSON;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.Lists;
import com.google.protobuf.BlockingService;
/**********************************************************
* DataNode is a class (and program) that stores a set of
* blocks for a DFS deployment. A single deployment can
* have one or many DataNodes. Each DataNode communicates
* regularly with a single NameNode. It also communicates
* with client code and other DataNodes from time to time.
*
* DataNodes store a series of named blocks. The DataNode
* allows client code to read these blocks, or to write new
* block data. The DataNode may also, in response to instructions
* from its NameNode, delete blocks or copy blocks to/from other
* DataNodes.
*
* The DataNode maintains just one critical table:
* block-> stream of bytes (of BLOCK_SIZE or less)
*
* This info is stored on a local disk. The DataNode
* reports the table's contents to the NameNode upon startup
* and every so often afterwards.
*
* DataNodes spend their lives in an endless loop of asking
* the NameNode for something to do. A NameNode cannot connect
* to a DataNode directly; a NameNode simply returns values from
* functions invoked by a DataNode.
*
* DataNodes maintain an open server socket so that client code
* or other DataNodes can read/write data. The host/port for
* this server is reported to the NameNode, which then sends that
* information to clients or other DataNodes that might be interested.
*
**********************************************************/
@InterfaceAudience.Private
public class DataNode extends ReconfigurableBase
implements InterDatanodeProtocol, ClientDatanodeProtocol,
TraceAdminProtocol, DataNodeMXBean {
public static final Log LOG = LogFactory.getLog(DataNode.class);
static{
HdfsConfiguration.init();
}
public static final String DN_CLIENTTRACE_FORMAT =
"src: %s" + // src IP
", dest: %s" + // dst IP
", bytes: %s" + // byte count
", op: %s" + // operation
", cliID: %s" + // DFSClient id
", offset: %s" + // offset
", srvID: %s" + // DatanodeRegistration
", blockid: %s" + // block id
", duration: %s"; // duration time
static final Log ClientTraceLog =
LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
private static final String USAGE =
"Usage: hdfs datanode [-regular | -rollback | -rollingupgrade rollback" +
" ]\n" +
" -regular : Normal DataNode startup (default).\n" +
" -rollback : Rollback a standard or rolling upgrade.\n" +
" -rollingupgrade rollback : Rollback a rolling upgrade operation.\n" +
" Refer to HDFS documentation for the difference between standard\n" +
" and rolling upgrades.";
static final int CURRENT_BLOCK_FORMAT_VERSION = 1;
  /** A list of properties that are reconfigurable at runtime. */
private static final List<String> RECONFIGURABLE_PROPERTIES =
Collections.unmodifiableList(
Arrays.asList(DFS_DATANODE_DATA_DIR_KEY));
/**
* Use {@link NetUtils#createSocketAddr(String)} instead.
*/
@Deprecated
public static InetSocketAddress createSocketAddr(String target) {
return NetUtils.createSocketAddr(target);
}
volatile boolean shouldRun = true;
volatile boolean shutdownForUpgrade = false;
private boolean shutdownInProgress = false;
private BlockPoolManager blockPoolManager;
volatile FsDatasetSpi<? extends FsVolumeSpi> data = null;
private String clusterId = null;
public final static String EMPTY_DEL_HINT = "";
final AtomicInteger xmitsInProgress = new AtomicInteger();
Daemon dataXceiverServer = null;
DataXceiverServer xserver = null;
Daemon localDataXceiverServer = null;
ShortCircuitRegistry shortCircuitRegistry = null;
ThreadGroup threadGroup = null;
private DNConf dnConf;
private volatile boolean heartbeatsDisabledForTests = false;
private volatile boolean cacheReportsDisabledForTests = false;
private DataStorage storage = null;
private DatanodeHttpServer httpServer = null;
private int infoPort;
private int infoSecurePort;
DataNodeMetrics metrics;
private InetSocketAddress streamingAddr;
// See the note below in incrDatanodeNetworkErrors re: concurrency.
private LoadingCache<String, Map<String, Long>> datanodeNetworkCounts;
private String hostName;
private DatanodeID id;
final private String fileDescriptorPassingDisabledReason;
boolean isBlockTokenEnabled;
BlockPoolTokenSecretManager blockPoolTokenSecretManager;
private boolean hasAnyBlockPoolRegistered = false;
private final BlockScanner blockScanner;
private DirectoryScanner directoryScanner = null;
/** Activated plug-ins. */
private List<ServicePlugin> plugins;
// For InterDataNodeProtocol
public RPC.Server ipcServer;
private JvmPauseMonitor pauseMonitor;
private SecureResources secureResources = null;
// dataDirs must be accessed while holding the DataNode lock.
private List<StorageLocation> dataDirs;
private Configuration conf;
private final String confVersion;
private final long maxNumberOfBlocksToLog;
private final boolean pipelineSupportECN;
private final List<String> usersWithLocalPathAccess;
private final boolean connectToDnViaHostname;
ReadaheadPool readaheadPool;
SaslDataTransferClient saslClient;
SaslDataTransferServer saslServer;
private final boolean getHdfsBlockLocationsEnabled;
private ObjectName dataNodeInfoBeanName;
private Thread checkDiskErrorThread = null;
protected final int checkDiskErrorInterval = 5*1000;
private boolean checkDiskErrorFlag = false;
private Object checkDiskErrorMutex = new Object();
private long lastDiskErrorCheck;
private String supergroup;
private boolean isPermissionEnabled;
private String dnUserName = null;
private SpanReceiverHost spanReceiverHost;
private static final int NUM_CORES = Runtime.getRuntime()
.availableProcessors();
private static final double CONGESTION_RATIO = 1.5;
/**
* Creates a dummy DataNode for testing purpose.
*/
@VisibleForTesting
@InterfaceAudience.LimitedPrivate("HDFS")
DataNode(final Configuration conf) {
super(conf);
this.blockScanner = new BlockScanner(this, conf);
this.fileDescriptorPassingDisabledReason = null;
this.maxNumberOfBlocksToLog = 0;
this.confVersion = null;
this.usersWithLocalPathAccess = null;
this.connectToDnViaHostname = false;
this.getHdfsBlockLocationsEnabled = false;
this.pipelineSupportECN = false;
}
/**
* Create the DataNode given a configuration, an array of dataDirs,
* and a namenode proxy
*/
DataNode(final Configuration conf,
final List<StorageLocation> dataDirs,
final SecureResources resources) throws IOException {
super(conf);
this.blockScanner = new BlockScanner(this, conf);
this.lastDiskErrorCheck = 0;
this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
this.usersWithLocalPathAccess = Arrays.asList(
conf.getTrimmedStrings(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
this.connectToDnViaHostname = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
this.getHdfsBlockLocationsEnabled = conf.getBoolean(
DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
this.supergroup = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
this.isPermissionEnabled = conf.getBoolean(
DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,
DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT);
this.pipelineSupportECN = conf.getBoolean(
DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED,
DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED_DEFAULT);
confVersion = "core-" +
conf.get("hadoop.common.configuration.version", "UNSPECIFIED") +
",hdfs-" +
conf.get("hadoop.hdfs.configuration.version", "UNSPECIFIED");
// Determine whether we should try to pass file descriptors to clients.
if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,
HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT)) {
String reason = DomainSocket.getLoadingFailureReason();
if (reason != null) {
LOG.warn("File descriptor passing is disabled because " + reason);
this.fileDescriptorPassingDisabledReason = reason;
} else {
LOG.info("File descriptor passing is enabled.");
this.fileDescriptorPassingDisabledReason = null;
}
} else {
this.fileDescriptorPassingDisabledReason =
"File descriptor passing was not configured.";
LOG.debug(this.fileDescriptorPassingDisabledReason);
}
try {
hostName = getHostName(conf);
LOG.info("Configured hostname is " + hostName);
startDataNode(conf, dataDirs, resources);
} catch (IOException ie) {
shutdown();
throw ie;
}
final int dncCacheMaxSize =
conf.getInt(DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_KEY,
DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_DEFAULT) ;
datanodeNetworkCounts =
CacheBuilder.newBuilder()
.maximumSize(dncCacheMaxSize)
.build(new CacheLoader<String, Map<String, Long>>() {
@Override
public Map<String, Long> load(String key) throws Exception {
final Map<String, Long> ret = new HashMap<String, Long>();
ret.put("networkErrors", 0L);
return ret;
}
});
}
@Override // ReconfigurableBase
protected Configuration getNewConf() {
return new HdfsConfiguration();
}
@Override
public void reconfigurePropertyImpl(String property, String newVal)
throws ReconfigurationException {
if (property.equals(DFS_DATANODE_DATA_DIR_KEY)) {
try {
LOG.info("Reconfiguring " + property + " to " + newVal);
this.refreshVolumes(newVal);
} catch (IOException e) {
throw new ReconfigurationException(property, newVal,
getConf().get(property), e);
}
} else {
throw new ReconfigurationException(
property, newVal, getConf().get(property));
}
}
/**
* Get a list of the keys of the re-configurable properties in configuration.
*/
@Override // Reconfigurable
public Collection<String> getReconfigurableProperties() {
return RECONFIGURABLE_PROPERTIES;
}
/**
* The ECN bit for the DataNode. The DataNode should return:
* <ul>
* <li>ECN.DISABLED when ECN is disabled.</li>
* <li>ECN.SUPPORTED when ECN is enabled but the DN still has capacity.</li>
* <li>ECN.CONGESTED when ECN is enabled and the DN is congested.</li>
* </ul>
*/
public PipelineAck.ECN getECN() {
if (!pipelineSupportECN) {
return PipelineAck.ECN.DISABLED;
}
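    // Worked example (added comment): with CONGESTION_RATIO = 1.5 on an 8-core host,
    // the DN reports CONGESTED once the 1-minute system load average exceeds 12;
    // otherwise it reports SUPPORTED.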
double load = ManagementFactory.getOperatingSystemMXBean()
.getSystemLoadAverage();
return load > NUM_CORES * CONGESTION_RATIO ? PipelineAck.ECN.CONGESTED :
PipelineAck.ECN.SUPPORTED;
}
/**
* Contains the StorageLocations for changed data volumes.
*/
@VisibleForTesting
static class ChangedVolumes {
/** The storage locations of the newly added volumes. */
List<StorageLocation> newLocations = Lists.newArrayList();
/** The storage locations of the volumes that are removed. */
List<StorageLocation> deactivateLocations = Lists.newArrayList();
/** The unchanged locations that existed in the old configuration. */
List<StorageLocation> unchangedLocations = Lists.newArrayList();
}
/**
* Parse the new DFS_DATANODE_DATA_DIR value in the configuration to detect
* changed volumes.
* @param newVolumes a comma separated string that specifies the data volumes.
* @return changed volumes.
* @throws IOException if none of the directories are specified in the
* configuration.
*/
@VisibleForTesting
ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
Configuration conf = new Configuration();
conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
List<StorageLocation> locations = getStorageLocations(conf);
if (locations.isEmpty()) {
throw new IOException("No directory is specified.");
}
ChangedVolumes results = new ChangedVolumes();
results.newLocations.addAll(locations);
for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
it.hasNext(); ) {
Storage.StorageDirectory dir = it.next();
boolean found = false;
for (Iterator<StorageLocation> sl = results.newLocations.iterator();
sl.hasNext(); ) {
StorageLocation location = sl.next();
if (location.getFile().getCanonicalPath().equals(
dir.getRoot().getCanonicalPath())) {
sl.remove();
results.unchangedLocations.add(location);
found = true;
break;
}
}
if (!found) {
results.deactivateLocations.add(
StorageLocation.parse(dir.getRoot().toString()));
}
}
return results;
}
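  // Illustrative example (added comment, not in the original source): if the current
  // storage directories are /data/1 and /data/2 and newVolumes is "/data/2,/data/3",
  // parseChangedVolumes() returns newLocations = [/data/3],
  // deactivateLocations = [/data/1] and unchangedLocations = [/data/2].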
/**
* Attempts to reload data volumes with new configuration.
* @param newVolumes a comma separated string that specifies the data volumes.
   * @throws IOException on error. If an IOException is thrown, some new volumes
   * may already have been added, and some old volumes removed, before the failure.
*/
private synchronized void refreshVolumes(String newVolumes) throws IOException {
Configuration conf = getConf();
conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
int numOldDataDirs = dataDirs.size();
ChangedVolumes changedVolumes = parseChangedVolumes(newVolumes);
StringBuilder errorMessageBuilder = new StringBuilder();
List<String> effectiveVolumes = Lists.newArrayList();
for (StorageLocation sl : changedVolumes.unchangedLocations) {
effectiveVolumes.add(sl.toString());
}
try {
if (numOldDataDirs + changedVolumes.newLocations.size() -
changedVolumes.deactivateLocations.size() <= 0) {
throw new IOException("Attempt to remove all volumes.");
}
if (!changedVolumes.newLocations.isEmpty()) {
LOG.info("Adding new volumes: " +
Joiner.on(",").join(changedVolumes.newLocations));
// Add volumes for each Namespace
final List<NamespaceInfo> nsInfos = Lists.newArrayList();
for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
nsInfos.add(bpos.getNamespaceInfo());
}
ExecutorService service = Executors.newFixedThreadPool(
changedVolumes.newLocations.size());
List<Future<IOException>> exceptions = Lists.newArrayList();
for (final StorageLocation location : changedVolumes.newLocations) {
exceptions.add(service.submit(new Callable<IOException>() {
@Override
public IOException call() {
try {
data.addVolume(location, nsInfos);
} catch (IOException e) {
return e;
}
return null;
}
}));
}
for (int i = 0; i < changedVolumes.newLocations.size(); i++) {
StorageLocation volume = changedVolumes.newLocations.get(i);
Future<IOException> ioExceptionFuture = exceptions.get(i);
try {
IOException ioe = ioExceptionFuture.get();
if (ioe != null) {
errorMessageBuilder.append(
String.format("FAILED TO ADD: %s: %s%n",
volume, ioe.getMessage()));
LOG.error("Failed to add volume: " + volume, ioe);
} else {
effectiveVolumes.add(volume.toString());
LOG.info("Successfully added volume: " + volume);
}
} catch (Exception e) {
errorMessageBuilder.append(
String.format("FAILED to ADD: %s: %s%n", volume,
e.toString()));
LOG.error("Failed to add volume: " + volume, e);
}
}
}
try {
removeVolumes(changedVolumes.deactivateLocations);
} catch (IOException e) {
errorMessageBuilder.append(e.getMessage());
LOG.error("Failed to remove volume: " + e.getMessage(), e);
}
if (errorMessageBuilder.length() > 0) {
throw new IOException(errorMessageBuilder.toString());
}
} finally {
conf.set(DFS_DATANODE_DATA_DIR_KEY,
Joiner.on(",").join(effectiveVolumes));
dataDirs = getStorageLocations(conf);
// Send a full block report to let NN acknowledge the volume changes.
triggerBlockReport(new BlockReportOptions.Factory()
.setIncremental(false).build());
}
}
/**
* Remove volumes from DataNode.
   * See {@link #removeVolumes(Set, boolean)} for details.
*
* @param locations the StorageLocations of the volumes to be removed.
* @throws IOException
*/
private void removeVolumes(final Collection<StorageLocation> locations)
throws IOException {
if (locations.isEmpty()) {
return;
}
Set<File> volumesToRemove = new HashSet<>();
for (StorageLocation loc : locations) {
volumesToRemove.add(loc.getFile().getAbsoluteFile());
}
removeVolumes(volumesToRemove, true);
}
/**
* Remove volumes from DataNode.
*
* It does three things:
   * <ul>
   *   <li>Remove volumes and block info from FsDataset.</li>
   *   <li>Remove volumes from DataStorage.</li>
   *   <li>Reset configuration DATA_DIR and {@link #dataDirs} to represent
   *   active volumes.</li>
   * </ul>
* @param absoluteVolumePaths the absolute path of volumes.
* @param clearFailure if true, clears the failure information related to the
* volumes.
* @throws IOException
*/
private synchronized void removeVolumes(
final Set<File> absoluteVolumePaths, boolean clearFailure)
throws IOException {
for (File vol : absoluteVolumePaths) {
Preconditions.checkArgument(vol.isAbsolute());
}
if (absoluteVolumePaths.isEmpty()) {
return;
}
LOG.info(String.format("Deactivating volumes (clear failure=%b): %s",
clearFailure, Joiner.on(",").join(absoluteVolumePaths)));
IOException ioe = null;
// Remove volumes and block infos from FsDataset.
data.removeVolumes(absoluteVolumePaths, clearFailure);
// Remove volumes from DataStorage.
try {
storage.removeVolumes(absoluteVolumePaths);
} catch (IOException e) {
ioe = e;
}
// Set configuration and dataDirs to reflect volume changes.
for (Iterator<StorageLocation> it = dataDirs.iterator(); it.hasNext(); ) {
StorageLocation loc = it.next();
if (absoluteVolumePaths.contains(loc.getFile().getAbsoluteFile())) {
it.remove();
}
}
conf.set(DFS_DATANODE_DATA_DIR_KEY, Joiner.on(",").join(dataDirs));
if (ioe != null) {
throw ioe;
}
}
private synchronized void setClusterId(final String nsCid, final String bpid
) throws IOException {
if(clusterId != null && !clusterId.equals(nsCid)) {
throw new IOException ("Cluster IDs not matched: dn cid=" + clusterId
+ " but ns cid="+ nsCid + "; bpid=" + bpid);
}
// else
clusterId = nsCid;
}
/**
* Returns the hostname for this datanode. If the hostname is not
* explicitly configured in the given config, then it is determined
* via the DNS class.
*
* @param config configuration
* @return the hostname (NB: may not be a FQDN)
* @throws UnknownHostException if the dfs.datanode.dns.interface
* option is used and the hostname can not be determined
*/
private static String getHostName(Configuration config)
throws UnknownHostException {
String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
if (name == null) {
name = DNS.getDefaultHost(
config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
DFS_DATANODE_DNS_INTERFACE_DEFAULT),
config.get(DFS_DATANODE_DNS_NAMESERVER_KEY,
DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
}
return name;
}
/**
* @see DFSUtil#getHttpPolicy(org.apache.hadoop.conf.Configuration)
   * for information on the different configuration options and how the
   * HTTP policy is decided.
*/
private void startInfoServer(Configuration conf)
throws IOException {
// SecureDataNodeStarter will bind the privileged port to the channel if
// the DN is started by JSVC, pass it along.
ServerSocketChannel httpServerChannel = secureResources != null ?
secureResources.getHttpServerChannel() : null;
this.httpServer = new DatanodeHttpServer(conf, this, httpServerChannel);
httpServer.start();
if (httpServer.getHttpAddress() != null) {
infoPort = httpServer.getHttpAddress().getPort();
}
if (httpServer.getHttpsAddress() != null) {
infoSecurePort = httpServer.getHttpsAddress().getPort();
}
}
private void startPlugins(Configuration conf) {
plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
for (ServicePlugin p: plugins) {
try {
p.start(this);
LOG.info("Started plug-in " + p);
} catch (Throwable t) {
LOG.warn("ServicePlugin " + p + " could not be started", t);
}
}
}
private void initIpcServer(Configuration conf) throws IOException {
InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
conf.getTrimmed(DFS_DATANODE_IPC_ADDRESS_KEY));
// Add all the RPC protocols that the Datanode implements
RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
ProtobufRpcEngine.class);
ClientDatanodeProtocolServerSideTranslatorPB clientDatanodeProtocolXlator =
new ClientDatanodeProtocolServerSideTranslatorPB(this);
BlockingService service = ClientDatanodeProtocolService
.newReflectiveBlockingService(clientDatanodeProtocolXlator);
ipcServer = new RPC.Builder(conf)
.setProtocol(ClientDatanodeProtocolPB.class)
.setInstance(service)
.setBindAddress(ipcAddr.getHostName())
.setPort(ipcAddr.getPort())
.setNumHandlers(
conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY,
DFS_DATANODE_HANDLER_COUNT_DEFAULT)).setVerbose(false)
.setSecretManager(blockPoolTokenSecretManager).build();
InterDatanodeProtocolServerSideTranslatorPB interDatanodeProtocolXlator =
new InterDatanodeProtocolServerSideTranslatorPB(this);
service = InterDatanodeProtocolService
.newReflectiveBlockingService(interDatanodeProtocolXlator);
DFSUtil.addPBProtocol(conf, InterDatanodeProtocolPB.class, service,
ipcServer);
TraceAdminProtocolServerSideTranslatorPB traceAdminXlator =
new TraceAdminProtocolServerSideTranslatorPB(this);
BlockingService traceAdminService = TraceAdminService
.newReflectiveBlockingService(traceAdminXlator);
DFSUtil.addPBProtocol(conf, TraceAdminProtocolPB.class, traceAdminService,
ipcServer);
LOG.info("Opened IPC server at " + ipcServer.getListenerAddress());
// set service-level authorization security policy
if (conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
ipcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
}
}
/** Check whether the current user is in the superuser group. */
private void checkSuperuserPrivilege() throws IOException, AccessControlException {
if (!isPermissionEnabled) {
return;
}
// Try to get the ugi in the RPC call.
UserGroupInformation callerUgi = ipcServer.getRemoteUser();
if (callerUgi == null) {
// This is not from RPC.
callerUgi = UserGroupInformation.getCurrentUser();
}
// Is this by the DN user itself?
assert dnUserName != null;
if (callerUgi.getShortUserName().equals(dnUserName)) {
return;
}
// Is the user a member of the super group?
List<String> groups = Arrays.asList(callerUgi.getGroupNames());
if (groups.contains(supergroup)) {
return;
}
// Not a superuser.
throw new AccessControlException();
}
private void shutdownPeriodicScanners() {
shutdownDirectoryScanner();
blockScanner.removeAllVolumeScanners();
}
/**
* See {@link DirectoryScanner}
*/
private synchronized void initDirectoryScanner(Configuration conf) {
if (directoryScanner != null) {
return;
}
String reason = null;
if (conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT) < 0) {
reason = "verification is turned off by configuration";
} else if ("SimulatedFSDataset".equals(data.getClass().getSimpleName())) {
reason = "verifcation is not supported by SimulatedFSDataset";
}
if (reason == null) {
directoryScanner = new DirectoryScanner(this, data, conf);
directoryScanner.start();
} else {
LOG.info("Periodic Directory Tree Verification scan is disabled because " +
reason);
}
}
private synchronized void shutdownDirectoryScanner() {
if (directoryScanner != null) {
directoryScanner.shutdown();
}
}
private void initDataXceiver(Configuration conf) throws IOException {
// find free port or use privileged port provided
TcpPeerServer tcpPeerServer;
if (secureResources != null) {
tcpPeerServer = new TcpPeerServer(secureResources);
} else {
tcpPeerServer = new TcpPeerServer(dnConf.socketWriteTimeout,
DataNode.getStreamingAddr(conf));
}
tcpPeerServer.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
streamingAddr = tcpPeerServer.getStreamingAddr();
LOG.info("Opened streaming server at " + streamingAddr);
this.threadGroup = new ThreadGroup("dataXceiverServer");
xserver = new DataXceiverServer(tcpPeerServer, conf, this);
this.dataXceiverServer = new Daemon(threadGroup, xserver);
this.threadGroup.setDaemon(true); // auto destroy when empty
if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,
HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT) ||
conf.getBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT)) {
DomainPeerServer domainPeerServer =
getDomainPeerServer(conf, streamingAddr.getPort());
if (domainPeerServer != null) {
this.localDataXceiverServer = new Daemon(threadGroup,
new DataXceiverServer(domainPeerServer, conf, this));
LOG.info("Listening on UNIX domain socket: " +
domainPeerServer.getBindPath());
}
}
this.shortCircuitRegistry = new ShortCircuitRegistry(conf);
}
private static DomainPeerServer getDomainPeerServer(Configuration conf,
int port) throws IOException {
String domainSocketPath =
conf.getTrimmed(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
if (domainSocketPath.isEmpty()) {
if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,
HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT) &&
(!conf.getBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) {
LOG.warn("Although short-circuit local reads are configured, " +
"they are disabled because you didn't configure " +
DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
}
return null;
}
if (DomainSocket.getLoadingFailureReason() != null) {
throw new RuntimeException("Although a UNIX domain socket " +
"path is configured as " + domainSocketPath + ", we cannot " +
"start a localDataXceiverServer because " +
DomainSocket.getLoadingFailureReason());
}
DomainPeerServer domainPeerServer =
new DomainPeerServer(domainSocketPath, port);
domainPeerServer.setReceiveBufferSize(
HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
return domainPeerServer;
}
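  // Illustrative configuration sketch (added comment, not in the original source): for
  // the DataNode to open a UNIX domain socket listener, dfs.domain.socket.path must be
  // set, e.g.
  //
  //   conf.set("dfs.domain.socket.path", "/var/lib/hadoop-hdfs/dn_socket");
  //   conf.setBoolean("dfs.client.read.shortcircuit", true);
  //
  // The path value is only an example; short-circuit reads also require the native
  // libhadoop library so that DomainSocket can be loaded.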
// calls specific to BP
public void notifyNamenodeReceivedBlock(
ExtendedBlock block, String delHint, String storageUuid) {
BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
if(bpos != null) {
bpos.notifyNamenodeReceivedBlock(block, delHint, storageUuid);
} else {
LOG.error("Cannot find BPOfferService for reporting block received for bpid="
+ block.getBlockPoolId());
}
}
// calls specific to BP
protected void notifyNamenodeReceivingBlock(
ExtendedBlock block, String storageUuid) {
BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
if(bpos != null) {
bpos.notifyNamenodeReceivingBlock(block, storageUuid);
} else {
LOG.error("Cannot find BPOfferService for reporting block receiving for bpid="
+ block.getBlockPoolId());
}
}
/** Notify the corresponding namenode to delete the block. */
public void notifyNamenodeDeletedBlock(ExtendedBlock block, String storageUuid) {
BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
if (bpos != null) {
bpos.notifyNamenodeDeletedBlock(block, storageUuid);
} else {
LOG.error("Cannot find BPOfferService for reporting block deleted for bpid="
+ block.getBlockPoolId());
}
}
/**
* Report a bad block which is hosted on the local DN.
*/
public void reportBadBlocks(ExtendedBlock block) throws IOException{
BPOfferService bpos = getBPOSForBlock(block);
FsVolumeSpi volume = getFSDataset().getVolume(block);
bpos.reportBadBlocks(
block, volume.getStorageID(), volume.getStorageType());
}
/**
* Report a bad block on another DN (eg if we received a corrupt replica
* from a remote host).
* @param srcDataNode the DN hosting the bad block
* @param block the block itself
*/
public void reportRemoteBadBlock(DatanodeInfo srcDataNode, ExtendedBlock block)
throws IOException {
BPOfferService bpos = getBPOSForBlock(block);
bpos.reportRemoteBadBlock(srcDataNode, block);
}
/**
* Try to send an error report to the NNs associated with the given
* block pool.
* @param bpid the block pool ID
* @param errCode error code to send
* @param errMsg textual message to send
*/
void trySendErrorReport(String bpid, int errCode, String errMsg) {
BPOfferService bpos = blockPoolManager.get(bpid);
if (bpos == null) {
throw new IllegalArgumentException("Bad block pool: " + bpid);
}
bpos.trySendErrorReport(errCode, errMsg);
}
/**
* Return the BPOfferService instance corresponding to the given block.
* @return the BPOS
* @throws IOException if no such BPOS can be found
*/
private BPOfferService getBPOSForBlock(ExtendedBlock block)
throws IOException {
Preconditions.checkNotNull(block);
BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
if (bpos == null) {
throw new IOException("cannot locate OfferService thread for bp="+
block.getBlockPoolId());
}
return bpos;
}
// used only for testing
@VisibleForTesting
void setHeartbeatsDisabledForTests(
boolean heartbeatsDisabledForTests) {
this.heartbeatsDisabledForTests = heartbeatsDisabledForTests;
}
@VisibleForTesting
boolean areHeartbeatsDisabledForTests() {
return this.heartbeatsDisabledForTests;
}
@VisibleForTesting
void setCacheReportsDisabledForTest(boolean disabled) {
this.cacheReportsDisabledForTests = disabled;
}
@VisibleForTesting
boolean areCacheReportsDisabledForTests() {
return this.cacheReportsDisabledForTests;
}
/**
* This method starts the data node with the specified conf.
*
   * @param conf - the configuration.
   *     If conf's CONFIG_PROPERTY_SIMULATED property is set,
   *     then a simulated storage-based data node is created.
*
* @param dataDirs - only for a non-simulated storage data node
* @throws IOException
*/
void startDataNode(Configuration conf,
List<StorageLocation> dataDirs,
SecureResources resources
) throws IOException {
// settings global for all BPs in the Data Node
this.secureResources = resources;
synchronized (this) {
this.dataDirs = dataDirs;
}
this.conf = conf;
this.dnConf = new DNConf(conf);
checkSecureConfig(dnConf, conf, resources);
this.spanReceiverHost =
SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);
if (dnConf.maxLockedMemory > 0) {
if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
throw new RuntimeException(String.format(
"Cannot start datanode because the configured max locked memory" +
" size (%s) is greater than zero and native code is not available.",
DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
}
if (Path.WINDOWS) {
NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
} else {
long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
if (dnConf.maxLockedMemory > ulimit) {
throw new RuntimeException(String.format(
"Cannot start datanode because the configured max locked memory" +
" size (%s) of %d bytes is more than the datanode's available" +
" RLIMIT_MEMLOCK ulimit of %d bytes.",
DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
dnConf.maxLockedMemory,
ulimit));
}
}
}
LOG.info("Starting DataNode with maxLockedMemory = " +
dnConf.maxLockedMemory);
storage = new DataStorage();
// global DN settings
registerMXBean();
initDataXceiver(conf);
startInfoServer(conf);
pauseMonitor = new JvmPauseMonitor(conf);
pauseMonitor.start();
// BlockPoolTokenSecretManager is required to create ipc server.
this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
// Login is done by now. Set the DN user name.
dnUserName = UserGroupInformation.getCurrentUser().getShortUserName();
LOG.info("dnUserName = " + dnUserName);
LOG.info("supergroup = " + supergroup);
initIpcServer(conf);
metrics = DataNodeMetrics.create(conf, getDisplayName());
metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);
blockPoolManager = new BlockPoolManager(this);
blockPoolManager.refreshNamenodes(conf);
// Create the ReadaheadPool from the DataNode context so we can
// exit without having to explicitly shutdown its thread pool.
readaheadPool = ReadaheadPool.getInstance();
saslClient = new SaslDataTransferClient(dnConf.conf,
dnConf.saslPropsResolver, dnConf.trustedChannelResolver);
saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
}
/**
* Checks if the DataNode has a secure configuration if security is enabled.
* There are 2 possible configurations that are considered secure:
* 1. The server has bound to privileged ports for RPC and HTTP via
* SecureDataNodeStarter.
* 2. The configuration enables SASL on DataTransferProtocol and HTTPS (no
* plain HTTP) for the HTTP server. The SASL handshake guarantees
* authentication of the RPC server before a client transmits a secret, such
* as a block access token. Similarly, SSL guarantees authentication of the
* HTTP server before a client transmits a secret, such as a delegation
* token.
* It is not possible to run with both privileged ports and SASL on
* DataTransferProtocol. For backwards-compatibility, the connection logic
* must check if the target port is a privileged port, and if so, skip the
* SASL handshake.
*
* @param dnConf DNConf to check
* @param conf Configuration to check
* @param resources SecuredResources obtained for DataNode
* @throws RuntimeException if security enabled, but configuration is insecure
*/
private static void checkSecureConfig(DNConf dnConf, Configuration conf,
SecureResources resources) throws RuntimeException {
if (!UserGroupInformation.isSecurityEnabled()) {
return;
}
// Abort out of inconsistent state if Kerberos is enabled
// but block access tokens are not enabled.
boolean isEnabled = conf.getBoolean(
DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY,
DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT);
if (!isEnabled) {
String errMessage = "Security is enabled but block access tokens " +
"(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " +
"aren't enabled. This may cause issues " +
"when clients attempt to connect to a DataNode. Aborting DataNode";
throw new RuntimeException(errMessage);
}
SaslPropertiesResolver saslPropsResolver = dnConf.getSaslPropsResolver();
if (resources != null && saslPropsResolver == null) {
return;
}
if (dnConf.getIgnoreSecurePortsForTesting()) {
return;
}
if (saslPropsResolver != null &&
DFSUtil.getHttpPolicy(conf) == HttpConfig.Policy.HTTPS_ONLY &&
resources == null) {
return;
}
throw new RuntimeException("Cannot start secure DataNode without " +
"configuring either privileged resources or SASL RPC data transfer " +
"protection and SSL for HTTP. Using privileged resources in " +
"combination with SASL RPC data transfer protection is not supported.");
}
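  // Illustrative sketch (added comment, not in the original source): the second
  // acceptable secure configuration described above can be expressed as plain
  // Configuration settings; the chosen values are examples only.
  //
  //   conf.set("dfs.data.transfer.protection", "privacy"); // SASL on DataTransferProtocol
  //   conf.set("dfs.http.policy", "HTTPS_ONLY");           // HTTPS only, no plain HTTP
  //
  // With those set and no privileged resources passed in, checkSecureConfig()
  // treats the DataNode as securely configured.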
public static String generateUuid() {
return UUID.randomUUID().toString();
}
/**
* Verify that the DatanodeUuid has been initialized. If this is a new
* datanode then we generate a new Datanode Uuid and persist it to disk.
*
* @throws IOException
*/
synchronized void checkDatanodeUuid() throws IOException {
if (storage.getDatanodeUuid() == null) {
storage.setDatanodeUuid(generateUuid());
storage.writeAll();
LOG.info("Generated and persisted new Datanode UUID " +
storage.getDatanodeUuid());
}
}
/**
* Create a DatanodeRegistration for a specific block pool.
* @param nsInfo the namespace info from the first part of the NN handshake
*/
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
if (storageInfo == null) {
      // it's null in the case of SimulatedFSDataset
storageInfo = new StorageInfo(
DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
NodeType.DATA_NODE);
}
DatanodeID dnId = new DatanodeID(
streamingAddr.getAddress().getHostAddress(), hostName,
storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
infoSecurePort, getIpcPort());
return new DatanodeRegistration(dnId, storageInfo,
new ExportedBlockKeys(), VersionInfo.getVersion());
}
/**
* Check that the registration returned from a NameNode is consistent
* with the information in the storage. If the storage is fresh/unformatted,
* sets the storage ID based on this registration.
* Also updates the block pool's state in the secret manager.
*/
synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration,
String blockPoolId) throws IOException {
// Set the ID if we haven't already
if (null == id) {
id = bpRegistration;
}
if(!storage.getDatanodeUuid().equals(bpRegistration.getDatanodeUuid())) {
throw new IOException("Inconsistent Datanode IDs. Name-node returned "
+ bpRegistration.getDatanodeUuid()
+ ". Expecting " + storage.getDatanodeUuid());
}
registerBlockPoolWithSecretManager(bpRegistration, blockPoolId);
}
/**
* After the block pool has contacted the NN, registers that block pool
* with the secret manager, updating it with the secrets provided by the NN.
* @throws IOException on error
*/
private synchronized void registerBlockPoolWithSecretManager(
DatanodeRegistration bpRegistration, String blockPoolId) throws IOException {
ExportedBlockKeys keys = bpRegistration.getExportedKeys();
if (!hasAnyBlockPoolRegistered) {
hasAnyBlockPoolRegistered = true;
isBlockTokenEnabled = keys.isBlockTokenEnabled();
} else {
if (isBlockTokenEnabled != keys.isBlockTokenEnabled()) {
throw new RuntimeException("Inconsistent configuration of block access"
+ " tokens. Either all block pools must be configured to use block"
+ " tokens, or none may be.");
}
}
if (!isBlockTokenEnabled) return;
if (!blockPoolTokenSecretManager.isBlockPoolRegistered(blockPoolId)) {
long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
long blockTokenLifetime = keys.getTokenLifetime();
LOG.info("Block token params received from NN: for block pool " +
blockPoolId + " keyUpdateInterval="
+ blockKeyUpdateInterval / (60 * 1000)
+ " min(s), tokenLifetime=" + blockTokenLifetime / (60 * 1000)
+ " min(s)");
final BlockTokenSecretManager secretMgr =
new BlockTokenSecretManager(0, blockTokenLifetime, blockPoolId,
dnConf.encryptionAlgorithm);
blockPoolTokenSecretManager.addBlockPool(blockPoolId, secretMgr);
}
}
/**
* Remove the given block pool from the block scanner, dataset, and storage.
*/
void shutdownBlockPool(BPOfferService bpos) {
blockPoolManager.remove(bpos);
if (bpos.hasBlockPoolId()) {
// Possible that this is shutting down before successfully
// registering anywhere. If that's the case, we wouldn't have
// a block pool id
String bpId = bpos.getBlockPoolId();
blockScanner.disableBlockPoolId(bpId);
if (data != null) {
data.shutdownBlockPool(bpId);
}
if (storage != null) {
storage.removeBlockPoolStorage(bpId);
}
}
}
/**
* One of the Block Pools has successfully connected to its NN.
* This initializes the local storage for that block pool,
* checks consistency of the NN's cluster ID, etc.
*
* If this is the first block pool to register, this also initializes
* the datanode-scoped storage.
*
* @param bpos Block pool offer service
* @throws IOException if the NN is inconsistent with the local storage.
*/
void initBlockPool(BPOfferService bpos) throws IOException {
NamespaceInfo nsInfo = bpos.getNamespaceInfo();
if (nsInfo == null) {
throw new IOException("NamespaceInfo not found: Block pool " + bpos
+ " should have retrieved namespace info before initBlockPool.");
}
setClusterId(nsInfo.clusterID, nsInfo.getBlockPoolID());
// Register the new block pool with the BP manager.
blockPoolManager.addBlockPool(bpos);
// In the case that this is the first block pool to connect, initialize
// the dataset, block scanners, etc.
initStorage(nsInfo);
// Exclude failed disks before initializing the block pools to avoid startup
// failures.
checkDiskError();
data.addBlockPool(nsInfo.getBlockPoolID(), conf);
blockScanner.enableBlockPoolId(bpos.getBlockPoolId());
initDirectoryScanner(conf);
}
List<BPOfferService> getAllBpOs() {
return blockPoolManager.getAllNamenodeThreads();
}
int getBpOsCount() {
return blockPoolManager.getAllNamenodeThreads().size();
}
/**
* Initializes the {@link #data}. The initialization is done only once, when
   * handshake with the first namenode is completed.
*/
private void initStorage(final NamespaceInfo nsInfo) throws IOException {
final FsDatasetSpi.Factory<? extends FsDatasetSpi<?>> factory
= FsDatasetSpi.Factory.getFactory(conf);
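    // Simulated datasets (used in tests) have no on-disk storage to recover.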
if (!factory.isSimulated()) {
final StartupOption startOpt = getStartupOption(conf);
if (startOpt == null) {
throw new IOException("Startup option not set.");
}
final String bpid = nsInfo.getBlockPoolID();
//read storage info, lock data dirs and transition fs state if necessary
synchronized (this) {
storage.recoverTransitionRead(this, nsInfo, dataDirs, startOpt);
}
final StorageInfo bpStorage = storage.getBPStorage(bpid);
LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID()
+ ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion()
+ ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
}
// If this is a newly formatted DataNode then assign a new DatanodeUuid.
checkDatanodeUuid();
synchronized(this) {
if (data == null) {
data = factory.newInstance(this, storage, conf);
}
}
}
/**
* Determine the http server's effective addr
*/
public static InetSocketAddress getInfoAddr(Configuration conf) {
return NetUtils.createSocketAddr(conf.getTrimmed(DFS_DATANODE_HTTP_ADDRESS_KEY,
DFS_DATANODE_HTTP_ADDRESS_DEFAULT));
}
private void registerMXBean() {
dataNodeInfoBeanName = MBeans.register("DataNode", "DataNodeInfo", this);
}
@VisibleForTesting
public DataXceiverServer getXferServer() {
return xserver;
}
@VisibleForTesting
public int getXferPort() {
return streamingAddr.getPort();
}
/**
* @return name useful for logging
*/
public String getDisplayName() {
// NB: our DatanodeID may not be set yet
return hostName + ":" + getXferPort();
}
/**
* NB: The datanode can perform data transfer on the streaming
   * address; however, clients are given the IPC IP address for data
* transfer, and that may be a different address.
*
* @return socket address for data transfer
*/
public InetSocketAddress getXferAddress() {
return streamingAddr;
}
/**
* @return the datanode's IPC port
*/
public int getIpcPort() {
return ipcServer.getListenerAddress().getPort();
}
/**
* get BP registration by blockPool id
* @return BP registration object
* @throws IOException on error
*/
@VisibleForTesting
public DatanodeRegistration getDNRegistrationForBP(String bpid)
throws IOException {
BPOfferService bpos = blockPoolManager.get(bpid);
if(bpos==null || bpos.bpRegistration==null) {
throw new IOException("cannot find BPOfferService for bpid="+bpid);
}
return bpos.bpRegistration;
}
/**
* Creates either NIO or regular depending on socketWriteTimeout.
*/
protected Socket newSocket() throws IOException {
return (dnConf.socketWriteTimeout > 0) ?
SocketChannel.open().socket() : new Socket();
}
/**
* Connect to the NN. This is separated out for easier testing.
*/
DatanodeProtocolClientSideTranslatorPB connectToNN(
InetSocketAddress nnAddr) throws IOException {
return new DatanodeProtocolClientSideTranslatorPB(nnAddr, conf);
}
public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
DatanodeID datanodeid, final Configuration conf, final int socketTimeout,
final boolean connectToDnViaHostname) throws IOException {
final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
}
final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
try {
return loginUgi
.doAs(new PrivilegedExceptionAction<InterDatanodeProtocol>() {
@Override
public InterDatanodeProtocol run() throws IOException {
return new InterDatanodeProtocolTranslatorPB(addr, loginUgi,
conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
});
} catch (InterruptedException ie) {
throw new IOException(ie.getMessage());
}
}
public DataNodeMetrics getMetrics() {
return metrics;
}
/** Ensure the authentication method is kerberos */
private void checkKerberosAuthMethod(String msg) throws IOException {
// User invoking the call must be same as the datanode user
if (!UserGroupInformation.isSecurityEnabled()) {
return;
}
if (UserGroupInformation.getCurrentUser().getAuthenticationMethod() !=
AuthenticationMethod.KERBEROS) {
throw new AccessControlException("Error in " + msg
+ "Only kerberos based authentication is allowed.");
}
}
private void checkBlockLocalPathAccess() throws IOException {
checkKerberosAuthMethod("getBlockLocalPathInfo()");
String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
if (!usersWithLocalPathAccess.contains(currentUser)) {
throw new AccessControlException(
"Can't continue with getBlockLocalPathInfo() "
+ "authorization. The user " + currentUser
+ " is not allowed to call getBlockLocalPathInfo");
}
}
public long getMaxNumberOfBlocksToLog() {
return maxNumberOfBlocksToLog;
}
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
Token<BlockTokenIdentifier> token) throws IOException {
checkBlockLocalPathAccess();
checkBlockToken(block, token, BlockTokenIdentifier.AccessMode.READ);
Preconditions.checkNotNull(data, "Storage not yet initialized");
BlockLocalPathInfo info = data.getBlockLocalPathInfo(block);
if (LOG.isDebugEnabled()) {
if (info != null) {
if (LOG.isTraceEnabled()) {
LOG.trace("getBlockLocalPathInfo successful block=" + block
+ " blockfile " + info.getBlockPath() + " metafile "
+ info.getMetaPath());
}
} else {
if (LOG.isTraceEnabled()) {
LOG.trace("getBlockLocalPathInfo for block=" + block
+ " returning null");
}
}
}
metrics.incrBlocksGetLocalPathInfo();
return info;
}
@InterfaceAudience.LimitedPrivate("HDFS")
static public class ShortCircuitFdsUnsupportedException extends IOException {
private static final long serialVersionUID = 1L;
public ShortCircuitFdsUnsupportedException(String msg) {
super(msg);
}
}
@InterfaceAudience.LimitedPrivate("HDFS")
static public class ShortCircuitFdsVersionException extends IOException {
private static final long serialVersionUID = 1L;
public ShortCircuitFdsVersionException(String msg) {
super(msg);
}
}
FileInputStream[] requestShortCircuitFdsForRead(final ExtendedBlock blk,
final Token<BlockTokenIdentifier> token, int maxVersion)
throws ShortCircuitFdsUnsupportedException,
ShortCircuitFdsVersionException, IOException {
if (fileDescriptorPassingDisabledReason != null) {
throw new ShortCircuitFdsUnsupportedException(
fileDescriptorPassingDisabledReason);
}
checkBlockToken(blk, token, BlockTokenIdentifier.AccessMode.READ);
int blkVersion = CURRENT_BLOCK_FORMAT_VERSION;
if (maxVersion < blkVersion) {
throw new ShortCircuitFdsVersionException("Your client is too old " +
"to read this block! Its format version is " +
blkVersion + ", but the highest format version you can read is " +
maxVersion);
}
metrics.incrBlocksGetLocalPathInfo();
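    // fis[0] holds the block data stream, fis[1] the metadata (checksum) stream.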
    FileInputStream[] fis = new FileInputStream[2];
try {
fis[0] = (FileInputStream)data.getBlockInputStream(blk, 0);
fis[1] = DatanodeUtil.getMetaDataInputStream(blk, data);
} catch (ClassCastException e) {
LOG.debug("requestShortCircuitFdsForRead failed", e);
throw new ShortCircuitFdsUnsupportedException("This DataNode's " +
"FsDatasetSpi does not support short-circuit local reads");
}
return fis;
}
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(
String bpId, long[] blockIds,
List<Token<BlockTokenIdentifier>> tokens) throws IOException,
UnsupportedOperationException {
if (!getHdfsBlockLocationsEnabled) {
throw new UnsupportedOperationException("Datanode#getHdfsBlocksMetadata "
+ " is not enabled in datanode config");
}
if (blockIds.length != tokens.size()) {
throw new IOException("Differing number of blocks and tokens");
}
// Check access for each block
for (int i = 0; i < blockIds.length; i++) {
checkBlockToken(new ExtendedBlock(bpId, blockIds[i]),
tokens.get(i), BlockTokenIdentifier.AccessMode.READ);
}
DataNodeFaultInjector.get().getHdfsBlocksMetadata();
return data.getHdfsBlocksMetadata(bpId, blockIds);
}
private void checkBlockToken(ExtendedBlock block, Token<BlockTokenIdentifier> token,
AccessMode accessMode) throws IOException {
if (isBlockTokenEnabled) {
BlockTokenIdentifier id = new BlockTokenIdentifier();
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream in = new DataInputStream(buf);
id.readFields(in);
if (LOG.isDebugEnabled()) {
LOG.debug("Got: " + id.toString());
}
blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode);
}
}
/**
* Shut down this instance of the datanode.
* Returns only after shutdown is complete.
* This method can only be called by the offerService thread.
* Otherwise, deadlock might occur.
*/
public void shutdown() {
if (plugins != null) {
for (ServicePlugin p : plugins) {
try {
p.stop();
LOG.info("Stopped plug-in " + p);
} catch (Throwable t) {
LOG.warn("ServicePlugin " + p + " could not be stopped", t);
}
}
}
List<BPOfferService> bposArray = (this.blockPoolManager == null)
? new ArrayList<BPOfferService>()
: this.blockPoolManager.getAllNamenodeThreads();
// If shutdown is not for restart, set shouldRun to false early.
if (!shutdownForUpgrade) {
shouldRun = false;
}
// When shutting down for restart, DataXceiverServer is interrupted
// in order to avoid any further acceptance of requests, but the peers
// for block writes are not closed until the clients are notified.
if (dataXceiverServer != null) {
try {
xserver.sendOOBToPeers();
((DataXceiverServer) this.dataXceiverServer.getRunnable()).kill();
this.dataXceiverServer.interrupt();
} catch (Throwable e) {
// Ignore, since the out of band messaging is advisory.
}
}
// Interrupt the checkDiskErrorThread and terminate it.
if(this.checkDiskErrorThread != null) {
this.checkDiskErrorThread.interrupt();
}
// Record the time of initial notification
long timeNotified = Time.monotonicNow();
if (localDataXceiverServer != null) {
((DataXceiverServer) this.localDataXceiverServer.getRunnable()).kill();
this.localDataXceiverServer.interrupt();
}
// Terminate directory scanner and block scanner
shutdownPeriodicScanners();
// Stop the web server
if (httpServer != null) {
try {
httpServer.close();
} catch (Exception e) {
LOG.warn("Exception shutting down DataNode HttpServer", e);
}
}
if (pauseMonitor != null) {
pauseMonitor.stop();
}
// shouldRun is set to false here to prevent certain threads from exiting
// before the restart prep is done.
this.shouldRun = false;
    // wait for the reconfiguration thread, if any, to exit
shutdownReconfigurationTask();
// wait for all data receiver threads to exit
if (this.threadGroup != null) {
int sleepMs = 2;
while (true) {
        // When shutting down for restart, wait about a second after the
        // initial notification before forcing termination of receiver threads.
if (!this.shutdownForUpgrade ||
(this.shutdownForUpgrade && (Time.monotonicNow() - timeNotified
> 1000))) {
this.threadGroup.interrupt();
break;
}
LOG.info("Waiting for threadgroup to exit, active threads is " +
this.threadGroup.activeCount());
if (this.threadGroup.activeCount() == 0) {
break;
}
try {
Thread.sleep(sleepMs);
} catch (InterruptedException e) {}
sleepMs = sleepMs * 3 / 2; // exponential backoff
if (sleepMs > 200) {
sleepMs = 200;
}
}
this.threadGroup = null;
}
if (this.dataXceiverServer != null) {
// wait for dataXceiverServer to terminate
try {
this.dataXceiverServer.join();
} catch (InterruptedException ie) {
}
}
if (this.localDataXceiverServer != null) {
// wait for localDataXceiverServer to terminate
try {
this.localDataXceiverServer.join();
} catch (InterruptedException ie) {
}
}
// IPC server needs to be shutdown late in the process, otherwise
// shutdown command response won't get sent.
if (ipcServer != null) {
ipcServer.stop();
}
if(blockPoolManager != null) {
try {
this.blockPoolManager.shutDownAll(bposArray);
} catch (InterruptedException ie) {
LOG.warn("Received exception in BlockPoolManager#shutDownAll: ", ie);
}
}
if (storage != null) {
try {
this.storage.unlockAll();
} catch (IOException ie) {
LOG.warn("Exception when unlocking storage: " + ie, ie);
}
}
if (data != null) {
data.shutdown();
}
if (metrics != null) {
metrics.shutdown();
}
if (dataNodeInfoBeanName != null) {
MBeans.unregister(dataNodeInfoBeanName);
dataNodeInfoBeanName = null;
}
if (this.spanReceiverHost != null) {
this.spanReceiverHost.closeReceivers();
}
if (shortCircuitRegistry != null) shortCircuitRegistry.shutdown();
LOG.info("Shutdown complete.");
synchronized(this) {
// it is already false, but setting it again to avoid a findbug warning.
this.shouldRun = false;
// Notify the main thread.
notifyAll();
}
}
/**
* Check if there is a disk failure asynchronously and if so, handle the error
*/
public void checkDiskErrorAsync() {
synchronized(checkDiskErrorMutex) {
checkDiskErrorFlag = true;
if(checkDiskErrorThread == null) {
startCheckDiskErrorThread();
checkDiskErrorThread.start();
LOG.info("Starting CheckDiskError Thread");
}
}
}
private void handleDiskError(String errMsgr) {
final boolean hasEnoughResources = data.hasEnoughResource();
LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResources);
// If we have enough active valid volumes then we do not want to
// shutdown the DN completely.
int dpError = hasEnoughResources ? DatanodeProtocol.DISK_ERROR
: DatanodeProtocol.FATAL_DISK_ERROR;
metrics.incrVolumeFailures();
//inform NameNodes
for(BPOfferService bpos: blockPoolManager.getAllNamenodeThreads()) {
bpos.trySendErrorReport(dpError, errMsgr);
}
if(hasEnoughResources) {
scheduleAllBlockReport(0);
return; // do not shutdown
}
LOG.warn("DataNode is shutting down: " + errMsgr);
shouldRun = false;
}
/** Number of concurrent xceivers per node. */
@Override // DataNodeMXBean
public int getXceiverCount() {
return threadGroup == null ? 0 : threadGroup.activeCount();
}
@Override // DataNodeMXBean
public Map<String, Map<String, Long>> getDatanodeNetworkCounts() {
return datanodeNetworkCounts.asMap();
}
void incrDatanodeNetworkErrors(String host) {
metrics.incrDatanodeNetworkErrors();
/*
* Synchronizing on the whole cache is a big hammer, but since it's only
* accumulating errors, it should be ok. If this is ever expanded to include
* non-error stats, then finer-grained concurrency should be applied.
*/
synchronized (datanodeNetworkCounts) {
try {
final Map<String, Long> curCount = datanodeNetworkCounts.get(host);
curCount.put("networkErrors", curCount.get("networkErrors") + 1L);
datanodeNetworkCounts.put(host, curCount);
} catch (ExecutionException e) {
LOG.warn("failed to increment network error counts for " + host);
}
}
}
int getXmitsInProgress() {
return xmitsInProgress.get();
}
private void reportBadBlock(final BPOfferService bpos,
final ExtendedBlock block, final String msg) {
FsVolumeSpi volume = getFSDataset().getVolume(block);
bpos.reportBadBlocks(
block, volume.getStorageID(), volume.getStorageType());
LOG.warn(msg);
}
private void transferBlock(ExtendedBlock block, DatanodeInfo[] xferTargets,
StorageType[] xferTargetStorageTypes) throws IOException {
BPOfferService bpos = getBPOSForBlock(block);
DatanodeRegistration bpReg = getDNRegistrationForBP(block.getBlockPoolId());
boolean replicaNotExist = false;
boolean replicaStateNotFinalized = false;
boolean blockFileNotExist = false;
boolean lengthTooShort = false;
try {
data.checkBlock(block, block.getNumBytes(), ReplicaState.FINALIZED);
} catch (ReplicaNotFoundException e) {
replicaNotExist = true;
} catch (UnexpectedReplicaStateException e) {
replicaStateNotFinalized = true;
} catch (FileNotFoundException e) {
blockFileNotExist = true;
} catch (EOFException e) {
lengthTooShort = true;
} catch (IOException e) {
// The IOException indicates not being able to access block file,
// treat it the same here as blockFileNotExist, to trigger
// reporting it as a bad block
blockFileNotExist = true;
}
if (replicaNotExist || replicaStateNotFinalized) {
String errStr = "Can't send invalid block " + block;
LOG.info(errStr);
bpos.trySendErrorReport(DatanodeProtocol.INVALID_BLOCK, errStr);
return;
}
if (blockFileNotExist) {
// Report back to NN bad block caused by non-existent block file.
reportBadBlock(bpos, block, "Can't replicate block " + block
+ " because the block file doesn't exist, or is not accessible");
return;
}
if (lengthTooShort) {
// Check if NN recorded length matches on-disk length
// Shorter on-disk len indicates corruption so report NN the corrupt block
reportBadBlock(bpos, block, "Can't replicate block " + block
+ " because on-disk length " + data.getLength(block)
+ " is shorter than NameNode recorded length " + block.getNumBytes());
return;
}
int numTargets = xferTargets.length;
if (numTargets > 0) {
StringBuilder xfersBuilder = new StringBuilder();
for (int i = 0; i < numTargets; i++) {
xfersBuilder.append(xferTargets[i]);
xfersBuilder.append(" ");
}
LOG.info(bpReg + " Starting thread to transfer " +
block + " to " + xfersBuilder);
new Daemon(new DataTransfer(xferTargets, xferTargetStorageTypes, block,
BlockConstructionStage.PIPELINE_SETUP_CREATE, "")).start();
}
}
void transferBlocks(String poolId, Block blocks[],
DatanodeInfo xferTargets[][], StorageType[][] xferTargetStorageTypes) {
for (int i = 0; i < blocks.length; i++) {
try {
transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i],
xferTargetStorageTypes[i]);
} catch (IOException ie) {
LOG.warn("Failed to transfer block " + blocks[i], ie);
}
}
}
/* ********************************************************************
Protocol when a client reads data from Datanode (Cur Ver: 9):
Client's Request :
=================
Processed in DataXceiver:
+----------------------------------------------+
| Common Header | 1 byte OP == OP_READ_BLOCK |
+----------------------------------------------+
Processed in readBlock() :
+-------------------------------------------------------------------------+
| 8 byte Block ID | 8 byte genstamp | 8 byte start offset | 8 byte length |
+-------------------------------------------------------------------------+
| vInt length | <DFSClient id> |
+-----------------------------------+
Client sends optional response only at the end of receiving data.
DataNode Response :
===================
In readBlock() :
If there is an error while initializing BlockSender :
+---------------------------+
| 2 byte OP_STATUS_ERROR | and connection will be closed.
+---------------------------+
Otherwise
+---------------------------+
| 2 byte OP_STATUS_SUCCESS |
+---------------------------+
Actual data, sent by BlockSender.sendBlock() :
ChecksumHeader :
+--------------------------------------------------+
| 1 byte CHECKSUM_TYPE | 4 byte BYTES_PER_CHECKSUM |
+--------------------------------------------------+
Followed by actual data in the form of PACKETS:
+------------------------------------+
| Sequence of data PACKETs .... |
+------------------------------------+
A "PACKET" is defined further below.
The client reads data until it receives a packet with
"LastPacketInBlock" set to true or with a zero length. It then replies
to DataNode with one of the status codes:
- CHECKSUM_OK: All the chunk checksums have been verified
- SUCCESS: Data received; checksums not verified
- ERROR_CHECKSUM: (Currently not used) Detected invalid checksums
+---------------+
| 2 byte Status |
+---------------+
    The DataNode expects all well-behaved clients to send the 2 byte
    status code. And if the client doesn't, the DN will close the
connection. So the status code is optional in the sense that it
does not affect the correctness of the data. (And the client can
always reconnect.)
PACKET : Contains a packet header, checksum and data. Amount of data
======== carried is set by BUFFER_SIZE.
+-----------------------------------------------------+
| 4 byte packet length (excluding packet header) |
+-----------------------------------------------------+
| 8 byte offset in the block | 8 byte sequence number |
+-----------------------------------------------------+
| 1 byte isLastPacketInBlock |
+-----------------------------------------------------+
| 4 byte Length of actual data |
+-----------------------------------------------------+
| x byte checksum data. x is defined below |
+-----------------------------------------------------+
| actual data ...... |
+-----------------------------------------------------+
      x = (length of data + BYTES_PER_CHECKSUM - 1)/BYTES_PER_CHECKSUM *
CHECKSUM_SIZE
CHECKSUM_SIZE depends on CHECKSUM_TYPE (usually, 4 for CRC32)
The above packet format is used while writing data to DFS also.
Not all the fields might be used while reading.
************************************************************************ */
/**
* Used for transferring a block of data. This class
* sends a piece of data to another DataNode.
*/
private class DataTransfer implements Runnable {
final DatanodeInfo[] targets;
final StorageType[] targetStorageTypes;
final ExtendedBlock b;
final BlockConstructionStage stage;
final private DatanodeRegistration bpReg;
final String clientname;
final CachingStrategy cachingStrategy;
/**
* Connect to the first item in the target list. Pass along the
* entire target list, the block, and the data.
*/
DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes,
ExtendedBlock b, BlockConstructionStage stage,
final String clientname) {
if (DataTransferProtocol.LOG.isDebugEnabled()) {
DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
+ b + " (numBytes=" + b.getNumBytes() + ")"
+ ", stage=" + stage
+ ", clientname=" + clientname
+ ", targets=" + Arrays.asList(targets)
+ ", target storage types=" + (targetStorageTypes == null ? "[]" :
Arrays.asList(targetStorageTypes)));
}
this.targets = targets;
this.targetStorageTypes = targetStorageTypes;
this.b = b;
this.stage = stage;
BPOfferService bpos = blockPoolManager.get(b.getBlockPoolId());
bpReg = bpos.bpRegistration;
this.clientname = clientname;
this.cachingStrategy =
new CachingStrategy(true, getDnConf().readaheadLength);
}
/**
* Do the deed, write the bytes
*/
@Override
public void run() {
xmitsInProgress.getAndIncrement();
Socket sock = null;
DataOutputStream out = null;
DataInputStream in = null;
BlockSender blockSender = null;
final boolean isClient = clientname.length() > 0;
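      // A non-empty client name means the transfer was requested by a client
      // (e.g. pipeline recovery), so a close-ack is read back below.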
try {
final String dnAddr = targets[0].getXferAddr(connectToDnViaHostname);
InetSocketAddress curTarget = NetUtils.createSocketAddr(dnAddr);
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to datanode " + dnAddr);
}
sock = newSocket();
NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
sock.setSoTimeout(targets.length * dnConf.socketTimeout);
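        // Scale the read timeout with the number of targets to account for
        // the depth of the write pipeline.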
//
// Header info
//
Token<BlockTokenIdentifier> accessToken = BlockTokenSecretManager.DUMMY_TOKEN;
if (isBlockTokenEnabled) {
accessToken = blockPoolTokenSecretManager.generateToken(b,
EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE));
}
long writeTimeout = dnConf.socketWriteTimeout +
HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
InputStream unbufIn = NetUtils.getInputStream(sock);
DataEncryptionKeyFactory keyFactory =
getDataEncryptionKeyFactoryForBlock(b);
IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut,
unbufIn, keyFactory, accessToken, bpReg);
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
DFSUtil.getSmallBufferSize(conf)));
in = new DataInputStream(unbufIn);
blockSender = new BlockSender(b, 0, b.getNumBytes(),
false, false, true, DataNode.this, null, cachingStrategy);
DatanodeInfo srcNode = new DatanodeInfo(bpReg);
new Sender(out).writeBlock(b, targetStorageTypes[0], accessToken,
clientname, targets, targetStorageTypes, srcNode,
stage, 0, 0, 0, 0, blockSender.getChecksum(), cachingStrategy,
false, false, null);
// send data & checksum
blockSender.sendBlock(out, unbufOut, null);
// no response necessary
LOG.info(getClass().getSimpleName() + ": Transmitted " + b
+ " (numBytes=" + b.getNumBytes() + ") to " + curTarget);
// read ack
if (isClient) {
DNTransferAckProto closeAck = DNTransferAckProto.parseFrom(
PBHelper.vintPrefixed(in));
if (LOG.isDebugEnabled()) {
LOG.debug(getClass().getSimpleName() + ": close-ack=" + closeAck);
}
if (closeAck.getStatus() != Status.SUCCESS) {
if (closeAck.getStatus() == Status.ERROR_ACCESS_TOKEN) {
throw new InvalidBlockTokenException(
"Got access token error for connect ack, targets="
+ Arrays.asList(targets));
} else {
throw new IOException("Bad connect ack, targets="
+ Arrays.asList(targets));
}
}
} else {
metrics.incrBlocksReplicated();
}
} catch (IOException ie) {
LOG.warn(bpReg + ":Failed to transfer " + b + " to " +
targets[0] + " got ", ie);
// check if there are any disk problem
checkDiskErrorAsync();
} finally {
xmitsInProgress.getAndDecrement();
IOUtils.closeStream(blockSender);
IOUtils.closeStream(out);
IOUtils.closeStream(in);
IOUtils.closeSocket(sock);
}
}
}
/**
* Returns a new DataEncryptionKeyFactory that generates a key from the
* BlockPoolTokenSecretManager, using the block pool ID of the given block.
*
* @param block for which the factory needs to create a key
* @return DataEncryptionKeyFactory for block's block pool ID
*/
DataEncryptionKeyFactory getDataEncryptionKeyFactoryForBlock(
final ExtendedBlock block) {
return new DataEncryptionKeyFactory() {
@Override
public DataEncryptionKey newDataEncryptionKey() {
return dnConf.encryptDataTransfer ?
blockPoolTokenSecretManager.generateDataEncryptionKey(
block.getBlockPoolId()) : null;
}
};
}
/**
* After a block becomes finalized, a datanode increases metric counter,
* notifies namenode, and adds it to the block scanner
* @param block block to close
* @param delHint hint on which excess block to delete
* @param storageUuid UUID of the storage where block is stored
*/
void closeBlock(ExtendedBlock block, String delHint, String storageUuid) {
metrics.incrBlocksWritten();
BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
if(bpos != null) {
bpos.notifyNamenodeReceivedBlock(block, delHint, storageUuid);
} else {
LOG.warn("Cannot find BPOfferService for reporting block received for bpid="
+ block.getBlockPoolId());
}
}
/** Start a single datanode daemon and wait for it to finish.
* If this thread is specifically interrupted, it will stop waiting.
*/
public void runDatanodeDaemon() throws IOException {
blockPoolManager.startAll();
    // start dataXceiverServer
dataXceiverServer.start();
if (localDataXceiverServer != null) {
localDataXceiverServer.start();
}
ipcServer.start();
startPlugins(conf);
}
/**
* A data node is considered to be up if one of the bp services is up
*/
public boolean isDatanodeUp() {
for (BPOfferService bp : blockPoolManager.getAllNamenodeThreads()) {
if (bp.isAlive()) {
return true;
}
}
return false;
}
/** Instantiate a single datanode object. This must be run by invoking
* {@link DataNode#runDatanodeDaemon()} subsequently.
*/
public static DataNode instantiateDataNode(String args[],
Configuration conf) throws IOException {
return instantiateDataNode(args, conf, null);
}
/** Instantiate a single datanode object, along with its secure resources.
   * This must be run by invoking {@link DataNode#runDatanodeDaemon()}
* subsequently.
*/
public static DataNode instantiateDataNode(String args [], Configuration conf,
SecureResources resources) throws IOException {
if (conf == null)
conf = new HdfsConfiguration();
if (args != null) {
// parse generic hadoop options
GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
args = hParser.getRemainingArgs();
}
if (!parseArguments(args, conf)) {
printUsage(System.err);
return null;
}
Collection<StorageLocation> dataLocations = getStorageLocations(conf);
UserGroupInformation.setConfiguration(conf);
SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY,
DFS_DATANODE_KERBEROS_PRINCIPAL_KEY);
return makeInstance(dataLocations, conf, resources);
}
public static List<StorageLocation> getStorageLocations(Configuration conf) {
Collection<String> rawLocations =
conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
List<StorageLocation> locations =
new ArrayList<StorageLocation>(rawLocations.size());
for(String locationString : rawLocations) {
final StorageLocation location;
try {
location = StorageLocation.parse(locationString);
} catch (IOException ioe) {
LOG.error("Failed to initialize storage directory " + locationString
+ ". Exception details: " + ioe);
// Ignore the exception.
continue;
} catch (SecurityException se) {
LOG.error("Failed to initialize storage directory " + locationString
+ ". Exception details: " + se);
// Ignore the exception.
continue;
}
locations.add(location);
}
return locations;
}
/** Instantiate & Start a single datanode daemon and wait for it to finish.
* If this thread is specifically interrupted, it will stop waiting.
*/
@VisibleForTesting
public static DataNode createDataNode(String args[],
Configuration conf) throws IOException {
return createDataNode(args, conf, null);
}
/** Instantiate & Start a single datanode daemon and wait for it to finish.
* If this thread is specifically interrupted, it will stop waiting.
*/
@VisibleForTesting
@InterfaceAudience.Private
public static DataNode createDataNode(String args[], Configuration conf,
SecureResources resources) throws IOException {
DataNode dn = instantiateDataNode(args, conf, resources);
if (dn != null) {
dn.runDatanodeDaemon();
}
return dn;
}
void join() {
while (shouldRun) {
try {
blockPoolManager.joinAll();
if (blockPoolManager.getAllNamenodeThreads().size() == 0) {
shouldRun = false;
}
// Terminate if shutdown is complete or 2 seconds after all BPs
// are shutdown.
synchronized(this) {
wait(2000);
}
} catch (InterruptedException ex) {
LOG.warn("Received exception in Datanode#join: " + ex);
}
}
}
// Small wrapper around the DiskChecker class that provides means to mock
// DiskChecker static methods and unittest DataNode#getDataDirsFromURIs.
static class DataNodeDiskChecker {
private final FsPermission expectedPermission;
public DataNodeDiskChecker(FsPermission expectedPermission) {
this.expectedPermission = expectedPermission;
}
public void checkDir(LocalFileSystem localFS, Path path)
throws DiskErrorException, IOException {
DiskChecker.checkDir(localFS, path, expectedPermission);
}
}
/**
* Make an instance of DataNode after ensuring that at least one of the
* given data directories (and their parent directories, if necessary)
* can be created.
* @param dataDirs List of directories, where the new DataNode instance should
* keep its files.
* @param conf Configuration instance to use.
* @param resources Secure resources needed to run under Kerberos
* @return DataNode instance for given list of data dirs and conf, or null if
* no directory from this directory list can be created.
* @throws IOException
*/
static DataNode makeInstance(Collection<StorageLocation> dataDirs,
Configuration conf, SecureResources resources) throws IOException {
LocalFileSystem localFS = FileSystem.getLocal(conf);
FsPermission permission = new FsPermission(
conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
DataNodeDiskChecker dataNodeDiskChecker =
new DataNodeDiskChecker(permission);
List<StorageLocation> locations =
checkStorageLocations(dataDirs, localFS, dataNodeDiskChecker);
DefaultMetricsSystem.initialize("DataNode");
assert locations.size() > 0 : "number of data directories should be > 0";
return new DataNode(conf, locations, resources);
}
// DataNode ctor expects AbstractList instead of List or Collection...
static List<StorageLocation> checkStorageLocations(
Collection<StorageLocation> dataDirs,
LocalFileSystem localFS, DataNodeDiskChecker dataNodeDiskChecker)
throws IOException {
ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
StringBuilder invalidDirs = new StringBuilder();
for (StorageLocation location : dataDirs) {
final URI uri = location.getUri();
try {
dataNodeDiskChecker.checkDir(localFS, new Path(uri));
locations.add(location);
} catch (IOException ioe) {
LOG.warn("Invalid " + DFS_DATANODE_DATA_DIR_KEY + " "
+ location.getFile() + " : ", ioe);
invalidDirs.append("\"").append(uri.getPath()).append("\" ");
}
}
if (locations.size() == 0) {
throw new IOException("All directories in "
+ DFS_DATANODE_DATA_DIR_KEY + " are invalid: "
+ invalidDirs);
}
return locations;
}
@Override
public String toString() {
return "DataNode{data=" + data + ", localName='" + getDisplayName()
+ "', datanodeUuid='" + storage.getDatanodeUuid() + "', xmitsInProgress="
+ xmitsInProgress.get() + "}";
}
private static void printUsage(PrintStream out) {
out.println(USAGE + "\n");
}
/**
* Parse and verify command line arguments and set configuration parameters.
*
   * @return false if the passed arguments are incorrect
*/
@VisibleForTesting
static boolean parseArguments(String args[], Configuration conf) {
StartupOption startOpt = StartupOption.REGULAR;
int i = 0;
if (args != null && args.length != 0) {
String cmd = args[i++];
if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
LOG.error("-r, --rack arguments are not supported anymore. RackID " +
"resolution is handled by the NameNode.");
return false;
} else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.ROLLBACK;
} else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.REGULAR;
} else {
return false;
}
}
setStartupOption(conf, startOpt);
return (args == null || i == args.length); // Fail if more than one cmd specified!
}
private static void setStartupOption(Configuration conf, StartupOption opt) {
conf.set(DFS_DATANODE_STARTUP_KEY, opt.toString());
}
static StartupOption getStartupOption(Configuration conf) {
String value = conf.get(DFS_DATANODE_STARTUP_KEY,
StartupOption.REGULAR.toString());
return StartupOption.getEnum(value);
}
/**
* This methods arranges for the data node to send
* the block report at the next heartbeat.
*/
public void scheduleAllBlockReport(long delay) {
for(BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
bpos.scheduleBlockReport(delay);
}
}
/**
   * This method is used for testing.
   * Examples are adding and deleting blocks directly.
* The most common usage will be when the data node's storage is simulated.
*
* @return the fsdataset that stores the blocks
*/
@VisibleForTesting
public FsDatasetSpi<?> getFSDataset() {
return data;
}
@VisibleForTesting
/** @return the block scanner. */
public BlockScanner getBlockScanner() {
return blockScanner;
}
@VisibleForTesting
DirectoryScanner getDirectoryScanner() {
return directoryScanner;
}
public static void secureMain(String args[], SecureResources resources) {
int errorCode = 0;
try {
StringUtils.startupShutdownMessage(DataNode.class, args, LOG);
DataNode datanode = createDataNode(args, null, resources);
if (datanode != null) {
datanode.join();
} else {
errorCode = 1;
}
} catch (Throwable e) {
LOG.fatal("Exception in secureMain", e);
terminate(1, e);
} finally {
      // We need to terminate the process here because either shutdown was
      // called or some disk-related condition (such as volumes tolerated or
      // volumes required) was not met. Also, in secure mode, control will go
      // to Jsvc and the Datanode process hangs if it does not exit.
LOG.warn("Exiting Datanode");
terminate(errorCode);
}
}
public static void main(String args[]) {
if (DFSUtil.parseHelpArgument(args, DataNode.USAGE, System.out, true)) {
System.exit(0);
}
secureMain(args, null);
}
public Daemon recoverBlocks(
final String who,
final Collection<RecoveringBlock> blocks) {
Daemon d = new Daemon(threadGroup, new Runnable() {
/** Recover a list of blocks. It is run by the primary datanode. */
@Override
public void run() {
for(RecoveringBlock b : blocks) {
try {
logRecoverBlock(who, b);
recoverBlock(b);
} catch (IOException e) {
LOG.warn("recoverBlocks FAILED: " + b, e);
}
}
}
});
d.start();
return d;
}
// InterDataNodeProtocol implementation
@Override // InterDatanodeProtocol
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException {
return data.initReplicaRecovery(rBlock);
}
/**
* Convenience method, which unwraps RemoteException.
* @throws IOException not a RemoteException.
*/
private static ReplicaRecoveryInfo callInitReplicaRecovery(
InterDatanodeProtocol datanode,
RecoveringBlock rBlock) throws IOException {
try {
return datanode.initReplicaRecovery(rBlock);
} catch(RemoteException re) {
throw re.unwrapRemoteException();
}
}
/**
* Update replica with the new generation stamp and length.
*/
@Override // InterDatanodeProtocol
public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock,
final long recoveryId, final long newBlockId, final long newLength)
throws IOException {
final String storageID = data.updateReplicaUnderRecovery(oldBlock,
recoveryId, newBlockId, newLength);
// Notify the namenode of the updated block info. This is important
// for HA, since otherwise the standby node may lose track of the
// block locations until the next block report.
ExtendedBlock newBlock = new ExtendedBlock(oldBlock);
newBlock.setGenerationStamp(recoveryId);
newBlock.setBlockId(newBlockId);
newBlock.setNumBytes(newLength);
notifyNamenodeReceivedBlock(newBlock, "", storageID);
return storageID;
}
/** A convenient class used in block recovery */
static class BlockRecord {
final DatanodeID id;
final InterDatanodeProtocol datanode;
final ReplicaRecoveryInfo rInfo;
private String storageID;
BlockRecord(DatanodeID id,
InterDatanodeProtocol datanode,
ReplicaRecoveryInfo rInfo) {
this.id = id;
this.datanode = datanode;
this.rInfo = rInfo;
}
void updateReplicaUnderRecovery(String bpid, long recoveryId,
long newBlockId, long newLength)
throws IOException {
final ExtendedBlock b = new ExtendedBlock(bpid, rInfo);
storageID = datanode.updateReplicaUnderRecovery(b, recoveryId, newBlockId,
newLength);
}
@Override
public String toString() {
return "block:" + rInfo + " node:" + id;
}
}
/** Recover a block */
private void recoverBlock(RecoveringBlock rBlock) throws IOException {
ExtendedBlock block = rBlock.getBlock();
    String blockPoolId = block.getBlockPoolId();
DatanodeID[] datanodeids = rBlock.getLocations();
List<BlockRecord> syncList = new ArrayList<BlockRecord>(datanodeids.length);
int errorCount = 0;
//check generation stamps
for(DatanodeID id : datanodeids) {
try {
        BPOfferService bpos = blockPoolManager.get(blockPoolId);
DatanodeRegistration bpReg = bpos.bpRegistration;
InterDatanodeProtocol datanode = bpReg.equals(id)?
this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
dnConf.socketTimeout, dnConf.connectToDnViaHostname);
ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
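        // A replica may participate in recovery only if its generation stamp
        // is not older than the block's and it has a non-zero length.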
if (info != null &&
info.getGenerationStamp() >= block.getGenerationStamp() &&
info.getNumBytes() > 0) {
syncList.add(new BlockRecord(id, datanode, info));
}
} catch (RecoveryInProgressException ripE) {
InterDatanodeProtocol.LOG.warn(
"Recovery for replica " + block + " on data-node " + id
+ " is already in progress. Recovery id = "
+ rBlock.getNewGenerationStamp() + " is aborted.", ripE);
return;
} catch (IOException e) {
++errorCount;
InterDatanodeProtocol.LOG.warn(
"Failed to obtain replica info for block (=" + block
+ ") from datanode (=" + id + ")", e);
}
}
if (errorCount == datanodeids.length) {
throw new IOException("All datanodes failed: block=" + block
+ ", datanodeids=" + Arrays.asList(datanodeids));
}
syncBlock(rBlock, syncList);
}
/**
* Get the NameNode corresponding to the given block pool.
*
* @param bpid Block pool Id
* @return Namenode corresponding to the bpid
* @throws IOException if unable to get the corresponding NameNode
*/
public DatanodeProtocolClientSideTranslatorPB getActiveNamenodeForBP(String bpid)
throws IOException {
BPOfferService bpos = blockPoolManager.get(bpid);
if (bpos == null) {
throw new IOException("No block pool offer service for bpid=" + bpid);
}
DatanodeProtocolClientSideTranslatorPB activeNN = bpos.getActiveNN();
if (activeNN == null) {
throw new IOException(
"Block pool " + bpid + " has not recognized an active NN");
}
return activeNN;
}
/** Block synchronization */
void syncBlock(RecoveringBlock rBlock,
List<BlockRecord> syncList) throws IOException {
ExtendedBlock block = rBlock.getBlock();
final String bpid = block.getBlockPoolId();
DatanodeProtocolClientSideTranslatorPB nn =
getActiveNamenodeForBP(block.getBlockPoolId());
long recoveryId = rBlock.getNewGenerationStamp();
boolean isTruncateRecovery = rBlock.getNewBlock() != null;
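    // A non-null new block indicates truncate recovery; the recovered block id
    // and length are taken from it rather than from the replicas.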
long blockId = (isTruncateRecovery) ?
rBlock.getNewBlock().getBlockId() : block.getBlockId();
if (LOG.isDebugEnabled()) {
LOG.debug("block=" + block + ", (length=" + block.getNumBytes()
+ "), syncList=" + syncList);
}
    // syncList.isEmpty() means that none of the data-nodes have the block
    // or all of their replicas have 0 length.
// The block can be deleted.
if (syncList.isEmpty()) {
nn.commitBlockSynchronization(block, recoveryId, 0,
true, true, DatanodeID.EMPTY_ARRAY, null);
return;
}
// Calculate the best available replica state.
ReplicaState bestState = ReplicaState.RWR;
long finalizedLength = -1;
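    // A smaller ReplicaState value indicates a better (more complete) state
    // (FINALIZED < RBW < RWR), so keep the minimum across the replicas.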
for(BlockRecord r : syncList) {
assert r.rInfo.getNumBytes() > 0 : "zero length replica";
ReplicaState rState = r.rInfo.getOriginalReplicaState();
if(rState.getValue() < bestState.getValue())
bestState = rState;
if(rState == ReplicaState.FINALIZED) {
if(finalizedLength > 0 && finalizedLength != r.rInfo.getNumBytes())
throw new IOException("Inconsistent size of finalized replicas. " +
"Replica " + r.rInfo + " expected size: " + finalizedLength);
finalizedLength = r.rInfo.getNumBytes();
}
}
// Calculate list of nodes that will participate in the recovery
// and the new block size
List<BlockRecord> participatingList = new ArrayList<BlockRecord>();
final ExtendedBlock newBlock = new ExtendedBlock(bpid, blockId,
-1, recoveryId);
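    // The length is left at -1 here and filled in below once the
    // participating replicas (and hence the recovered length) are known.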
switch(bestState) {
case FINALIZED:
assert finalizedLength > 0 : "finalizedLength is not positive";
for(BlockRecord r : syncList) {
ReplicaState rState = r.rInfo.getOriginalReplicaState();
if(rState == ReplicaState.FINALIZED ||
rState == ReplicaState.RBW &&
r.rInfo.getNumBytes() == finalizedLength)
participatingList.add(r);
}
newBlock.setNumBytes(finalizedLength);
break;
case RBW:
case RWR:
long minLength = Long.MAX_VALUE;
for(BlockRecord r : syncList) {
ReplicaState rState = r.rInfo.getOriginalReplicaState();
if(rState == bestState) {
minLength = Math.min(minLength, r.rInfo.getNumBytes());
participatingList.add(r);
}
}
newBlock.setNumBytes(minLength);
break;
case RUR:
case TEMPORARY:
assert false : "bad replica state: " + bestState;
}
if(isTruncateRecovery)
newBlock.setNumBytes(rBlock.getNewBlock().getNumBytes());
List<DatanodeID> failedList = new ArrayList<DatanodeID>();
final List<BlockRecord> successList = new ArrayList<BlockRecord>();
for(BlockRecord r : participatingList) {
try {
r.updateReplicaUnderRecovery(bpid, recoveryId, blockId,
newBlock.getNumBytes());
successList.add(r);
} catch (IOException e) {
InterDatanodeProtocol.LOG.warn("Failed to updateBlock (newblock="
+ newBlock + ", datanode=" + r.id + ")", e);
failedList.add(r.id);
}
}
// If any of the data-nodes failed, the recovery fails, because
// we never know the actual state of the replica on failed data-nodes.
// The recovery should be started over.
if(!failedList.isEmpty()) {
StringBuilder b = new StringBuilder();
for(DatanodeID id : failedList) {
b.append("\n " + id);
}
throw new IOException("Cannot recover " + block + ", the following "
+ failedList.size() + " data-nodes failed {" + b + "\n}");
}
// Notify the name-node about successfully recovered replicas.
final DatanodeID[] datanodes = new DatanodeID[successList.size()];
final String[] storages = new String[datanodes.length];
for(int i = 0; i < datanodes.length; i++) {
final BlockRecord r = successList.get(i);
datanodes[i] = r.id;
storages[i] = r.storageID;
}
nn.commitBlockSynchronization(block,
newBlock.getGenerationStamp(), newBlock.getNumBytes(), true, false,
datanodes, storages);
}
private static void logRecoverBlock(String who, RecoveringBlock rb) {
ExtendedBlock block = rb.getBlock();
DatanodeInfo[] targets = rb.getLocations();
LOG.info(who + " calls recoverBlock(" + block
+ ", targets=[" + Joiner.on(", ").join(targets) + "]"
+ ((rb.getNewBlock() == null) ? ", newGenerationStamp="
+ rb.getNewGenerationStamp() : ", newBlock=" + rb.getNewBlock())
+ ")");
}
@Override // ClientDataNodeProtocol
public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException {
checkReadAccess(block);
return data.getReplicaVisibleLength(block);
}
private void checkReadAccess(final ExtendedBlock block) throws IOException {
if (isBlockTokenEnabled) {
Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
.getTokenIdentifiers();
if (tokenIds.size() != 1) {
throw new IOException("Can't continue since none or more than one "
+ "BlockTokenIdentifier is found.");
}
for (TokenIdentifier tokenId : tokenIds) {
BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
if (LOG.isDebugEnabled()) {
LOG.debug("Got: " + id.toString());
}
blockPoolTokenSecretManager.checkAccess(id, null, block,
BlockTokenIdentifier.AccessMode.READ);
}
}
}
/**
* Transfer a replica to the datanode targets.
* @param b the block to transfer.
* The corresponding replica must be an RBW or a Finalized.
* Its GS and numBytes will be set to
* the stored GS and the visible length.
* @param targets targets to transfer the block to
* @param client client name
*/
void transferReplicaForPipelineRecovery(final ExtendedBlock b,
final DatanodeInfo[] targets, final StorageType[] targetStorageTypes,
final String client) throws IOException {
final long storedGS;
final long visible;
final BlockConstructionStage stage;
//get replica information
synchronized(data) {
Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
b.getBlockId());
if (null == storedBlock) {
throw new IOException(b + " not found in datanode.");
}
storedGS = storedBlock.getGenerationStamp();
if (storedGS < b.getGenerationStamp()) {
throw new IOException(storedGS
+ " = storedGS < b.getGenerationStamp(), b=" + b);
}
// Update the genstamp with storedGS
b.setGenerationStamp(storedGS);
if (data.isValidRbw(b)) {
stage = BlockConstructionStage.TRANSFER_RBW;
} else if (data.isValidBlock(b)) {
stage = BlockConstructionStage.TRANSFER_FINALIZED;
} else {
final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
}
visible = data.getReplicaVisibleLength(b);
}
//set visible length
b.setNumBytes(visible);
if (targets.length > 0) {
new DataTransfer(targets, targetStorageTypes, b, stage, client).run();
}
}
/**
* Finalize a pending upgrade in response to DNA_FINALIZE.
* @param blockPoolId the block pool to finalize
*/
void finalizeUpgradeForPool(String blockPoolId) throws IOException {
storage.finalizeUpgrade(blockPoolId);
}
static InetSocketAddress getStreamingAddr(Configuration conf) {
return NetUtils.createSocketAddr(
conf.getTrimmed(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
}
@Override // DataNodeMXBean
public String getVersion() {
return VersionInfo.getVersion();
}
@Override // DataNodeMXBean
public String getRpcPort(){
InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
this.getConf().get(DFS_DATANODE_IPC_ADDRESS_KEY));
return Integer.toString(ipcAddr.getPort());
}
@Override // DataNodeMXBean
public String getHttpPort(){
return this.getConf().get("dfs.datanode.info.port");
}
/**
* @return the datanode's http port
*/
public int getInfoPort() {
return infoPort;
}
/**
* @return the datanode's https port
*/
public int getInfoSecurePort() {
return infoSecurePort;
}
/**
* Returned information is a JSON representation of a map with
* name node host name as the key and block pool Id as the value.
   * Note that, if there are multiple NNs in an HA nameservice,
* a given block pool may be represented twice.
*/
@Override // DataNodeMXBean
public String getNamenodeAddresses() {
final Map<String, String> info = new HashMap<String, String>();
for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
if (bpos != null) {
for (BPServiceActor actor : bpos.getBPServiceActors()) {
info.put(actor.getNNSocketAddress().getHostName(),
bpos.getBlockPoolId());
}
}
}
return JSON.toString(info);
}
/**
* Returned information is a JSON representation of a map with
* volume name as the key and value is a map of volume attribute
* keys to its values
*/
@Override // DataNodeMXBean
public String getVolumeInfo() {
Preconditions.checkNotNull(data, "Storage not yet initialized");
return JSON.toString(data.getVolumeInfoMap());
}
@Override // DataNodeMXBean
public synchronized String getClusterId() {
return clusterId;
}
public void refreshNamenodes(Configuration conf) throws IOException {
blockPoolManager.refreshNamenodes(conf);
}
@Override // ClientDatanodeProtocol
public void refreshNamenodes() throws IOException {
checkSuperuserPrivilege();
conf = new Configuration();
refreshNamenodes(conf);
}
@Override // ClientDatanodeProtocol
public void deleteBlockPool(String blockPoolId, boolean force)
throws IOException {
checkSuperuserPrivilege();
LOG.info("deleteBlockPool command received for block pool " + blockPoolId
+ ", force=" + force);
if (blockPoolManager.get(blockPoolId) != null) {
LOG.warn("The block pool "+blockPoolId+
" is still running, cannot be deleted.");
throw new IOException(
"The block pool is still running. First do a refreshNamenodes to " +
"shutdown the block pool service");
}
data.deleteBlockPool(blockPoolId, force);
}
@Override // ClientDatanodeProtocol
public synchronized void shutdownDatanode(boolean forUpgrade) throws IOException {
checkSuperuserPrivilege();
LOG.info("shutdownDatanode command received (upgrade=" + forUpgrade +
"). Shutting down Datanode...");
// Shutdown can be called only once.
if (shutdownInProgress) {
throw new IOException("Shutdown already in progress.");
}
shutdownInProgress = true;
shutdownForUpgrade = forUpgrade;
// Asynchronously start the shutdown process so that the rpc response can be
// sent back.
Thread shutdownThread = new Thread() {
@Override public void run() {
if (!shutdownForUpgrade) {
// Delay the shutdown a bit if not doing for restart.
try {
Thread.sleep(1000);
} catch (InterruptedException ie) { }
}
shutdown();
}
};
shutdownThread.setDaemon(true);
shutdownThread.start();
}
@Override //ClientDatanodeProtocol
public DatanodeLocalInfo getDatanodeInfo() {
long uptime = ManagementFactory.getRuntimeMXBean().getUptime()/1000;
return new DatanodeLocalInfo(VersionInfo.getVersion(),
confVersion, uptime);
}
@Override // ClientDatanodeProtocol
public void startReconfiguration() throws IOException {
checkSuperuserPrivilege();
startReconfigurationTask();
}
@Override // ClientDatanodeProtocol
public ReconfigurationTaskStatus getReconfigurationStatus() throws IOException {
checkSuperuserPrivilege();
return getReconfigurationTaskStatus();
}
@Override // ClientDatanodeProtocol
public List<String> listReconfigurableProperties()
throws IOException {
return RECONFIGURABLE_PROPERTIES;
}
@Override // ClientDatanodeProtocol
public void triggerBlockReport(BlockReportOptions options)
throws IOException {
checkSuperuserPrivilege();
for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
if (bpos != null) {
for (BPServiceActor actor : bpos.getBPServiceActors()) {
actor.triggerBlockReport(options);
}
}
}
}
/**
* @param addr rpc address of the namenode
* @return true if the datanode is connected to a NameNode at the
* given address
*/
public boolean isConnectedToNN(InetSocketAddress addr) {
for (BPOfferService bpos : getAllBpOs()) {
for (BPServiceActor bpsa : bpos.getBPServiceActors()) {
if (addr.equals(bpsa.getNNSocketAddress())) {
return bpsa.isAlive();
}
}
}
return false;
}
/**
* @param bpid block pool Id
* @return true - if BPOfferService thread is alive
*/
public boolean isBPServiceAlive(String bpid) {
BPOfferService bp = blockPoolManager.get(bpid);
return bp != null ? bp.isAlive() : false;
}
boolean isRestarting() {
return shutdownForUpgrade;
}
/**
* A datanode is considered to be fully started if all the BP threads are
* alive and all the block pools are initialized.
*
* @return true - if the data node is fully started
*/
public boolean isDatanodeFullyStarted() {
for (BPOfferService bp : blockPoolManager.getAllNamenodeThreads()) {
if (!bp.isInitialized() || !bp.isAlive()) {
return false;
}
}
return true;
}
@VisibleForTesting
public DatanodeID getDatanodeId() {
return id;
}
@VisibleForTesting
public void clearAllBlockSecretKeys() {
blockPoolTokenSecretManager.clearAllKeysForTesting();
}
/**
* Get current value of the max balancer bandwidth in bytes per second.
*
* @return Balancer bandwidth in bytes per second for this datanode.
*/
public Long getBalancerBandwidth() {
DataXceiverServer dxcs =
(DataXceiverServer) this.dataXceiverServer.getRunnable();
return dxcs.balanceThrottler.getBandwidth();
}
public DNConf getDnConf() {
return dnConf;
}
public String getDatanodeUuid() {
return storage == null ? null : storage.getDatanodeUuid();
}
boolean shouldRun() {
return shouldRun;
}
@VisibleForTesting
DataStorage getStorage() {
return storage;
}
public ShortCircuitRegistry getShortCircuitRegistry() {
return shortCircuitRegistry;
}
/**
   * Check the data directories for disk errors and handle any failed volumes.
*/
private void checkDiskError() {
Set<File> unhealthyDataDirs = data.checkDataDir();
if (unhealthyDataDirs != null && !unhealthyDataDirs.isEmpty()) {
try {
// Remove all unhealthy volumes from DataNode.
removeVolumes(unhealthyDataDirs, false);
} catch (IOException e) {
LOG.warn("Error occurred when removing unhealthy storage dirs: "
+ e.getMessage(), e);
}
StringBuilder sb = new StringBuilder("DataNode failed volumes:");
for (File dataDir : unhealthyDataDirs) {
sb.append(dataDir.getAbsolutePath() + ";");
}
handleDiskError(sb.toString());
}
}
/**
   * Starts a new thread that checks every checkDiskErrorInterval (5 seconds by
   * default) whether a disk error check has been requested
*/
private void startCheckDiskErrorThread() {
checkDiskErrorThread = new Thread(new Runnable() {
@Override
public void run() {
while(shouldRun) {
          boolean tempFlag;
synchronized(checkDiskErrorMutex) {
tempFlag = checkDiskErrorFlag;
checkDiskErrorFlag = false;
}
if(tempFlag) {
try {
checkDiskError();
} catch (Exception e) {
LOG.warn("Unexpected exception occurred while checking disk error " + e);
checkDiskErrorThread = null;
return;
}
synchronized(checkDiskErrorMutex) {
lastDiskErrorCheck = Time.monotonicNow();
}
}
try {
Thread.sleep(checkDiskErrorInterval);
} catch (InterruptedException e) {
LOG.debug("InterruptedException in check disk error thread", e);
checkDiskErrorThread = null;
return;
}
}
}
});
}
public long getLastDiskErrorCheck() {
synchronized(checkDiskErrorMutex) {
return lastDiskErrorCheck;
}
}
@Override
public SpanReceiverInfo[] listSpanReceivers() throws IOException {
checkSuperuserPrivilege();
return spanReceiverHost.listSpanReceivers();
}
@Override
public long addSpanReceiver(SpanReceiverInfo info) throws IOException {
checkSuperuserPrivilege();
return spanReceiverHost.addSpanReceiver(info);
}
@Override
public void removeSpanReceiver(long id) throws IOException {
checkSuperuserPrivilege();
spanReceiverHost.removeSpanReceiver(id);
}
}
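/**
 * A standalone sketch (not part of the original source) of the asynchronous shutdown
 * pattern used in shutdownDatanode() above: the RPC handler returns immediately while a
 * daemon thread performs the actual shutdown, optionally after a short delay so the RPC
 * response can reach the client first. The "shutdownAction" parameter is hypothetical.
 */
class AsyncShutdownSketch {
  static void shutdownAsync(final Runnable shutdownAction,
      final boolean delayBeforeShutdown) {
    Thread shutdownThread = new Thread() {
      @Override public void run() {
        if (delayBeforeShutdown) {
          try {
            Thread.sleep(1000);  // let the in-flight RPC response go out
          } catch (InterruptedException ignored) { }
        }
        shutdownAction.run();
      }
    };
    shutdownThread.setDaemon(true);
    shutdownThread.start();
  }
}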
| 120,126 | 35.803615 | 104 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActorActionException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.IOException;
public class BPServiceActorActionException extends IOException {
/**
   * An exception for BPServiceActor call related issues
*/
private static final long serialVersionUID = 1L;
public BPServiceActorActionException(String message) {
super(message);
}
}
| 1,150 | 32.852941 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
/**
* A DN volume choosing policy which takes into account the amount of free
* space on each of the available volumes when considering where to assign a
* new replica allocation. By default this policy prefers assigning replicas to
* those volumes with more available free space, so as to over time balance the
* available space of all the volumes within a DN.
*/
public class AvailableSpaceVolumeChoosingPolicy<V extends FsVolumeSpi>
implements VolumeChoosingPolicy<V>, Configurable {
private static final Log LOG = LogFactory.getLog(AvailableSpaceVolumeChoosingPolicy.class);
private final Random random;
private long balancedSpaceThreshold = DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT;
private float balancedPreferencePercent = DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT;
AvailableSpaceVolumeChoosingPolicy(Random random) {
this.random = random;
}
public AvailableSpaceVolumeChoosingPolicy() {
this(new Random());
}
@Override
public synchronized void setConf(Configuration conf) {
balancedSpaceThreshold = conf.getLong(
DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY,
DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT);
balancedPreferencePercent = conf.getFloat(
DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT);
LOG.info("Available space volume choosing policy initialized: " +
DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY +
" = " + balancedSpaceThreshold + ", " +
DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY +
" = " + balancedPreferencePercent);
if (balancedPreferencePercent > 1.0) {
LOG.warn("The value of " + DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY +
" is greater than 1.0 but should be in the range 0.0 - 1.0");
}
if (balancedPreferencePercent < 0.5) {
LOG.warn("The value of " + DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY +
" is less than 0.5 so volumes with less available disk space will receive more block allocations");
}
}
@Override
public synchronized Configuration getConf() {
// Nothing to do. Only added to fulfill the Configurable contract.
return null;
}
private final VolumeChoosingPolicy<V> roundRobinPolicyBalanced =
new RoundRobinVolumeChoosingPolicy<V>();
private final VolumeChoosingPolicy<V> roundRobinPolicyHighAvailable =
new RoundRobinVolumeChoosingPolicy<V>();
private final VolumeChoosingPolicy<V> roundRobinPolicyLowAvailable =
new RoundRobinVolumeChoosingPolicy<V>();
@Override
public synchronized V chooseVolume(List<V> volumes,
long replicaSize) throws IOException {
if (volumes.size() < 1) {
throw new DiskOutOfSpaceException("No more available volumes");
}
AvailableSpaceVolumeList volumesWithSpaces =
new AvailableSpaceVolumeList(volumes);
if (volumesWithSpaces.areAllVolumesWithinFreeSpaceThreshold()) {
// If they're actually not too far out of whack, fall back on pure round
// robin.
V volume = roundRobinPolicyBalanced.chooseVolume(volumes, replicaSize);
if (LOG.isDebugEnabled()) {
LOG.debug("All volumes are within the configured free space balance " +
"threshold. Selecting " + volume + " for write of block size " +
replicaSize);
}
return volume;
} else {
V volume = null;
// If none of the volumes with low free space have enough space for the
// replica, always try to choose a volume with a lot of free space.
long mostAvailableAmongLowVolumes = volumesWithSpaces
.getMostAvailableSpaceAmongVolumesWithLowAvailableSpace();
List<V> highAvailableVolumes = extractVolumesFromPairs(
volumesWithSpaces.getVolumesWithHighAvailableSpace());
List<V> lowAvailableVolumes = extractVolumesFromPairs(
volumesWithSpaces.getVolumesWithLowAvailableSpace());
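      // Weight each volume with high available space by balancedPreferencePercent and
      // each volume with low available space by (1 - balancedPreferencePercent), then
      // normalize. The resulting scaledPreferencePercent is the probability of writing
      // to the high-available-space group; e.g. with 3 high and 1 low volume and a
      // preference of 0.75 it is (3 * 0.75) / (3 * 0.75 + 1 * 0.25) = 0.9.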
float preferencePercentScaler =
(highAvailableVolumes.size() * balancedPreferencePercent) +
(lowAvailableVolumes.size() * (1 - balancedPreferencePercent));
float scaledPreferencePercent =
(highAvailableVolumes.size() * balancedPreferencePercent) /
preferencePercentScaler;
if (mostAvailableAmongLowVolumes < replicaSize ||
random.nextFloat() < scaledPreferencePercent) {
volume = roundRobinPolicyHighAvailable.chooseVolume(
highAvailableVolumes, replicaSize);
if (LOG.isDebugEnabled()) {
LOG.debug("Volumes are imbalanced. Selecting " + volume +
" from high available space volumes for write of block size "
+ replicaSize);
}
} else {
volume = roundRobinPolicyLowAvailable.chooseVolume(
lowAvailableVolumes, replicaSize);
if (LOG.isDebugEnabled()) {
LOG.debug("Volumes are imbalanced. Selecting " + volume +
" from low available space volumes for write of block size "
+ replicaSize);
}
}
return volume;
}
}
/**
* Used to keep track of the list of volumes we're choosing from.
*/
private class AvailableSpaceVolumeList {
private final List<AvailableSpaceVolumePair> volumes;
public AvailableSpaceVolumeList(List<V> volumes) throws IOException {
this.volumes = new ArrayList<AvailableSpaceVolumePair>();
for (V volume : volumes) {
this.volumes.add(new AvailableSpaceVolumePair(volume));
}
}
/**
* @return true if all volumes' free space is within the
* configured threshold, false otherwise.
*/
public boolean areAllVolumesWithinFreeSpaceThreshold() {
long leastAvailable = Long.MAX_VALUE;
long mostAvailable = 0;
for (AvailableSpaceVolumePair volume : volumes) {
leastAvailable = Math.min(leastAvailable, volume.getAvailable());
mostAvailable = Math.max(mostAvailable, volume.getAvailable());
}
return (mostAvailable - leastAvailable) < balancedSpaceThreshold;
}
/**
* @return the minimum amount of space available on a single volume,
* across all volumes.
*/
private long getLeastAvailableSpace() {
long leastAvailable = Long.MAX_VALUE;
for (AvailableSpaceVolumePair volume : volumes) {
leastAvailable = Math.min(leastAvailable, volume.getAvailable());
}
return leastAvailable;
}
/**
* @return the maximum amount of space available across volumes with low space.
*/
public long getMostAvailableSpaceAmongVolumesWithLowAvailableSpace() {
long mostAvailable = Long.MIN_VALUE;
for (AvailableSpaceVolumePair volume : getVolumesWithLowAvailableSpace()) {
mostAvailable = Math.max(mostAvailable, volume.getAvailable());
}
return mostAvailable;
}
/**
* @return the list of volumes with relatively low available space.
*/
public List<AvailableSpaceVolumePair> getVolumesWithLowAvailableSpace() {
long leastAvailable = getLeastAvailableSpace();
List<AvailableSpaceVolumePair> ret = new ArrayList<AvailableSpaceVolumePair>();
for (AvailableSpaceVolumePair volume : volumes) {
if (volume.getAvailable() <= leastAvailable + balancedSpaceThreshold) {
ret.add(volume);
}
}
return ret;
}
/**
* @return the list of volumes with a lot of available space.
*/
public List<AvailableSpaceVolumePair> getVolumesWithHighAvailableSpace() {
long leastAvailable = getLeastAvailableSpace();
List<AvailableSpaceVolumePair> ret = new ArrayList<AvailableSpaceVolumePair>();
for (AvailableSpaceVolumePair volume : volumes) {
if (volume.getAvailable() > leastAvailable + balancedSpaceThreshold) {
ret.add(volume);
}
}
return ret;
}
}
/**
* Used so that we only check the available space on a given volume once, at
* the beginning of {@link AvailableSpaceVolumeChoosingPolicy#chooseVolume(List, long)}.
*/
private class AvailableSpaceVolumePair {
private final V volume;
private final long availableSpace;
public AvailableSpaceVolumePair(V volume) throws IOException {
this.volume = volume;
this.availableSpace = volume.getAvailable();
}
public long getAvailable() {
return availableSpace;
}
public V getVolume() {
return volume;
}
}
private List<V> extractVolumesFromPairs(List<AvailableSpaceVolumePair> volumes) {
List<V> ret = new ArrayList<V>();
for (AvailableSpaceVolumePair volume : volumes) {
ret.add(volume.getVolume());
}
return ret;
}
}
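/**
 * A minimal usage sketch (not part of the original source). It assumes a populated list
 * of volumes and a Configuration that may carry the two balancing keys referenced above;
 * the class and method names of the sketch itself are hypothetical, but everything it
 * calls is declared in this file.
 */
class AvailableSpacePolicyUsageSketch {
  static <V extends FsVolumeSpi> V chooseForReplica(Configuration conf,
      List<V> volumes, long replicaSize) throws IOException {
    AvailableSpaceVolumeChoosingPolicy<V> policy =
        new AvailableSpaceVolumeChoosingPolicy<V>();
    // Reads the balanced-space threshold and preference fraction, falling back to the
    // defaults when the keys are absent.
    policy.setConf(conf);
    // Thread-safe; throws DiskOutOfSpaceException when no suitable volume is available.
    return policy.chooseVolume(volumes, replicaSize);
  }
}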
| 11,014 | 39.645756 | 146 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RoundRobinVolumeChoosingPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
/**
* Choose volumes in round-robin order.
*/
public class RoundRobinVolumeChoosingPolicy<V extends FsVolumeSpi>
implements VolumeChoosingPolicy<V> {
public static final Log LOG = LogFactory.getLog(RoundRobinVolumeChoosingPolicy.class);
private int curVolume = 0;
@Override
public synchronized V chooseVolume(final List<V> volumes, long blockSize)
throws IOException {
if(volumes.size() < 1) {
throw new DiskOutOfSpaceException("No more available volumes");
}
// since volumes could've been removed because of the failure
// make sure we are not out of bounds
if(curVolume >= volumes.size()) {
curVolume = 0;
}
int startVolume = curVolume;
long maxAvailable = 0;
while (true) {
final V volume = volumes.get(curVolume);
curVolume = (curVolume + 1) % volumes.size();
long availableVolumeSize = volume.getAvailable();
if (availableVolumeSize > blockSize) {
return volume;
}
if (availableVolumeSize > maxAvailable) {
maxAvailable = availableVolumeSize;
}
if (curVolume == startVolume) {
throw new DiskOutOfSpaceException("Out of space: "
+ "The volume with the most available space (=" + maxAvailable
+ " B) is less than the block size (=" + blockSize + " B).");
}
}
}
}
| 2,446 | 32.520548 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaOutputStreams.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset;
import java.io.Closeable;
import java.io.FileOutputStream;
import java.io.OutputStream;
import java.io.IOException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
/**
* Contains the output streams for the data and checksum of a replica.
*/
public class ReplicaOutputStreams implements Closeable {
private final OutputStream dataOut;
private final OutputStream checksumOut;
private final DataChecksum checksum;
private final boolean isTransientStorage;
/**
* Create an object with a data output stream, a checksum output stream
* and a checksum.
*/
public ReplicaOutputStreams(OutputStream dataOut, OutputStream checksumOut,
DataChecksum checksum, boolean isTransientStorage) {
this.dataOut = dataOut;
this.checksumOut = checksumOut;
this.checksum = checksum;
this.isTransientStorage = isTransientStorage;
}
/** @return the data output stream. */
public OutputStream getDataOut() {
return dataOut;
}
/** @return the checksum output stream. */
public OutputStream getChecksumOut() {
return checksumOut;
}
/** @return the checksum. */
public DataChecksum getChecksum() {
return checksum;
}
/** @return is writing to a transient storage? */
public boolean isTransientStorage() {
return isTransientStorage;
}
@Override
public void close() {
IOUtils.closeStream(dataOut);
IOUtils.closeStream(checksumOut);
}
/**
* Sync the data stream if it supports it.
*/
public void syncDataOut() throws IOException {
if (dataOut instanceof FileOutputStream) {
((FileOutputStream)dataOut).getChannel().force(true);
}
}
/**
* Sync the checksum stream if it supports it.
*/
public void syncChecksumOut() throws IOException {
if (checksumOut instanceof FileOutputStream) {
((FileOutputStream)checksumOut).getChannel().force(true);
}
}
}
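/**
 * A minimal usage sketch (not part of the original source). The files, the checksum type
 * and the bytes-per-checksum value are illustrative assumptions; only the
 * ReplicaOutputStreams calls come from the class above. A real writer would also compute
 * and write checksum bytes through getChecksumOut().
 */
class ReplicaOutputStreamsUsageSketch {
  static void writeAndSync(java.io.File dataFile, java.io.File metaFile, byte[] data)
      throws IOException {
    DataChecksum checksum =
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
    ReplicaOutputStreams streams = new ReplicaOutputStreams(
        new FileOutputStream(dataFile), new FileOutputStream(metaFile),
        checksum, false /* not transient storage */);
    try {
      streams.getDataOut().write(data);
      // Both streams are FileOutputStreams here, so the force(true) branch in
      // syncDataOut()/syncChecksumOut() applies and flushes them to disk.
      streams.syncDataOut();
      streams.syncChecksumOut();
    } finally {
      streams.close();
    }
  }
}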
| 2,780 | 28.585106 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/LengthInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset;
import java.io.FilterInputStream;
import java.io.InputStream;
/**
* An input stream with length.
*/
public class LengthInputStream extends FilterInputStream {
private final long length;
/**
   * Create a stream.
* @param in the underlying input stream.
* @param length the length of the stream.
*/
public LengthInputStream(InputStream in, long length) {
super(in);
this.length = length;
}
/** @return the length. */
public long getLength() {
return length;
}
public InputStream getWrappedStream() {
return in;
}
}
| 1,432 | 28.854167 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/ReplicaInputStreams.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset;
import java.io.Closeable;
import java.io.InputStream;
import org.apache.hadoop.io.IOUtils;
/**
* Contains the input streams for the data and checksum of a replica.
*/
public class ReplicaInputStreams implements Closeable {
private final InputStream dataIn;
private final InputStream checksumIn;
private final FsVolumeReference volumeRef;
/** Create an object with a data input stream and a checksum input stream. */
public ReplicaInputStreams(InputStream dataStream, InputStream checksumStream,
FsVolumeReference volumeRef) {
this.volumeRef = volumeRef;
this.dataIn = dataStream;
this.checksumIn = checksumStream;
}
/** @return the data input stream. */
public InputStream getDataIn() {
return dataIn;
}
/** @return the checksum input stream. */
public InputStream getChecksumIn() {
return checksumIn;
}
@Override
public void close() {
IOUtils.closeStream(dataIn);
IOUtils.closeStream(checksumIn);
IOUtils.cleanup(null, volumeRef);
}
}
| 1,877 | 31.947368 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/VolumeChoosingPolicy.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* This interface specifies the policy for choosing volumes to store replicas.
*/
@InterfaceAudience.Private
public interface VolumeChoosingPolicy<V extends FsVolumeSpi> {
/**
* Choose a volume to place a replica,
* given a list of volumes and the replica size sought for storage.
*
* The implementations of this interface must be thread-safe.
*
* @param volumes - a list of available volumes.
* @param replicaSize - the size of the replica for which a volume is sought.
* @return the chosen volume.
* @throws IOException when disks are unavailable or are full.
*/
public V chooseVolume(List<V> volumes, long replicaSize) throws IOException;
}
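/**
 * A minimal illustrative implementation (not part of the original source): a naive
 * "first fit" policy that returns the first volume with enough available space. It
 * exists only to show the contract documented above; the shipped implementations are
 * RoundRobinVolumeChoosingPolicy and AvailableSpaceVolumeChoosingPolicy.
 */
class FirstFitVolumeChoosingPolicySketch<V extends FsVolumeSpi>
    implements VolumeChoosingPolicy<V> {
  @Override
  public V chooseVolume(List<V> volumes, long replicaSize) throws IOException {
    // The contract requires thread safety; this stateless loop trivially satisfies it.
    for (V volume : volumes) {
      if (volume.getAvailable() > replicaSize) {
        return volume;
      }
    }
    throw new IOException("No volume has more than " + replicaSize
        + " bytes available");
  }
}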
| 1,663 | 37.697674 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeReference.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset;
import java.io.Closeable;
import java.io.IOException;
/**
 * This holds a volume reference count as an AutoCloseable resource.
* It increases the reference count by one in the constructor, and decreases
* the reference count by one in {@link #close()}.
*
* <pre>
* {@code
* try (FsVolumeReference ref = volume.obtainReference()) {
* // Do IOs on the volume
* volume.createRwb(...);
* ...
* }
* }
* </pre>
*/
public interface FsVolumeReference extends Closeable {
/**
* Decrease the reference count of the volume.
   * @throws IOException never thrown in practice; declared to satisfy {@link Closeable}.
*/
@Override
void close() throws IOException;
/**
   * Returns the underlying volume object, or null if the reference was
   * released.
*/
FsVolumeSpi getVolume();
}
| 1,655 | 30.846154 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset;
import java.io.Closeable;
import java.io.EOFException;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.ClosedChannelException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.util.ReflectionUtils;
/**
* This is a service provider interface for the underlying storage that
* stores replicas for a data node.
* The default implementation stores replicas on local drives.
*/
@InterfaceAudience.Private
public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
/**
* A factory for creating {@link FsDatasetSpi} objects.
*/
abstract class Factory<D extends FsDatasetSpi<?>> {
/** @return the configured factory. */
public static Factory<?> getFactory(Configuration conf) {
@SuppressWarnings("rawtypes")
final Class<? extends Factory> clazz = conf.getClass(
DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
FsDatasetFactory.class,
Factory.class);
return ReflectionUtils.newInstance(clazz, conf);
}
/** Create a new object. */
public abstract D newInstance(DataNode datanode, DataStorage storage,
Configuration conf) throws IOException;
/** Does the factory create simulated objects? */
public boolean isSimulated() {
return false;
}
}
/**
   * It behaves as an unmodifiable list of FsVolume instances. Individual volumes
   * can be obtained by using {@link #get(int)}.
*
* This also holds the reference counts for these volumes. It releases all the
* reference counts in {@link #close()}.
*/
class FsVolumeReferences implements Iterable<FsVolumeSpi>, Closeable {
private final List<FsVolumeReference> references;
public <S extends FsVolumeSpi> FsVolumeReferences(List<S> curVolumes) {
references = new ArrayList<>();
for (FsVolumeSpi v : curVolumes) {
try {
references.add(v.obtainReference());
} catch (ClosedChannelException e) {
// This volume has been closed.
}
}
}
private static class FsVolumeSpiIterator implements
Iterator<FsVolumeSpi> {
private final List<FsVolumeReference> references;
private int idx = 0;
FsVolumeSpiIterator(List<FsVolumeReference> refs) {
references = refs;
}
@Override
public boolean hasNext() {
return idx < references.size();
}
@Override
public FsVolumeSpi next() {
int refIdx = idx++;
return references.get(refIdx).getVolume();
}
@Override
public void remove() {
throw new UnsupportedOperationException();
}
}
@Override
public Iterator<FsVolumeSpi> iterator() {
return new FsVolumeSpiIterator(references);
}
/**
* Get the number of volumes.
*/
public int size() {
return references.size();
}
/**
* Get the volume for a given index.
*/
public FsVolumeSpi get(int index) {
return references.get(index).getVolume();
}
@Override
public void close() throws IOException {
IOException ioe = null;
for (FsVolumeReference ref : references) {
try {
ref.close();
} catch (IOException e) {
ioe = e;
}
}
references.clear();
if (ioe != null) {
throw ioe;
}
}
}
/**
* Returns a list of FsVolumes that hold reference counts.
*
* The caller must release the reference of each volume by calling
* {@link FsVolumeReferences#close()}.
*/
FsVolumeReferences getFsVolumeReferences();
/**
* Add a new volume to the FsDataset.<p/>
*
* If the FSDataset supports block scanning, this function registers
* the new volume with the block scanner.
*
* @param location The storage location for the new volume.
* @param nsInfos Namespace information for the new volume.
*/
void addVolume(
final StorageLocation location,
final List<NamespaceInfo> nsInfos) throws IOException;
/**
* Removes a collection of volumes from FsDataset.
*
* If the FSDataset supports block scanning, this function removes
* the volumes from the block scanner.
*
* @param volumes The paths of the volumes to be removed.
* @param clearFailure set true to clear the failure information about the
* volumes.
*/
void removeVolumes(Set<File> volumes, boolean clearFailure);
/** @return a storage with the given storage ID */
DatanodeStorage getStorage(final String storageUuid);
/** @return one or more storage reports for attached volumes. */
StorageReport[] getStorageReports(String bpid)
throws IOException;
/** @return the volume that contains a replica of the block. */
V getVolume(ExtendedBlock b);
/** @return a volume information map (name => info). */
Map<String, Object> getVolumeInfoMap();
/**
* Returns info about volume failures.
*
* @return info about volume failures, possibly null
*/
VolumeFailureSummary getVolumeFailureSummary();
/** @return a list of finalized blocks for the given block pool. */
List<FinalizedReplica> getFinalizedBlocks(String bpid);
/** @return a list of finalized blocks for the given block pool. */
List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage(String bpid);
/**
* Check whether the in-memory block record matches the block on the disk,
   * and, if they do not match, update the record or mark it
* as corrupted.
*/
void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FsVolumeSpi vol) throws IOException;
/**
* @param b - the block
* @return a stream if the meta-data of the block exists;
* otherwise, return null.
* @throws IOException
*/
LengthInputStream getMetaDataInputStream(ExtendedBlock b
) throws IOException;
/**
* Returns the specified block's on-disk length (excluding metadata).
   * @return the specified block's on-disk length (excluding metadata)
* @throws IOException on error
*/
long getLength(ExtendedBlock b) throws IOException;
/**
* Get reference to the replica meta info in the replicasMap.
* To be called from methods that are synchronized on {@link FSDataset}
* @return replica from the replicas map
*/
@Deprecated
Replica getReplica(String bpid, long blockId);
/**
* @return replica meta information
*/
String getReplicaString(String bpid, long blockId);
/**
* @return the generation stamp stored with the block.
*/
Block getStoredBlock(String bpid, long blkid) throws IOException;
/**
* Returns an input stream at specified offset of the specified block.
* @param b block
* @param seekOffset offset with in the block to seek to
* @return an input stream to read the contents of the specified block,
* starting at the offset
* @throws IOException
*/
InputStream getBlockInputStream(ExtendedBlock b, long seekOffset)
throws IOException;
/**
* Returns an input stream at specified offset of the specified block.
* The block is still in the tmp directory and is not finalized
* @return an input stream to read the contents of the specified block,
* starting at the offset
* @throws IOException
*/
ReplicaInputStreams getTmpInputStreams(ExtendedBlock b, long blkoff,
long ckoff) throws IOException;
/**
   * Creates a temporary replica and returns the meta information of the replica.
*
* @param b block
* @return the meta info of the replica which is being written to
* @throws IOException if an error occurs
*/
ReplicaHandler createTemporary(StorageType storageType,
ExtendedBlock b) throws IOException;
/**
* Creates a RBW replica and returns the meta info of the replica
*
* @param b block
* @return the meta info of the replica which is being written to
* @throws IOException if an error occurs
*/
ReplicaHandler createRbw(StorageType storageType,
ExtendedBlock b, boolean allowLazyPersist) throws IOException;
/**
* Recovers a RBW replica and returns the meta info of the replica.
*
* @param b block
* @param newGS the new generation stamp for the replica
* @param minBytesRcvd the minimum number of bytes that the replica could have
* @param maxBytesRcvd the maximum number of bytes that the replica could have
* @return the meta info of the replica which is being written to
* @throws IOException if an error occurs
*/
ReplicaHandler recoverRbw(ExtendedBlock b,
long newGS, long minBytesRcvd, long maxBytesRcvd) throws IOException;
/**
   * Convert a temporary replica to a RBW.
* @param temporary the temporary replica being converted
* @return the result RBW
*/
ReplicaInPipelineInterface convertTemporaryToRbw(
ExtendedBlock temporary) throws IOException;
/**
* Append to a finalized replica and returns the meta info of the replica.
*
* @param b block
* @param newGS the new generation stamp for the replica
* @param expectedBlockLen the number of bytes the replica is expected to have
   * @return the meta info of the replica which is being written to
* @throws IOException
*/
ReplicaHandler append(ExtendedBlock b, long newGS,
long expectedBlockLen) throws IOException;
/**
* Recover a failed append to a finalized replica and returns the meta
* info of the replica.
*
* @param b block
* @param newGS the new generation stamp for the replica
* @param expectedBlockLen the number of bytes the replica is expected to have
* @return the meta info of the replica which is being written to
* @throws IOException
*/
ReplicaHandler recoverAppend(
ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException;
/**
* Recover a failed pipeline close.
   * It bumps the replica's generation stamp and finalizes it if it is an RBW replica.
*
* @param b block
* @param newGS the new generation stamp for the replica
* @param expectedBlockLen the number of bytes the replica is expected to have
* @return the storage uuid of the replica.
* @throws IOException
*/
String recoverClose(ExtendedBlock b, long newGS, long expectedBlockLen
) throws IOException;
/**
* Finalizes the block previously opened for writing using writeToBlock.
   * The block size is taken from the parameter b and must match the amount
   * of data written.
* @throws IOException
   * @throws ReplicaNotFoundException if the replica cannot be found when the
   * block is being finalized. For instance, the block resides on an HDFS volume
* that has been removed.
*/
void finalizeBlock(ExtendedBlock b) throws IOException;
/**
* Unfinalizes the block previously opened for writing using writeToBlock.
* The temporary file associated with this block is deleted.
* @throws IOException
*/
void unfinalizeBlock(ExtendedBlock b) throws IOException;
/**
* Returns one block report per volume.
* @param bpid Block Pool Id
* @return - a map of DatanodeStorage to block report for the volume.
*/
Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid);
/**
* Returns the cache report - the full list of cached block IDs of a
* block pool.
* @param bpid Block Pool Id
* @return the cache report - the full list of cached block IDs.
*/
List<Long> getCacheReport(String bpid);
/** Does the dataset contain the block? */
boolean contains(ExtendedBlock block);
/**
* Check if a block is valid.
*
* @param b The block to check.
* @param minLength The minimum length that the block must have. May be 0.
* @param state If this is null, it is ignored. If it is non-null, we
* will check that the replica has this state.
*
* @throws ReplicaNotFoundException If the replica is not found
*
* @throws UnexpectedReplicaStateException If the replica is not in the
* expected state.
* @throws FileNotFoundException If the block file is not found or there
* was an error locating it.
* @throws EOFException If the replica length is too short.
*
* @throws IOException May be thrown from the methods called.
*/
void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
throws ReplicaNotFoundException, UnexpectedReplicaStateException,
FileNotFoundException, EOFException, IOException;
/**
* Is the block valid?
* @return - true if the specified block is valid
*/
boolean isValidBlock(ExtendedBlock b);
/**
* Is the block a valid RBW?
* @return - true if the specified block is a valid RBW
*/
boolean isValidRbw(ExtendedBlock b);
/**
* Invalidates the specified blocks
* @param bpid Block pool Id
* @param invalidBlks - the blocks to be invalidated
* @throws IOException
*/
void invalidate(String bpid, Block invalidBlks[]) throws IOException;
/**
* Caches the specified blocks
* @param bpid Block pool id
* @param blockIds - block ids to cache
*/
void cache(String bpid, long[] blockIds);
/**
* Uncaches the specified blocks
* @param bpid Block pool id
* @param blockIds - blocks ids to uncache
*/
void uncache(String bpid, long[] blockIds);
/**
* Determine if the specified block is cached.
* @param bpid Block pool id
* @param blockIds - block id
* @return true if the block is cached
*/
boolean isCached(String bpid, long blockId);
/**
* Check if all the data directories are healthy
* @return A set of unhealthy data directories.
*/
Set<File> checkDataDir();
/**
* Shutdown the FSDataset
*/
void shutdown();
/**
* Sets the file pointer of the checksum stream so that the last checksum
* will be overwritten
* @param b block
* @param outs The streams for the data file and checksum file
* @param checksumSize number of bytes each checksum has
* @throws IOException
*/
void adjustCrcChannelPosition(ExtendedBlock b,
ReplicaOutputStreams outs, int checksumSize) throws IOException;
/**
* Checks how many valid storage volumes there are in the DataNode.
* @return true if more than the minimum number of valid volumes are left
* in the FSDataSet.
*/
boolean hasEnoughResource();
/**
* Get visible length of the specified replica.
*/
long getReplicaVisibleLength(final ExtendedBlock block) throws IOException;
/**
* Initialize a replica recovery.
* @return actual state of the replica on this data-node or
* null if data-node does not have the replica.
*/
ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock
) throws IOException;
/**
* Update replica's generation stamp and length and finalize it.
* @return the ID of storage that stores the block
*/
String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
long recoveryId, long newBlockId, long newLength) throws IOException;
/**
   * Add a new block pool ID.
* @param bpid Block pool Id
* @param conf Configuration
*/
void addBlockPool(String bpid, Configuration conf) throws IOException;
/**
* Shutdown and remove the block pool from underlying storage.
* @param bpid Block pool Id to be removed
*/
  void shutdownBlockPool(String bpid);
/**
* Deletes the block pool directories. If force is false, directories are
* deleted only if no block files exist for the block pool. If force
* is true entire directory for the blockpool is deleted along with its
* contents.
* @param bpid BlockPool Id to be deleted.
* @param force If force is false, directories are deleted only if no
* block files exist for the block pool, otherwise entire
* directory for the blockpool is deleted along with its contents.
* @throws IOException
*/
void deleteBlockPool(String bpid, boolean force) throws IOException;
/**
* Get {@link BlockLocalPathInfo} for the given block.
*/
BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b
) throws IOException;
/**
* Get a {@link HdfsBlocksMetadata} corresponding to the list of blocks in
* <code>blocks</code>.
*
* @param bpid pool to query
* @param blockIds List of block ids for which to return metadata
* @return metadata Metadata for the list of blocks
* @throws IOException
*/
HdfsBlocksMetadata getHdfsBlocksMetadata(String bpid,
long[] blockIds) throws IOException;
/**
* Enable 'trash' for the given dataset. When trash is enabled, files are
* moved to a separate trash directory instead of being deleted immediately.
* This can be useful for example during rolling upgrades.
*/
void enableTrash(String bpid);
/**
* Clear trash
*/
void clearTrash(String bpid);
/**
* @return true when trash is enabled
*/
boolean trashEnabled(String bpid);
/**
* Create a marker file indicating that a rolling upgrade is in progress.
*/
void setRollingUpgradeMarker(String bpid) throws IOException;
/**
* Delete the rolling upgrade marker file if it exists.
* @param bpid
*/
void clearRollingUpgradeMarker(String bpid) throws IOException;
/**
* submit a sync_file_range request to AsyncDiskService.
*/
void submitBackgroundSyncFileRangeRequest(final ExtendedBlock block,
final FileDescriptor fd, final long offset, final long nbytes,
final int flags);
/**
* Callback from RamDiskAsyncLazyPersistService upon async lazy persist task end
*/
void onCompleteLazyPersist(String bpId, long blockId,
long creationTime, File[] savedFiles, V targetVolume);
/**
* Callback from RamDiskAsyncLazyPersistService upon async lazy persist task fail
*/
void onFailLazyPersist(String bpId, long blockId);
/**
* Move block from one storage to another storage
*/
ReplicaInfo moveBlockAcrossStorage(final ExtendedBlock block,
StorageType targetStorageType) throws IOException;
/**
* Set a block to be pinned on this datanode so that it cannot be moved
* by Balancer/Mover.
*
* It is a no-op when dfs.datanode.block-pinning.enabled is set to false.
*/
void setPinning(ExtendedBlock block) throws IOException;
/**
* Check whether the block was pinned
*/
boolean getPinning(ExtendedBlock block) throws IOException;
/**
   * Confirm whether the block is being deleted.
*/
boolean isDeletingBlock(String bpid, long blockId);
}
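/**
 * A minimal usage sketch (not part of the original source). It assumes a DataNode, its
 * DataStorage and a Configuration are already available; the factory lookup and
 * instantiation calls come from the Factory class declared above.
 */
class FsDatasetFactoryUsageSketch {
  static FsDatasetSpi<?> create(DataNode datanode, DataStorage storage,
      Configuration conf) throws IOException {
    // Resolves the class named by DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
    // defaulting to FsDatasetFactory.
    FsDatasetSpi.Factory<?> factory = FsDatasetSpi.Factory.getFactory(conf);
    return factory.newInstance(datanode, storage, conf);
  }
}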
| 21,505 | 32.342636 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
/**
* This is an interface for the underlying volume.
*/
public interface FsVolumeSpi {
/**
   * Obtain a reference object that increments the volume's reference count
   * by one.
   *
   * It is the caller's responsibility to close the {@link FsVolumeReference} to
   * decrease the reference count on the volume.
*/
FsVolumeReference obtainReference() throws ClosedChannelException;
/** @return the StorageUuid of the volume */
String getStorageID();
/** @return a list of block pools. */
String[] getBlockPoolList();
/** @return the available storage space in bytes. */
long getAvailable() throws IOException;
/** @return the base path to the volume */
String getBasePath();
/** @return the path to the volume */
String getPath(String bpid) throws IOException;
/** @return the directory for the finalized blocks in the block pool. */
File getFinalizedDir(String bpid) throws IOException;
StorageType getStorageType();
/** Returns true if the volume is NOT backed by persistent storage. */
boolean isTransientStorage();
/**
* Reserve disk space for an RBW block so a writer does not run out of
* space before the block is full.
*/
void reserveSpaceForRbw(long bytesToReserve);
/**
* Release disk space previously reserved for RBW block.
*/
void releaseReservedSpace(long bytesToRelease);
/**
* Release reserved memory for an RBW block written to transient storage
* i.e. RAM.
* bytesToRelease will be rounded down to the OS page size since locked
* memory reservation must always be a multiple of the page size.
*/
void releaseLockedMemory(long bytesToRelease);
/**
* BlockIterator will return ExtendedBlock entries from a block pool in
* this volume. The entries will be returned in sorted order.<p/>
*
* BlockIterator objects themselves do not always have internal
* synchronization, so they can only safely be used by a single thread at a
* time.<p/>
*
* Closing the iterator does not save it. You must call save to save it.
*/
interface BlockIterator extends Closeable {
/**
* Get the next block.<p/>
*
* Note that this block may be removed in between the time we list it,
* and the time the caller tries to use it, or it may represent a stale
* entry. Callers should handle the case where the returned block no
* longer exists.
*
* @return The next block, or null if there are no
* more blocks. Null if there was an error
* determining the next block.
*
* @throws IOException If there was an error getting the next block in
* this volume. In this case, EOF will be set on
* the iterator.
*/
ExtendedBlock nextBlock() throws IOException;
/**
* Returns true if we got to the end of the block pool.
*/
boolean atEnd();
/**
* Repositions the iterator at the beginning of the block pool.
*/
void rewind();
/**
* Save this block iterator to the underlying volume.
* Any existing saved block iterator with this name will be overwritten.
* maxStalenessMs will not be saved.
*
* @throws IOException If there was an error when saving the block
* iterator.
*/
void save() throws IOException;
/**
* Set the maximum staleness of entries that we will return.<p/>
*
* A maximum staleness of 0 means we will never return stale entries; a
* larger value will allow us to reduce resource consumption in exchange
* for returning more potentially stale entries. Even with staleness set
     * to 0, consumers of this API must handle race conditions where blocks
     * disappear before they can be processed.
*/
void setMaxStalenessMs(long maxStalenessMs);
/**
* Get the wall-clock time, measured in milliseconds since the Epoch,
* when this iterator was created.
*/
long getIterStartMs();
/**
* Get the wall-clock time, measured in milliseconds since the Epoch,
* when this iterator was last saved. Returns iterStartMs if the
* iterator was never saved.
*/
long getLastSavedMs();
/**
* Get the id of the block pool which this iterator traverses.
*/
String getBlockPoolId();
}
/**
* Create a new block iterator. It will start at the beginning of the
* block set.
*
* @param bpid The block pool id to iterate over.
* @param name The name of the block iterator to create.
*
* @return The new block iterator.
*/
BlockIterator newBlockIterator(String bpid, String name);
/**
* Load a saved block iterator.
*
* @param bpid The block pool id to iterate over.
* @param name The name of the block iterator to load.
*
* @return The saved block iterator.
* @throws IOException If there was an IO error loading the saved
* block iterator.
*/
BlockIterator loadBlockIterator(String bpid, String name) throws IOException;
/**
* Get the FSDatasetSpi which this volume is a part of.
*/
FsDatasetSpi getDataset();
}
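/**
 * A minimal usage sketch (not part of the original source). The iterator name and the
 * method name are illustrative; the calls themselves are the ones documented above.
 */
class BlockIteratorUsageSketch {
  static void scanBlockPool(FsVolumeSpi volume, String bpid) throws IOException {
    // Hold a reference so the volume is not removed while we iterate.
    try (FsVolumeReference ref = volume.obtainReference()) {
      FsVolumeSpi.BlockIterator iter = volume.newBlockIterator(bpid, "sketch");
      try {
        while (!iter.atEnd()) {
          ExtendedBlock block = iter.nextBlock();
          if (block == null) {
            break;  // end of the pool, or an error while listing the next block
          }
          // ... process the block; it may have been removed since it was listed.
        }
        iter.save();  // persist the cursor so loadBlockIterator() can resume later
      } finally {
        iter.close();
      }
    }
  }
}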
| 6,397 | 32.673684 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskAsyncLazyPersistService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
 * This class is a container of multiple thread pools, one for each non-RamDisk
 * volume, each with a maximum thread count of 1, so that async lazy persist
 * operations can be scheduled easily while volume arrival and departure are handled.
*
* This class and {@link org.apache.hadoop.util.AsyncDiskService} are similar.
* They should be combined.
*/
class RamDiskAsyncLazyPersistService {
public static final Log LOG = LogFactory.getLog(RamDiskAsyncLazyPersistService.class);
// ThreadPool core pool size
private static final int CORE_THREADS_PER_VOLUME = 1;
// ThreadPool maximum pool size
private static final int MAXIMUM_THREADS_PER_VOLUME = 1;
// ThreadPool keep-alive time for threads over core pool size
private static final long THREADS_KEEP_ALIVE_SECONDS = 60;
private final DataNode datanode;
private final ThreadGroup threadGroup;
private Map<File, ThreadPoolExecutor> executors
= new HashMap<File, ThreadPoolExecutor>();
private final static HdfsConfiguration EMPTY_HDFS_CONF = new HdfsConfiguration();
/**
* Create a RamDiskAsyncLazyPersistService with a set of volumes (specified by their
* root directories).
*
* The RamDiskAsyncLazyPersistService uses one ThreadPool per volume to do the async
* disk operations.
*/
RamDiskAsyncLazyPersistService(DataNode datanode) {
this.datanode = datanode;
this.threadGroup = new ThreadGroup(getClass().getSimpleName());
}
private void addExecutorForVolume(final File volume) {
ThreadFactory threadFactory = new ThreadFactory() {
@Override
public Thread newThread(Runnable r) {
Thread t = new Thread(threadGroup, r);
t.setName("Async RamDisk lazy persist worker for volume " + volume);
return t;
}
};
ThreadPoolExecutor executor = new ThreadPoolExecutor(
CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(), threadFactory);
// This can reduce the number of running threads
executor.allowCoreThreadTimeOut(true);
executors.put(volume, executor);
}
/**
* Starts AsyncLazyPersistService for a new volume
* @param volume the root of the new data volume.
*/
synchronized void addVolume(File volume) {
if (executors == null) {
throw new RuntimeException("AsyncLazyPersistService is already shutdown");
}
ThreadPoolExecutor executor = executors.get(volume);
if (executor != null) {
throw new RuntimeException("Volume " + volume + " is already existed.");
}
addExecutorForVolume(volume);
}
/**
* Stops AsyncLazyPersistService for a volume.
* @param volume the root of the volume.
*/
synchronized void removeVolume(File volume) {
if (executors == null) {
throw new RuntimeException("AsyncDiskService is already shutdown");
}
ThreadPoolExecutor executor = executors.get(volume);
if (executor == null) {
throw new RuntimeException("Can not find volume " + volume
+ " to remove.");
} else {
executor.shutdown();
executors.remove(volume);
}
}
/**
   * Query whether a thread pool exists for the volume
* @param volume the root of a volume
* @return true if there is one thread pool for the volume
* false otherwise
*/
synchronized boolean queryVolume(File volume) {
if (executors == null) {
throw new RuntimeException("AsyncLazyPersistService is already shutdown");
}
ThreadPoolExecutor executor = executors.get(volume);
return (executor != null);
}
/**
* Execute the task sometime in the future, using ThreadPools.
*/
synchronized void execute(File root, Runnable task) {
if (executors == null) {
throw new RuntimeException("AsyncLazyPersistService is already shutdown");
}
ThreadPoolExecutor executor = executors.get(root);
if (executor == null) {
throw new RuntimeException("Cannot find root " + root
+ " for execution of task " + task);
} else {
executor.execute(task);
}
}
/**
   * Gracefully shut down all ThreadPools. Will wait for all lazy persist
* tasks to finish.
*/
synchronized void shutdown() {
if (executors == null) {
LOG.warn("AsyncLazyPersistService has already shut down.");
} else {
LOG.info("Shutting down all async lazy persist service threads");
for (Map.Entry<File, ThreadPoolExecutor> e : executors.entrySet()) {
e.getValue().shutdown();
}
// clear the executor map so that calling execute again will fail.
executors = null;
LOG.info("All async lazy persist service threads have been shut down");
}
}
/**
* Asynchronously lazy persist the block from the RamDisk to Disk.
*/
void submitLazyPersistTask(String bpId, long blockId,
long genStamp, long creationTime,
File metaFile, File blockFile,
FsVolumeReference target) throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("LazyWriter schedule async task to persist RamDisk block pool id: "
+ bpId + " block id: " + blockId);
}
FsVolumeImpl volume = (FsVolumeImpl)target.getVolume();
File lazyPersistDir = volume.getLazyPersistDir(bpId);
if (!lazyPersistDir.exists() && !lazyPersistDir.mkdirs()) {
FsDatasetImpl.LOG.warn("LazyWriter failed to create " + lazyPersistDir);
throw new IOException("LazyWriter fail to find or create lazy persist dir: "
+ lazyPersistDir.toString());
}
ReplicaLazyPersistTask lazyPersistTask = new ReplicaLazyPersistTask(
bpId, blockId, genStamp, creationTime, blockFile, metaFile,
target, lazyPersistDir);
execute(volume.getCurrentDir(), lazyPersistTask);
}
class ReplicaLazyPersistTask implements Runnable {
final String bpId;
final long blockId;
final long genStamp;
final long creationTime;
final File blockFile;
final File metaFile;
final FsVolumeReference targetVolume;
final File lazyPersistDir;
ReplicaLazyPersistTask(String bpId, long blockId,
long genStamp, long creationTime,
File blockFile, File metaFile,
FsVolumeReference targetVolume, File lazyPersistDir) {
this.bpId = bpId;
this.blockId = blockId;
this.genStamp = genStamp;
this.creationTime = creationTime;
this.blockFile = blockFile;
this.metaFile = metaFile;
this.targetVolume = targetVolume;
this.lazyPersistDir = lazyPersistDir;
}
@Override
public String toString() {
// Called in AsyncLazyPersistService.execute for displaying error messages.
return "LazyWriter async task of persist RamDisk block pool id:"
+ bpId + " block pool id: "
+ blockId + " with block file " + blockFile
+ " and meta file " + metaFile + " to target volume " + targetVolume;}
@Override
public void run() {
boolean succeeded = false;
final FsDatasetImpl dataset = (FsDatasetImpl)datanode.getFSDataset();
try (FsVolumeReference ref = this.targetVolume) {
int smallBufferSize = DFSUtil.getSmallBufferSize(EMPTY_HDFS_CONF);
// No FsDatasetImpl lock for the file copy
File targetFiles[] = FsDatasetImpl.copyBlockFiles(
blockId, genStamp, metaFile, blockFile, lazyPersistDir, true,
smallBufferSize);
// Lock FsDataSetImpl during onCompleteLazyPersist callback
dataset.onCompleteLazyPersist(bpId, blockId,
creationTime, targetFiles, (FsVolumeImpl) targetVolume.getVolume());
succeeded = true;
} catch (Exception e){
FsDatasetImpl.LOG.warn(
"LazyWriter failed to async persist RamDisk block pool id: "
+ bpId + "block Id: " + blockId, e);
} finally {
if (!succeeded) {
dataset.onFailLazyPersist(bpId, blockId);
}
}
}
}
}
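/*
 * Illustrative sketch (not part of the original source): the per-volume,
 * single-thread executor pattern that RamDiskAsyncLazyPersistService builds
 * above, reduced to plain JDK types so it can stand alone. The class and
 * method names here are hypothetical; only the pool sizing and keep-alive
 * choices mirror the constants defined in the class.
 */
class PerVolumeExecutorSketch {
  private final java.util.Map<java.io.File, java.util.concurrent.ThreadPoolExecutor> executors =
      new java.util.HashMap<java.io.File, java.util.concurrent.ThreadPoolExecutor>();

  // Mirrors addExecutorForVolume(): one core thread, one max thread, 60s keep-alive.
  synchronized void addVolume(java.io.File volume) {
    java.util.concurrent.ThreadPoolExecutor executor =
        new java.util.concurrent.ThreadPoolExecutor(
            1, 1, 60, java.util.concurrent.TimeUnit.SECONDS,
            new java.util.concurrent.LinkedBlockingQueue<Runnable>());
    // Let the lone worker thread exit when the volume has been idle for a while.
    executor.allowCoreThreadTimeOut(true);
    executors.put(volume, executor);
  }

  // Mirrors execute(): queue the task on the pool that owns the volume.
  synchronized void submit(java.io.File volume, Runnable task) {
    java.util.concurrent.ThreadPoolExecutor executor = executors.get(volume);
    if (executor == null) {
      throw new IllegalArgumentException("No executor registered for " + volume);
    }
    executor.execute(task);
  }
}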
| 9,392 | 34.988506 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/VolumeFailureInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
/**
* Tracks information about failure of a data volume.
*/
final class VolumeFailureInfo {
private final String failedStorageLocation;
private final long failureDate;
private final long estimatedCapacityLost;
/**
* Creates a new VolumeFailureInfo, when the capacity lost from this volume
* failure is unknown. Typically, this means the volume failed immediately at
* startup, so there was never a chance to query its capacity.
*
* @param failedStorageLocation storage location that has failed
* @param failureDate date/time of failure in milliseconds since epoch
*/
public VolumeFailureInfo(String failedStorageLocation, long failureDate) {
this(failedStorageLocation, failureDate, 0);
}
/**
* Creates a new VolumeFailureInfo.
*
* @param failedStorageLocation storage location that has failed
* @param failureDate date/time of failure in milliseconds since epoch
* @param estimatedCapacityLost estimate of capacity lost in bytes
*/
public VolumeFailureInfo(String failedStorageLocation, long failureDate,
long estimatedCapacityLost) {
this.failedStorageLocation = failedStorageLocation;
this.failureDate = failureDate;
this.estimatedCapacityLost = estimatedCapacityLost;
}
/**
* Returns the storage location that has failed.
*
* @return storage location that has failed
*/
public String getFailedStorageLocation() {
return this.failedStorageLocation;
}
/**
* Returns date/time of failure
*
* @return date/time of failure in milliseconds since epoch
*/
public long getFailureDate() {
return this.failureDate;
}
/**
* Returns estimate of capacity lost. This is said to be an estimate, because
* in some cases it's impossible to know the capacity of the volume, such as if
* we never had a chance to query its capacity before the failure occurred.
*
* @return estimate of capacity lost in bytes
*/
public long getEstimatedCapacityLost() {
return this.estimatedCapacityLost;
}
}
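/*
 * Illustrative sketch (not part of the original source): constructing the two
 * flavours of VolumeFailureInfo exposed by the constructors above. The storage
 * locations and the capacity figure are hypothetical placeholders.
 */
class VolumeFailureInfoSketch {
  static VolumeFailureInfo[] examples() {
    long now = System.currentTimeMillis();
    // Capacity unknown: the volume failed at startup before it could be queried.
    VolumeFailureInfo failedAtStartup =
        new VolumeFailureInfo("/data/1/dfs/dn", now);
    // Capacity known: roughly 512 MB of usable space was lost with the volume.
    VolumeFailureInfo failedWithEstimate =
        new VolumeFailureInfo("/data/2/dfs/dn", now, 512L * 1024 * 1024);
    return new VolumeFailureInfo[] { failedAtStartup, failedWithEstimate };
  }
}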
| 2,907 | 34.036145 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
/**
* Maintains the replica map.
*/
class ReplicaMap {
// Object using which this class is synchronized
private final Object mutex;
// Map of block pool Id to another map of block Id to ReplicaInfo.
private final Map<String, Map<Long, ReplicaInfo>> map =
new HashMap<String, Map<Long, ReplicaInfo>>();
ReplicaMap(Object mutex) {
if (mutex == null) {
throw new HadoopIllegalArgumentException(
"Object to synchronize on cannot be null");
}
this.mutex = mutex;
}
String[] getBlockPoolList() {
synchronized(mutex) {
return map.keySet().toArray(new String[map.keySet().size()]);
}
}
private void checkBlockPool(String bpid) {
if (bpid == null) {
throw new IllegalArgumentException("Block Pool Id is null");
}
}
private void checkBlock(Block b) {
if (b == null) {
throw new IllegalArgumentException("Block is null");
}
}
/**
* Get the meta information of the replica that matches both block id
* and generation stamp
* @param bpid block pool id
* @param block block with its id as the key
* @return the replica's meta information
* @throws IllegalArgumentException if the input block or block pool is null
*/
ReplicaInfo get(String bpid, Block block) {
checkBlockPool(bpid);
checkBlock(block);
ReplicaInfo replicaInfo = get(bpid, block.getBlockId());
if (replicaInfo != null &&
block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
return replicaInfo;
}
return null;
}
/**
* Get the meta information of the replica that matches the block id
* @param bpid block pool id
* @param blockId a block's id
* @return the replica's meta information
*/
ReplicaInfo get(String bpid, long blockId) {
checkBlockPool(bpid);
synchronized(mutex) {
Map<Long, ReplicaInfo> m = map.get(bpid);
return m != null ? m.get(blockId) : null;
}
}
/**
* Add a replica's meta information into the map
*
* @param bpid block pool id
* @param replicaInfo a replica's meta information
* @return previous meta information of the replica
* @throws IllegalArgumentException if the input parameter is null
*/
ReplicaInfo add(String bpid, ReplicaInfo replicaInfo) {
checkBlockPool(bpid);
checkBlock(replicaInfo);
synchronized(mutex) {
Map<Long, ReplicaInfo> m = map.get(bpid);
if (m == null) {
// Add an entry for block pool if it does not exist already
m = new HashMap<Long, ReplicaInfo>();
map.put(bpid, m);
}
return m.put(replicaInfo.getBlockId(), replicaInfo);
}
}
/**
* Add all entries from the given replica map into the local replica map.
*/
void addAll(ReplicaMap other) {
map.putAll(other.map);
}
/**
* Remove the replica's meta information from the map that matches
* the input block's id and generation stamp
* @param bpid block pool id
* @param block block with its id as the key
* @return the removed replica's meta information
* @throws IllegalArgumentException if the input block is null
*/
ReplicaInfo remove(String bpid, Block block) {
checkBlockPool(bpid);
checkBlock(block);
synchronized(mutex) {
Map<Long, ReplicaInfo> m = map.get(bpid);
if (m != null) {
Long key = Long.valueOf(block.getBlockId());
ReplicaInfo replicaInfo = m.get(key);
if (replicaInfo != null &&
block.getGenerationStamp() == replicaInfo.getGenerationStamp()) {
return m.remove(key);
}
}
}
return null;
}
/**
* Remove the replica's meta information from the map if present
* @param bpid block pool id
* @param blockId block id of the replica to be removed
* @return the removed replica's meta information
*/
ReplicaInfo remove(String bpid, long blockId) {
checkBlockPool(bpid);
synchronized(mutex) {
Map<Long, ReplicaInfo> m = map.get(bpid);
if (m != null) {
return m.remove(blockId);
}
}
return null;
}
/**
* Get the size of the map for given block pool
* @param bpid block pool id
* @return the number of replicas in the map
*/
int size(String bpid) {
Map<Long, ReplicaInfo> m = null;
synchronized(mutex) {
m = map.get(bpid);
return m != null ? m.size() : 0;
}
}
/**
* Get a collection of the replicas for given block pool
* This method is <b>not synchronized</b>. It needs to be synchronized
* externally using the mutex, both for getting the replicas
* values from the map and iterating over it. Mutex can be accessed using
* {@link #getMutext()} method.
*
* @param bpid block pool id
* @return a collection of the replicas belonging to the block pool
*/
Collection<ReplicaInfo> replicas(String bpid) {
Map<Long, ReplicaInfo> m = null;
m = map.get(bpid);
return m != null ? m.values() : null;
}
void initBlockPool(String bpid) {
checkBlockPool(bpid);
synchronized(mutex) {
Map<Long, ReplicaInfo> m = map.get(bpid);
if (m == null) {
// Add an entry for block pool if it does not exist already
m = new HashMap<Long, ReplicaInfo>();
map.put(bpid, m);
}
}
}
void cleanUpBlockPool(String bpid) {
checkBlockPool(bpid);
synchronized(mutex) {
map.remove(bpid);
}
}
/**
   * Give access to the mutex used for synchronizing ReplicaMap
* @return object used as lock
*/
Object getMutext() {
return mutex;
}
}
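/*
 * Illustrative sketch (not part of the original source): honouring the locking
 * contract of replicas(String), which is documented above as not synchronized
 * internally. The ReplicaMap instance and block pool id are supplied by a
 * hypothetical caller; only getMutext() and replicas() from this class are used.
 */
class ReplicaMapUsageSketch {
  static long totalReplicaBytes(ReplicaMap replicaMap, String bpid) {
    long total = 0;
    // Both fetching the collection and iterating over it happen under the mutex.
    synchronized (replicaMap.getMutext()) {
      java.util.Collection<org.apache.hadoop.hdfs.server.datanode.ReplicaInfo> replicas =
          replicaMap.replicas(bpid);
      if (replicas != null) {
        for (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo info : replicas) {
          total += info.getNumBytes();
        }
      }
    }
    return total;
  }
}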
| 6,711 | 28.568282 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time;
import java.io.File;
import java.util.concurrent.atomic.AtomicLong;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract class RamDiskReplicaTracker {
static final Log LOG = LogFactory.getLog(RamDiskReplicaTracker.class);
FsDatasetImpl fsDataset;
static class RamDiskReplica implements Comparable<RamDiskReplica> {
private final String bpid;
private final long blockId;
private File savedBlockFile;
private File savedMetaFile;
private long lockedBytesReserved;
private long creationTime;
protected AtomicLong numReads = new AtomicLong(0);
protected boolean isPersisted;
/**
* RAM_DISK volume that holds the original replica.
*/
final FsVolumeSpi ramDiskVolume;
/**
* Persistent volume that holds or will hold the saved replica.
*/
FsVolumeImpl lazyPersistVolume;
RamDiskReplica(final String bpid, final long blockId,
final FsVolumeImpl ramDiskVolume,
long lockedBytesReserved) {
this.bpid = bpid;
this.blockId = blockId;
this.ramDiskVolume = ramDiskVolume;
this.lockedBytesReserved = lockedBytesReserved;
lazyPersistVolume = null;
savedMetaFile = null;
savedBlockFile = null;
creationTime = Time.monotonicNow();
isPersisted = false;
}
long getBlockId() {
return blockId;
}
String getBlockPoolId() {
return bpid;
}
FsVolumeImpl getLazyPersistVolume() {
return lazyPersistVolume;
}
void setLazyPersistVolume(FsVolumeImpl volume) {
Preconditions.checkState(!volume.isTransientStorage());
this.lazyPersistVolume = volume;
}
File getSavedBlockFile() {
return savedBlockFile;
}
File getSavedMetaFile() {
return savedMetaFile;
}
long getNumReads() { return numReads.get(); }
long getCreationTime() { return creationTime; }
boolean getIsPersisted() {return isPersisted; }
/**
* Record the saved meta and block files on the given volume.
*
* @param files Meta and block files, in that order.
*/
void recordSavedBlockFiles(File[] files) {
this.savedMetaFile = files[0];
this.savedBlockFile = files[1];
}
@Override
public int hashCode() {
return bpid.hashCode() ^ (int) blockId;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
RamDiskReplica otherState = (RamDiskReplica) other;
return (otherState.bpid.equals(bpid) && otherState.blockId == blockId);
}
// Delete the saved meta and block files. Failure to delete can be
// ignored, the directory scanner will retry the deletion later.
void deleteSavedFiles() {
if (savedBlockFile != null) {
if (!savedBlockFile.delete()) {
LOG.warn("Failed to delete block file " + savedBlockFile);
}
savedBlockFile = null;
}
if (savedMetaFile != null) {
if (!savedMetaFile.delete()) {
LOG.warn("Failed to delete meta file " + savedMetaFile);
}
savedMetaFile = null;
}
}
@Override
public int compareTo(RamDiskReplica other) {
int bpidResult = bpid.compareTo(other.bpid);
if (bpidResult == 0)
if (blockId == other.blockId) {
return 0;
} else if (blockId < other.blockId) {
return -1;
} else {
return 1;
}
return bpidResult;
}
@Override
public String toString() {
return "[BlockPoolID=" + bpid + "; BlockId=" + blockId + "]";
}
public long getLockedBytesReserved() {
return lockedBytesReserved;
}
}
/**
   * Get an instance of the configured RamDiskReplicaTracker based on
   * the configuration property
* {@link org.apache.hadoop.hdfs.DFSConfigKeys#DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY}.
*
* @param conf the configuration to be used
   * @param fsDataset the FsDataset object.
* @return an instance of RamDiskReplicaTracker
*/
static RamDiskReplicaTracker getInstance(final Configuration conf,
final FsDatasetImpl fsDataset) {
final Class<? extends RamDiskReplicaTracker> trackerClass = conf.getClass(
DFSConfigKeys.DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY,
DFSConfigKeys.DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_DEFAULT,
RamDiskReplicaTracker.class);
final RamDiskReplicaTracker tracker = ReflectionUtils.newInstance(
trackerClass, conf);
tracker.initialize(fsDataset);
return tracker;
}
void initialize(final FsDatasetImpl fsDataset) {
this.fsDataset = fsDataset;
}
/**
* Start tracking a new finalized replica on RAM disk.
*
* @param transientVolume RAM disk volume that stores the replica.
*/
abstract void addReplica(final String bpid, final long blockId,
final FsVolumeImpl transientVolume,
long lockedBytesReserved);
/**
* Invoked when a replica is opened by a client. This may be used as
* a heuristic by the eviction scheme.
*/
abstract void touch(final String bpid, final long blockId);
/**
* Get the next replica to write to persistent storage.
*/
abstract RamDiskReplica dequeueNextReplicaToPersist();
/**
* Invoked if a replica that was previously dequeued for persistence
* could not be successfully persisted. Add it back so it can be retried
* later.
*/
abstract void reenqueueReplicaNotPersisted(
final RamDiskReplica ramDiskReplica);
/**
* Invoked when the Lazy persist operation is started by the DataNode.
* @param checkpointVolume
*/
abstract void recordStartLazyPersist(
final String bpid, final long blockId, FsVolumeImpl checkpointVolume);
/**
* Invoked when the Lazy persist operation is complete.
*
* @param savedFiles The saved meta and block files, in that order.
*/
abstract void recordEndLazyPersist(
final String bpid, final long blockId, final File[] savedFiles);
/**
* Return a candidate replica to remove from RAM Disk. The exact replica
* to be returned may depend on the eviction scheme utilized.
*
   * @return the next replica to evict, or null if there are no candidates.
*/
abstract RamDiskReplica getNextCandidateForEviction();
/**
* Return the number of replicas pending persistence to disk.
*/
abstract int numReplicasNotPersisted();
/**
* Discard all state we are tracking for the given replica.
*/
abstract void discardReplica(
final String bpid, final long blockId,
boolean deleteSavedCopies);
/**
   * Return the RamDiskReplica info for the given block pool id and block id,
   * or null if it is not tracked on RAM disk.
*/
abstract RamDiskReplica getReplica(
final String bpid, final long blockId);
}
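/*
 * Illustrative sketch (not part of the original source): selecting a tracker
 * implementation through the configuration key referenced above and letting
 * getInstance() construct and initialize it reflectively. The concrete tracker
 * class and the dataset are caller-supplied placeholders; the sketch assumes it
 * lives in this package, since getInstance() is package-private.
 */
class RamDiskReplicaTrackerConfigSketch {
  static RamDiskReplicaTracker configureAndCreate(
      org.apache.hadoop.conf.Configuration conf,
      FsDatasetImpl dataset,
      Class<? extends RamDiskReplicaTracker> trackerImpl) {
    // Point the key at the desired implementation; otherwise the default is used.
    conf.setClass(
        org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY,
        trackerImpl, RamDiskReplicaTracker.class);
    return RamDiskReplicaTracker.getInstance(conf, dataset);
  }
}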
| 8,306 | 29.207273 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.io.File;
import java.io.FileDescriptor;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIOException;
/**
* This class is a container of multiple thread pools, each for a volume,
* so that we can schedule async disk operations easily.
*
* Examples of async disk operations are deletion of block files.
 * We don't want to create a new thread for each deletion request, and
* we don't want to do all deletions in the heartbeat thread since deletion
* can be slow, and we don't want to use a single thread pool because that
* is inefficient when we have more than 1 volume. AsyncDiskService is the
* solution for these.
* Another example of async disk operation is requesting sync_file_range().
*
* This class and {@link org.apache.hadoop.util.AsyncDiskService} are similar.
* They should be combined.
*/
class FsDatasetAsyncDiskService {
public static final Log LOG = LogFactory.getLog(FsDatasetAsyncDiskService.class);
// ThreadPool core pool size
private static final int CORE_THREADS_PER_VOLUME = 1;
// ThreadPool maximum pool size
private static final int MAXIMUM_THREADS_PER_VOLUME = 4;
// ThreadPool keep-alive time for threads over core pool size
private static final long THREADS_KEEP_ALIVE_SECONDS = 60;
private final DataNode datanode;
private final FsDatasetImpl fsdatasetImpl;
private final ThreadGroup threadGroup;
private Map<File, ThreadPoolExecutor> executors
= new HashMap<File, ThreadPoolExecutor>();
private Map<String, Set<Long>> deletedBlockIds
= new HashMap<String, Set<Long>>();
private static final int MAX_DELETED_BLOCKS = 64;
private int numDeletedBlocks = 0;
/**
   * Create an AsyncDiskService with a set of volumes (specified by their
* root directories).
*
   * The AsyncDiskService uses one ThreadPool per volume to do the async
* disk operations.
*/
FsDatasetAsyncDiskService(DataNode datanode, FsDatasetImpl fsdatasetImpl) {
this.datanode = datanode;
this.fsdatasetImpl = fsdatasetImpl;
this.threadGroup = new ThreadGroup(getClass().getSimpleName());
}
private void addExecutorForVolume(final File volume) {
ThreadFactory threadFactory = new ThreadFactory() {
int counter = 0;
@Override
public Thread newThread(Runnable r) {
int thisIndex;
synchronized (this) {
thisIndex = counter++;
}
Thread t = new Thread(threadGroup, r);
t.setName("Async disk worker #" + thisIndex +
" for volume " + volume);
return t;
}
};
ThreadPoolExecutor executor = new ThreadPoolExecutor(
CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(), threadFactory);
// This can reduce the number of running threads
executor.allowCoreThreadTimeOut(true);
executors.put(volume, executor);
}
/**
* Starts AsyncDiskService for a new volume
* @param volume the root of the new data volume.
*/
synchronized void addVolume(File volume) {
if (executors == null) {
throw new RuntimeException("AsyncDiskService is already shutdown");
}
ThreadPoolExecutor executor = executors.get(volume);
if (executor != null) {
throw new RuntimeException("Volume " + volume + " is already existed.");
}
addExecutorForVolume(volume);
}
/**
* Stops AsyncDiskService for a volume.
* @param volume the root of the volume.
*/
synchronized void removeVolume(File volume) {
if (executors == null) {
throw new RuntimeException("AsyncDiskService is already shutdown");
}
ThreadPoolExecutor executor = executors.get(volume);
if (executor == null) {
throw new RuntimeException("Can not find volume " + volume
+ " to remove.");
} else {
executor.shutdown();
executors.remove(volume);
}
}
synchronized long countPendingDeletions() {
long count = 0;
for (ThreadPoolExecutor exec : executors.values()) {
count += exec.getTaskCount() - exec.getCompletedTaskCount();
}
return count;
}
/**
* Execute the task sometime in the future, using ThreadPools.
*/
synchronized void execute(File root, Runnable task) {
if (executors == null) {
throw new RuntimeException("AsyncDiskService is already shutdown");
}
ThreadPoolExecutor executor = executors.get(root);
if (executor == null) {
throw new RuntimeException("Cannot find root " + root
+ " for execution of task " + task);
} else {
executor.execute(task);
}
}
/**
   * Gracefully shut down all ThreadPools. Will wait for all deletion
* tasks to finish.
*/
synchronized void shutdown() {
if (executors == null) {
LOG.warn("AsyncDiskService has already shut down.");
} else {
LOG.info("Shutting down all async disk service threads");
for (Map.Entry<File, ThreadPoolExecutor> e : executors.entrySet()) {
e.getValue().shutdown();
}
// clear the executor map so that calling execute again will fail.
executors = null;
LOG.info("All async disk service threads have been shut down");
}
}
public void submitSyncFileRangeRequest(FsVolumeImpl volume,
final FileDescriptor fd, final long offset, final long nbytes,
final int flags) {
execute(volume.getCurrentDir(), new Runnable() {
@Override
public void run() {
try {
NativeIO.POSIX.syncFileRangeIfPossible(fd, offset, nbytes, flags);
} catch (NativeIOException e) {
LOG.warn("sync_file_range error", e);
}
}
});
}
/**
* Delete the block file and meta file from the disk asynchronously, adjust
* dfsUsed statistics accordingly.
*/
void deleteAsync(FsVolumeReference volumeRef, File blockFile, File metaFile,
ExtendedBlock block, String trashDirectory) {
LOG.info("Scheduling " + block.getLocalBlock()
+ " file " + blockFile + " for deletion");
ReplicaFileDeleteTask deletionTask = new ReplicaFileDeleteTask(
volumeRef, blockFile, metaFile, block, trashDirectory);
execute(((FsVolumeImpl) volumeRef.getVolume()).getCurrentDir(), deletionTask);
}
/** A task for deleting a block file and its associated meta file, as well
   * as decrementing the dfs usage of the volume.
* Optionally accepts a trash directory. If one is specified then the files
* are moved to trash instead of being deleted. If none is specified then the
* files are deleted immediately.
*/
class ReplicaFileDeleteTask implements Runnable {
final FsVolumeReference volumeRef;
final FsVolumeImpl volume;
final File blockFile;
final File metaFile;
final ExtendedBlock block;
final String trashDirectory;
ReplicaFileDeleteTask(FsVolumeReference volumeRef, File blockFile,
File metaFile, ExtendedBlock block, String trashDirectory) {
this.volumeRef = volumeRef;
this.volume = (FsVolumeImpl) volumeRef.getVolume();
this.blockFile = blockFile;
this.metaFile = metaFile;
this.block = block;
this.trashDirectory = trashDirectory;
}
@Override
public String toString() {
// Called in AsyncDiskService.execute for displaying error messages.
return "deletion of block " + block.getBlockPoolId() + " "
+ block.getLocalBlock() + " with block file " + blockFile
+ " and meta file " + metaFile + " from volume " + volume;
}
private boolean deleteFiles() {
return blockFile.delete() && (metaFile.delete() || !metaFile.exists());
}
private boolean moveFiles() {
File trashDirFile = new File(trashDirectory);
if (!trashDirFile.exists() && !trashDirFile.mkdirs()) {
LOG.error("Failed to create trash directory " + trashDirectory);
return false;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Moving files " + blockFile.getName() + " and " +
metaFile.getName() + " to trash.");
}
File newBlockFile = new File(trashDirectory, blockFile.getName());
File newMetaFile = new File(trashDirectory, metaFile.getName());
return (blockFile.renameTo(newBlockFile) &&
metaFile.renameTo(newMetaFile));
}
@Override
public void run() {
final long blockLength = blockFile.length();
final long metaLength = metaFile.length();
boolean result;
result = (trashDirectory == null) ? deleteFiles() : moveFiles();
if (!result) {
LOG.warn("Unexpected error trying to "
+ (trashDirectory == null ? "delete" : "move")
+ " block " + block.getBlockPoolId() + " " + block.getLocalBlock()
+ " at file " + blockFile + ". Ignored.");
} else {
if(block.getLocalBlock().getNumBytes() != BlockCommand.NO_ACK){
datanode.notifyNamenodeDeletedBlock(block, volume.getStorageID());
}
volume.onBlockFileDeletion(block.getBlockPoolId(), blockLength);
volume.onMetaFileDeletion(block.getBlockPoolId(), metaLength);
LOG.info("Deleted " + block.getBlockPoolId() + " "
+ block.getLocalBlock() + " file " + blockFile);
}
updateDeletedBlockId(block);
IOUtils.cleanup(null, volumeRef);
}
}
private synchronized void updateDeletedBlockId(ExtendedBlock block) {
Set<Long> blockIds = deletedBlockIds.get(block.getBlockPoolId());
if (blockIds == null) {
blockIds = new HashSet<Long>();
deletedBlockIds.put(block.getBlockPoolId(), blockIds);
}
blockIds.add(block.getBlockId());
numDeletedBlocks++;
if (numDeletedBlocks == MAX_DELETED_BLOCKS) {
for (Entry<String, Set<Long>> e : deletedBlockIds.entrySet()) {
String bpid = e.getKey();
Set<Long> bs = e.getValue();
fsdatasetImpl.removeDeletedBlocks(bpid, bs);
bs.clear();
}
numDeletedBlocks = 0;
}
}
}
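/*
 * Illustrative sketch (not part of the original source): the deleted-block-id
 * batching performed by updateDeletedBlockId() above, reduced to plain JDK
 * types and simplified to hand the batch back to the caller instead of calling
 * FsDatasetImpl#removeDeletedBlocks directly. Names here are hypothetical; only
 * the 64-entry threshold mirrors MAX_DELETED_BLOCKS above.
 */
class DeletedBlockBatcherSketch {
  private final java.util.Map<String, java.util.Set<Long>> deletedBlockIds =
      new java.util.HashMap<String, java.util.Set<Long>>();
  private int numDeletedBlocks = 0;
  private static final int MAX_DELETED_BLOCKS = 64; // same threshold as above

  /**
   * Record one deletion. Returns the per-block-pool batches to report when the
   * threshold is reached (the caller stands in for removeDeletedBlocks), or
   * null while the batch is still filling up.
   */
  synchronized java.util.Map<String, java.util.Set<Long>> recordDeletion(
      String bpid, long blockId) {
    java.util.Set<Long> blockIds = deletedBlockIds.get(bpid);
    if (blockIds == null) {
      blockIds = new java.util.HashSet<Long>();
      deletedBlockIds.put(bpid, blockIds);
    }
    blockIds.add(blockId);
    numDeletedBlocks++;
    if (numDeletedBlocks < MAX_DELETED_BLOCKS) {
      return null;
    }
    // Hand the accumulated ids back and start a fresh batch.
    java.util.Map<String, java.util.Set<Long>> batch =
        new java.util.HashMap<String, java.util.Set<Long>>(deletedBlockIds);
    deletedBlockIds.clear();
    numDeletedBlocks = 0;
    return batch;
  }
}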
| 11,609 | 34.944272 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.RandomAccessFile;
import java.io.Writer;
import java.util.Iterator;
import java.util.Scanner;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DU;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.io.Files;
/**
* A block pool slice represents a portion of a block pool stored on a volume.
* Taken together, all BlockPoolSlices sharing a block pool ID across a
* cluster represent a single block pool.
*
* This class is synchronized by {@link FsVolumeImpl}.
*/
class BlockPoolSlice {
static final Log LOG = LogFactory.getLog(BlockPoolSlice.class);
private final String bpid;
  private final FsVolumeImpl volume; // volume to which this BlockPool belongs
private final File currentDir; // StorageDirectory/current/bpid/current
// directory where finalized replicas are stored
private final File finalizedDir;
private final File lazypersistDir;
  private final File rbwDir; // directory that stores RBW replicas
  private final File tmpDir; // directory that stores temporary replicas
private final int ioFileBufferSize;
private static final String DU_CACHE_FILE = "dfsUsed";
private volatile boolean dfsUsedSaved = false;
private static final int SHUTDOWN_HOOK_PRIORITY = 30;
private final boolean deleteDuplicateReplicas;
private static final String REPLICA_CACHE_FILE = "replicas";
private final long replicaCacheExpiry = 5*60*1000;
// TODO:FEDERATION scalability issue - a thread per DU is needed
private final DU dfsUsage;
/**
   * Create a block pool slice
* @param bpid Block pool Id
   * @param volume {@link FsVolumeImpl} to which this BlockPool belongs
* @param bpDir directory corresponding to the BlockPool
* @param conf configuration
* @throws IOException
*/
BlockPoolSlice(String bpid, FsVolumeImpl volume, File bpDir,
Configuration conf) throws IOException {
this.bpid = bpid;
this.volume = volume;
this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
this.finalizedDir = new File(
currentDir, DataStorage.STORAGE_DIR_FINALIZED);
this.lazypersistDir = new File(currentDir, DataStorage.STORAGE_DIR_LAZY_PERSIST);
if (!this.finalizedDir.exists()) {
if (!this.finalizedDir.mkdirs()) {
throw new IOException("Failed to mkdirs " + this.finalizedDir);
}
}
this.ioFileBufferSize = DFSUtil.getIoFileBufferSize(conf);
this.deleteDuplicateReplicas = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);
// Files that were being written when the datanode was last shutdown
// are now moved back to the data directory. It is possible that
// in the future, we might want to do some sort of datanode-local
// recovery for these blocks. For example, crc validation.
//
this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
if (tmpDir.exists()) {
FileUtil.fullyDelete(tmpDir);
}
this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
final boolean supportAppends = conf.getBoolean(
DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
if (rbwDir.exists() && !supportAppends) {
FileUtil.fullyDelete(rbwDir);
}
if (!rbwDir.mkdirs()) { // create rbw directory if not exist
if (!rbwDir.isDirectory()) {
throw new IOException("Mkdirs failed to create " + rbwDir.toString());
}
}
if (!tmpDir.mkdirs()) {
if (!tmpDir.isDirectory()) {
throw new IOException("Mkdirs failed to create " + tmpDir.toString());
}
}
// Use cached value initially if available. Or the following call will
// block until the initial du command completes.
this.dfsUsage = new DU(bpDir, conf, loadDfsUsed());
this.dfsUsage.start();
// Make the dfs usage to be saved during shutdown.
ShutdownHookManager.get().addShutdownHook(
new Runnable() {
@Override
public void run() {
if (!dfsUsedSaved) {
saveDfsUsed();
}
}
}, SHUTDOWN_HOOK_PRIORITY);
}
File getDirectory() {
return currentDir.getParentFile();
}
File getFinalizedDir() {
return finalizedDir;
}
File getLazypersistDir() {
return lazypersistDir;
}
File getRbwDir() {
return rbwDir;
}
File getTmpDir() {
return tmpDir;
}
  /** Decrement the dfs usage of this block pool slice. It must be synchronized by the caller. */
void decDfsUsed(long value) {
dfsUsage.decDfsUsed(value);
}
long getDfsUsed() throws IOException {
return dfsUsage.getUsed();
}
void incDfsUsed(long value) {
dfsUsage.incDfsUsed(value);
}
/**
* Read in the cached DU value and return it if it is less than 600 seconds
* old (DU update interval). Slight imprecision of dfsUsed is not critical
* and skipping DU can significantly shorten the startup time.
* If the cached value is not available or too old, -1 is returned.
*/
long loadDfsUsed() {
long cachedDfsUsed;
long mtime;
Scanner sc;
try {
sc = new Scanner(new File(currentDir, DU_CACHE_FILE), "UTF-8");
} catch (FileNotFoundException fnfe) {
return -1;
}
try {
// Get the recorded dfsUsed from the file.
if (sc.hasNextLong()) {
cachedDfsUsed = sc.nextLong();
} else {
return -1;
}
// Get the recorded mtime from the file.
if (sc.hasNextLong()) {
mtime = sc.nextLong();
} else {
return -1;
}
// Return the cached value if mtime is okay.
if (mtime > 0 && (Time.now() - mtime < 600000L)) {
FsDatasetImpl.LOG.info("Cached dfsUsed found for " + currentDir + ": " +
cachedDfsUsed);
return cachedDfsUsed;
}
return -1;
} finally {
sc.close();
}
}
/**
* Write the current dfsUsed to the cache file.
*/
void saveDfsUsed() {
File outFile = new File(currentDir, DU_CACHE_FILE);
if (outFile.exists() && !outFile.delete()) {
FsDatasetImpl.LOG.warn("Failed to delete old dfsUsed file in " +
outFile.getParent());
}
try {
long used = getDfsUsed();
try (Writer out = new OutputStreamWriter(
new FileOutputStream(outFile), "UTF-8")) {
// mtime is written last, so that truncated writes won't be valid.
out.write(Long.toString(used) + " " + Long.toString(Time.now()));
out.flush();
}
} catch (IOException ioe) {
// If write failed, the volume might be bad. Since the cache file is
// not critical, log the error and continue.
FsDatasetImpl.LOG.warn("Failed to write dfsUsed to " + outFile, ioe);
}
}
/**
* Temporary files. They get moved to the finalized block directory when
* the block is finalized.
*/
File createTmpFile(Block b) throws IOException {
File f = new File(tmpDir, b.getBlockName());
return DatanodeUtil.createTmpFile(b, f);
}
/**
* RBW files. They get moved to the finalized block directory when
* the block is finalized.
*/
File createRbwFile(Block b) throws IOException {
File f = new File(rbwDir, b.getBlockName());
return DatanodeUtil.createTmpFile(b, f);
}
File addBlock(Block b, File f) throws IOException {
File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
if (!blockDir.exists()) {
if (!blockDir.mkdirs()) {
throw new IOException("Failed to mkdirs " + blockDir);
}
}
File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
return blockFile;
}
/**
* Move a persisted replica from lazypersist directory to a subdirectory
* under finalized.
*/
File activateSavedReplica(Block b, File metaFile, File blockFile)
throws IOException {
final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
final File targetBlockFile = new File(blockDir, blockFile.getName());
final File targetMetaFile = new File(blockDir, metaFile.getName());
FileUtils.moveFile(blockFile, targetBlockFile);
FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
FileUtils.moveFile(metaFile, targetMetaFile);
FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);
return targetBlockFile;
}
void checkDirs() throws DiskErrorException {
DiskChecker.checkDirs(finalizedDir);
DiskChecker.checkDir(tmpDir);
DiskChecker.checkDir(rbwDir);
}
void getVolumeMap(ReplicaMap volumeMap,
final RamDiskReplicaTracker lazyWriteReplicaMap)
throws IOException {
// Recover lazy persist replicas, they will be added to the volumeMap
// when we scan the finalized directory.
if (lazypersistDir.exists()) {
int numRecovered = moveLazyPersistReplicasToFinalized(lazypersistDir);
FsDatasetImpl.LOG.info(
"Recovered " + numRecovered + " replicas from " + lazypersistDir);
}
boolean success = readReplicasFromCache(volumeMap, lazyWriteReplicaMap);
if (!success) {
// add finalized replicas
addToReplicasMap(volumeMap, finalizedDir, lazyWriteReplicaMap, true);
// add rbw replicas
addToReplicasMap(volumeMap, rbwDir, lazyWriteReplicaMap, false);
}
}
/**
* Recover an unlinked tmp file on datanode restart. If the original block
* does not exist, then the tmp file is renamed to be the
* original file name and the original name is returned; otherwise the tmp
* file is deleted and null is returned.
*/
File recoverTempUnlinkedBlock(File unlinkedTmp) throws IOException {
File blockFile = FsDatasetUtil.getOrigFile(unlinkedTmp);
if (blockFile.exists()) {
// If the original block file still exists, then no recovery is needed.
if (!unlinkedTmp.delete()) {
throw new IOException("Unable to cleanup unlinked tmp file " +
unlinkedTmp);
}
return null;
} else {
if (!unlinkedTmp.renameTo(blockFile)) {
throw new IOException("Unable to rename unlinked tmp file " +
unlinkedTmp);
}
return blockFile;
}
}
/**
* Move replicas in the lazy persist directory to their corresponding locations
* in the finalized directory.
* @return number of replicas recovered.
*/
private int moveLazyPersistReplicasToFinalized(File source)
throws IOException {
File files[] = FileUtil.listFiles(source);
int numRecovered = 0;
for (File file : files) {
if (file.isDirectory()) {
numRecovered += moveLazyPersistReplicasToFinalized(file);
}
if (Block.isMetaFilename(file.getName())) {
File metaFile = file;
File blockFile = Block.metaToBlockFile(metaFile);
long blockId = Block.filename2id(blockFile.getName());
File targetDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
if (blockFile.exists()) {
if (!targetDir.exists() && !targetDir.mkdirs()) {
LOG.warn("Failed to mkdirs " + targetDir);
continue;
}
final File targetMetaFile = new File(targetDir, metaFile.getName());
try {
NativeIO.renameTo(metaFile, targetMetaFile);
} catch (IOException e) {
LOG.warn("Failed to move meta file from "
+ metaFile + " to " + targetMetaFile, e);
continue;
}
final File targetBlockFile = new File(targetDir, blockFile.getName());
try {
NativeIO.renameTo(blockFile, targetBlockFile);
} catch (IOException e) {
LOG.warn("Failed to move block file from "
+ blockFile + " to " + targetBlockFile, e);
continue;
}
if (targetBlockFile.exists() && targetMetaFile.exists()) {
++numRecovered;
} else {
// Failure should be rare.
LOG.warn("Failed to move " + blockFile + " to " + targetDir);
}
}
}
}
FileUtil.fullyDelete(source);
return numRecovered;
}
private void addReplicaToReplicasMap(Block block, ReplicaMap volumeMap,
final RamDiskReplicaTracker lazyWriteReplicaMap,boolean isFinalized)
throws IOException {
ReplicaInfo newReplica = null;
long blockId = block.getBlockId();
long genStamp = block.getGenerationStamp();
if (isFinalized) {
newReplica = new FinalizedReplica(blockId,
block.getNumBytes(), genStamp, volume, DatanodeUtil
.idToBlockDir(finalizedDir, blockId));
} else {
File file = new File(rbwDir, block.getBlockName());
boolean loadRwr = true;
File restartMeta = new File(file.getParent() +
File.pathSeparator + "." + file.getName() + ".restart");
Scanner sc = null;
try {
sc = new Scanner(restartMeta, "UTF-8");
// The restart meta file exists
if (sc.hasNextLong() && (sc.nextLong() > Time.now())) {
// It didn't expire. Load the replica as a RBW.
// We don't know the expected block length, so just use 0
// and don't reserve any more space for writes.
newReplica = new ReplicaBeingWritten(blockId,
validateIntegrityAndSetLength(file, genStamp),
genStamp, volume, file.getParentFile(), null, 0);
loadRwr = false;
}
sc.close();
if (!restartMeta.delete()) {
FsDatasetImpl.LOG.warn("Failed to delete restart meta file: " +
restartMeta.getPath());
}
} catch (FileNotFoundException fnfe) {
        // nothing to do here
} finally {
if (sc != null) {
sc.close();
}
}
// Restart meta doesn't exist or expired.
if (loadRwr) {
newReplica = new ReplicaWaitingToBeRecovered(blockId,
validateIntegrityAndSetLength(file, genStamp),
genStamp, volume, file.getParentFile());
}
}
ReplicaInfo oldReplica = volumeMap.get(bpid, newReplica.getBlockId());
if (oldReplica == null) {
volumeMap.add(bpid, newReplica);
} else {
// We have multiple replicas of the same block so decide which one
// to keep.
newReplica = resolveDuplicateReplicas(newReplica, oldReplica, volumeMap);
}
// If we are retaining a replica on transient storage make sure
// it is in the lazyWriteReplicaMap so it can be persisted
// eventually.
if (newReplica.getVolume().isTransientStorage()) {
lazyWriteReplicaMap.addReplica(bpid, blockId,
(FsVolumeImpl) newReplica.getVolume(), 0);
} else {
lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
}
}
/**
* Add replicas under the given directory to the volume map
* @param volumeMap the replicas map
* @param dir an input directory
* @param lazyWriteReplicaMap Map of replicas on transient
* storage.
* @param isFinalized true if the directory has finalized replicas;
* false if the directory has rbw replicas
*/
void addToReplicasMap(ReplicaMap volumeMap, File dir,
final RamDiskReplicaTracker lazyWriteReplicaMap,
boolean isFinalized)
throws IOException {
File files[] = FileUtil.listFiles(dir);
for (File file : files) {
if (file.isDirectory()) {
addToReplicasMap(volumeMap, file, lazyWriteReplicaMap, isFinalized);
}
if (isFinalized && FsDatasetUtil.isUnlinkTmpFile(file)) {
file = recoverTempUnlinkedBlock(file);
if (file == null) { // the original block still exists, so we cover it
// in another iteration and can continue here
continue;
}
}
if (!Block.isBlockFilename(file))
continue;
long genStamp = FsDatasetUtil.getGenerationStampFromFile(
files, file);
long blockId = Block.filename2id(file.getName());
Block block = new Block(blockId, file.length(), genStamp);
addReplicaToReplicasMap(block, volumeMap, lazyWriteReplicaMap,
isFinalized);
}
}
/**
* This method is invoked during DN startup when volumes are scanned to
* build up the volumeMap.
*
* Given two replicas, decide which one to keep. The preference is as
* follows:
* 1. Prefer the replica with the higher generation stamp.
* 2. If generation stamps are equal, prefer the replica with the
* larger on-disk length.
* 3. If on-disk length is the same, prefer the replica on persistent
* storage volume.
* 4. All other factors being equal, keep replica1.
*
* The other replica is removed from the volumeMap and is deleted from
* its storage volume.
*
* @param replica1
* @param replica2
* @param volumeMap
* @return the replica that is retained.
* @throws IOException
*/
ReplicaInfo resolveDuplicateReplicas(
final ReplicaInfo replica1, final ReplicaInfo replica2,
final ReplicaMap volumeMap) throws IOException {
if (!deleteDuplicateReplicas) {
// Leave both block replicas in place.
return replica1;
}
final ReplicaInfo replicaToDelete =
selectReplicaToDelete(replica1, replica2);
final ReplicaInfo replicaToKeep =
(replicaToDelete != replica1) ? replica1 : replica2;
// Update volumeMap and delete the replica
volumeMap.add(bpid, replicaToKeep);
if (replicaToDelete != null) {
deleteReplica(replicaToDelete);
}
return replicaToKeep;
}
@VisibleForTesting
static ReplicaInfo selectReplicaToDelete(final ReplicaInfo replica1,
final ReplicaInfo replica2) {
ReplicaInfo replicaToKeep;
ReplicaInfo replicaToDelete;
// it's the same block so don't ever delete it, even if GS or size
// differs. caller should keep the one it just discovered on disk
if (replica1.getBlockFile().equals(replica2.getBlockFile())) {
return null;
}
if (replica1.getGenerationStamp() != replica2.getGenerationStamp()) {
replicaToKeep = replica1.getGenerationStamp() > replica2.getGenerationStamp()
? replica1 : replica2;
} else if (replica1.getNumBytes() != replica2.getNumBytes()) {
replicaToKeep = replica1.getNumBytes() > replica2.getNumBytes() ?
replica1 : replica2;
} else if (replica1.getVolume().isTransientStorage() &&
!replica2.getVolume().isTransientStorage()) {
replicaToKeep = replica2;
} else {
replicaToKeep = replica1;
}
replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;
if (LOG.isDebugEnabled()) {
LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
+ ". Will try to delete " + replicaToDelete);
}
return replicaToDelete;
}
private void deleteReplica(final ReplicaInfo replicaToDelete) {
// Delete the files on disk. Failure here is okay.
final File blockFile = replicaToDelete.getBlockFile();
if (!blockFile.delete()) {
LOG.warn("Failed to delete block file " + blockFile);
}
final File metaFile = replicaToDelete.getMetaFile();
if (!metaFile.delete()) {
LOG.warn("Failed to delete meta file " + metaFile);
}
}
/**
* Find out the number of bytes in the block that match its crc.
*
* This algorithm assumes that data corruption caused by unexpected
* datanode shutdown occurs only in the last crc chunk. So it checks
* only the last chunk.
*
* @param blockFile the block file
* @param genStamp generation stamp of the block
* @return the number of valid bytes
*/
private long validateIntegrityAndSetLength(File blockFile, long genStamp) {
DataInputStream checksumIn = null;
InputStream blockIn = null;
try {
final File metaFile = FsDatasetUtil.getMetaFile(blockFile, genStamp);
long blockFileLen = blockFile.length();
long metaFileLen = metaFile.length();
int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
if (!blockFile.exists() || blockFileLen == 0 ||
!metaFile.exists() || metaFileLen < crcHeaderLen) {
return 0;
}
checksumIn = new DataInputStream(
new BufferedInputStream(new FileInputStream(metaFile),
ioFileBufferSize));
// read and handle the common header here. For now just a version
final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
checksumIn, metaFile);
int bytesPerChecksum = checksum.getBytesPerChecksum();
int checksumSize = checksum.getChecksumSize();
long numChunks = Math.min(
(blockFileLen + bytesPerChecksum - 1)/bytesPerChecksum,
(metaFileLen - crcHeaderLen)/checksumSize);
if (numChunks == 0) {
return 0;
}
IOUtils.skipFully(checksumIn, (numChunks-1)*checksumSize);
blockIn = new FileInputStream(blockFile);
long lastChunkStartPos = (numChunks-1)*bytesPerChecksum;
IOUtils.skipFully(blockIn, lastChunkStartPos);
int lastChunkSize = (int)Math.min(
bytesPerChecksum, blockFileLen-lastChunkStartPos);
byte[] buf = new byte[lastChunkSize+checksumSize];
checksumIn.readFully(buf, lastChunkSize, checksumSize);
IOUtils.readFully(blockIn, buf, 0, lastChunkSize);
checksum.update(buf, 0, lastChunkSize);
long validFileLength;
if (checksum.compare(buf, lastChunkSize)) { // last chunk matches crc
validFileLength = lastChunkStartPos + lastChunkSize;
      } else { // last chunk is corrupt
validFileLength = lastChunkStartPos;
}
// truncate if extra bytes are present without CRC
if (blockFile.length() > validFileLength) {
RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
try {
// truncate blockFile
blockRAF.setLength(validFileLength);
} finally {
blockRAF.close();
}
}
return validFileLength;
} catch (IOException e) {
FsDatasetImpl.LOG.warn(e);
return 0;
} finally {
IOUtils.closeStream(checksumIn);
IOUtils.closeStream(blockIn);
}
}
@Override
public String toString() {
return currentDir.getAbsolutePath();
}
void shutdown(BlockListAsLongs blocksListToPersist) {
saveReplicas(blocksListToPersist);
saveDfsUsed();
dfsUsedSaved = true;
dfsUsage.shutdown();
}
private boolean readReplicasFromCache(ReplicaMap volumeMap,
final RamDiskReplicaTracker lazyWriteReplicaMap) {
ReplicaMap tmpReplicaMap = new ReplicaMap(this);
File replicaFile = new File(currentDir, REPLICA_CACHE_FILE);
// Check whether the file exists or not.
if (!replicaFile.exists()) {
LOG.info("Replica Cache file: "+ replicaFile.getPath() +
" doesn't exist ");
return false;
}
long fileLastModifiedTime = replicaFile.lastModified();
if (System.currentTimeMillis() > fileLastModifiedTime + replicaCacheExpiry) {
LOG.info("Replica Cache file: " + replicaFile.getPath() +
" has gone stale");
// Just to make findbugs happy
if (!replicaFile.delete()) {
LOG.info("Replica Cache file: " + replicaFile.getPath() +
" cannot be deleted");
}
return false;
}
FileInputStream inputStream = null;
try {
inputStream = new FileInputStream(replicaFile);
BlockListAsLongs blocksList = BlockListAsLongs.readFrom(inputStream);
Iterator<BlockReportReplica> iterator = blocksList.iterator();
while (iterator.hasNext()) {
BlockReportReplica replica = iterator.next();
switch (replica.getState()) {
case FINALIZED:
addReplicaToReplicasMap(replica, tmpReplicaMap, lazyWriteReplicaMap, true);
break;
case RUR:
case RBW:
case RWR:
addReplicaToReplicasMap(replica, tmpReplicaMap, lazyWriteReplicaMap, false);
break;
default:
break;
}
}
inputStream.close();
// Now it is safe to add the replica into volumeMap
// In case of any exception during parsing this cache file, fall back
// to scan all the files on disk.
for (ReplicaInfo info: tmpReplicaMap.replicas(bpid)) {
volumeMap.add(bpid, info);
}
LOG.info("Successfully read replica from cache file : "
+ replicaFile.getPath());
return true;
} catch (Exception e) {
// Any exception we need to revert back to read from disk
// Log the error and return false
LOG.info("Exception occured while reading the replicas cache file: "
+ replicaFile.getPath(), e );
return false;
}
finally {
if (!replicaFile.delete()) {
LOG.info("Failed to delete replica cache file: " +
replicaFile.getPath());
}
// close the inputStream
IOUtils.closeStream(inputStream);
}
}
private void saveReplicas(BlockListAsLongs blocksListToPersist) {
if (blocksListToPersist == null ||
blocksListToPersist.getNumberOfBlocks()== 0) {
return;
}
File tmpFile = new File(currentDir, REPLICA_CACHE_FILE + ".tmp");
if (tmpFile.exists() && !tmpFile.delete()) {
LOG.warn("Failed to delete tmp replicas file in " +
tmpFile.getPath());
return;
}
File replicaCacheFile = new File(currentDir, REPLICA_CACHE_FILE);
if (replicaCacheFile.exists() && !replicaCacheFile.delete()) {
LOG.warn("Failed to delete replicas file in " +
replicaCacheFile.getPath());
return;
}
FileOutputStream out = null;
try {
out = new FileOutputStream(tmpFile);
blocksListToPersist.writeTo(out);
out.close();
// Renaming the tmp file to replicas
Files.move(tmpFile, replicaCacheFile);
} catch (Exception e) {
// If write failed, the volume might be bad. Since the cache file is
// not critical, log the error, delete both the files (tmp and cache)
// and continue.
LOG.warn("Failed to write replicas to cache ", e);
if (replicaCacheFile.exists() && !replicaCacheFile.delete()) {
LOG.warn("Failed to delete replicas file: " +
replicaCacheFile.getPath());
}
} finally {
IOUtils.closeStream(out);
if (tmpFile.exists() && !tmpFile.delete()) {
LOG.warn("Failed to delete tmp file in " +
tmpFile.getPath());
}
}
}
}
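/*
 * Illustrative sketch (not part of the original source): the on-disk format of
 * the "dfsUsed" cache file written by saveDfsUsed() above — the used-bytes
 * value followed by the write time in milliseconds — together with the
 * 600-second staleness check applied by loadDfsUsed(). The file location is
 * caller-supplied.
 */
class DfsUsedCacheSketch {
  static long readCachedDfsUsed(java.io.File duCacheFile) {
    try (java.util.Scanner sc = new java.util.Scanner(duCacheFile, "UTF-8")) {
      if (!sc.hasNextLong()) {
        return -1;           // malformed: no used-bytes value
      }
      long cachedDfsUsed = sc.nextLong();
      if (!sc.hasNextLong()) {
        return -1;           // malformed: no mtime value
      }
      long mtime = sc.nextLong();
      // Accept the cached value only if it was written within the last 10 minutes.
      if (mtime > 0 && System.currentTimeMillis() - mtime < 600000L) {
        return cachedDfsUsed;
      }
      return -1;
    } catch (java.io.FileNotFoundException fnfe) {
      return -1;             // no cache file: fall back to running du
    }
  }
}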
| 29,126 | 34.607579 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
/**
* A factory for creating {@link FsDatasetImpl} objects.
*/
public class FsDatasetFactory extends FsDatasetSpi.Factory<FsDatasetImpl> {
@Override
public FsDatasetImpl newInstance(DataNode datanode,
DataStorage storage, Configuration conf) throws IOException {
return new FsDatasetImpl(datanode, storage, conf);
}
}
| 1,467 | 38.675676 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
/** Utility methods. */
@InterfaceAudience.Private
public class FsDatasetUtil {
static boolean isUnlinkTmpFile(File f) {
return f.getName().endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX);
}
static File getOrigFile(File unlinkTmpFile) {
final String name = unlinkTmpFile.getName();
if (!name.endsWith(DatanodeUtil.UNLINK_BLOCK_SUFFIX)) {
throw new IllegalArgumentException("unlinkTmpFile=" + unlinkTmpFile
+ " does not end with " + DatanodeUtil.UNLINK_BLOCK_SUFFIX);
}
final int n = name.length() - DatanodeUtil.UNLINK_BLOCK_SUFFIX.length();
return new File(unlinkTmpFile.getParentFile(), name.substring(0, n));
}
static File getMetaFile(File f, long gs) {
return new File(f.getParent(),
DatanodeUtil.getMetaName(f.getName(), gs));
}
/** Find the corresponding meta data file from a given block file */
public static File findMetaFile(final File blockFile) throws IOException {
final String prefix = blockFile.getName() + "_";
final File parent = blockFile.getParentFile();
final File[] matches = parent.listFiles(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
return dir.equals(parent) && name.startsWith(prefix)
&& name.endsWith(Block.METADATA_EXTENSION);
}
});
if (matches == null || matches.length == 0) {
throw new IOException("Meta file not found, blockFile=" + blockFile);
}
if (matches.length > 1) {
throw new IOException("Found more than one meta files: "
+ Arrays.asList(matches));
}
return matches[0];
}
/**
* Find the meta-file for the specified block file
* and then return the generation stamp from the name of the meta-file.
*/
static long getGenerationStampFromFile(File[] listdir, File blockFile) {
String blockName = blockFile.getName();
for (int j = 0; j < listdir.length; j++) {
String path = listdir[j].getName();
if (!path.startsWith(blockName)) {
continue;
}
if (blockFile == listdir[j]) {
continue;
}
return Block.getGenerationStamp(listdir[j].getName());
}
FsDatasetImpl.LOG.warn("Block " + blockFile + " does not have a metafile!");
return HdfsConstants.GRANDFATHER_GENERATION_STAMP;
}
  /** Parse and return the generation stamp from the name of the given meta file. */
static long parseGenerationStamp(File blockFile, File metaFile
) throws IOException {
final String metaname = metaFile.getName();
final String gs = metaname.substring(blockFile.getName().length() + 1,
metaname.length() - Block.METADATA_EXTENSION.length());
try {
return Long.parseLong(gs);
} catch(NumberFormatException nfe) {
throw new IOException("Failed to parse generation stamp: blockFile="
+ blockFile + ", metaFile=" + metaFile, nfe);
}
}
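  // Illustrative example of the naming convention used above (a sketch,
  // assuming the usual "blk_<id>" block file names): a block file named
  // "blk_1073741825" with generation stamp 1001 has the meta file
  // "blk_1073741825_1001.meta", and parseGenerationStamp on that pair
  // returns 1001L.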
}
| 4,079 | 36.777778 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.io.BufferedInputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.DataChecksum;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* Represents an HDFS block that is mmapped by the DataNode.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class MappableBlock implements Closeable {
private MappedByteBuffer mmap;
private final long length;
MappableBlock(MappedByteBuffer mmap, long length) {
this.mmap = mmap;
this.length = length;
assert length > 0;
}
public long getLength() {
return length;
}
/**
* Load the block.
*
* mmap and mlock the block, and then verify its checksum.
*
* @param length The current length of the block.
* @param blockIn The block input stream. Should be positioned at the
* start. The caller must close this.
* @param metaIn The meta file input stream. Should be positioned at
* the start. The caller must close this.
* @param blockFileName The block file name, for logging purposes.
*
* @return The Mappable block.
*/
public static MappableBlock load(long length,
FileInputStream blockIn, FileInputStream metaIn,
String blockFileName) throws IOException {
MappableBlock mappableBlock = null;
MappedByteBuffer mmap = null;
FileChannel blockChannel = null;
try {
blockChannel = blockIn.getChannel();
if (blockChannel == null) {
throw new IOException("Block InputStream has no FileChannel.");
}
mmap = blockChannel.map(MapMode.READ_ONLY, 0, length);
NativeIO.POSIX.getCacheManipulator().mlock(blockFileName, mmap, length);
verifyChecksum(length, metaIn, blockChannel, blockFileName);
mappableBlock = new MappableBlock(mmap, length);
} finally {
IOUtils.closeQuietly(blockChannel);
if (mappableBlock == null) {
if (mmap != null) {
NativeIO.POSIX.munmap(mmap); // unmapping also unlocks
}
}
}
return mappableBlock;
}
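  // A minimal caller sketch (illustrative only; blockFile/metaFile are
  // placeholders and error handling is simplified):
  //
  //   FileInputStream blockIn = new FileInputStream(blockFile);
  //   FileInputStream metaIn = new FileInputStream(metaFile);
  //   try {
  //     MappableBlock mb = MappableBlock.load(blockFile.length(), blockIn,
  //         metaIn, blockFile.getName());
  //     // ... the block is now mlocked; keep mb until it is uncached ...
  //     // ... later, mb.close() munmaps the block, which also unlocks it
  //   } finally {
  //     IOUtils.closeQuietly(blockIn);   // load() does not close the streams
  //     IOUtils.closeQuietly(metaIn);
  //   }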
/**
* Verifies the block's checksum. This is an I/O intensive operation.
*/
private static void verifyChecksum(long length,
FileInputStream metaIn, FileChannel blockChannel, String blockFileName)
throws IOException, ChecksumException {
// Verify the checksum from the block's meta file
// Get the DataChecksum from the meta file header
BlockMetadataHeader header =
BlockMetadataHeader.readHeader(new DataInputStream(
new BufferedInputStream(metaIn, BlockMetadataHeader
.getHeaderSize())));
FileChannel metaChannel = null;
try {
metaChannel = metaIn.getChannel();
if (metaChannel == null) {
throw new IOException("Block InputStream meta file has no FileChannel.");
}
DataChecksum checksum = header.getChecksum();
final int bytesPerChecksum = checksum.getBytesPerChecksum();
final int checksumSize = checksum.getChecksumSize();
final int numChunks = (8*1024*1024) / bytesPerChecksum;
ByteBuffer blockBuf = ByteBuffer.allocate(numChunks*bytesPerChecksum);
ByteBuffer checksumBuf = ByteBuffer.allocate(numChunks*checksumSize);
// Verify the checksum
int bytesVerified = 0;
while (bytesVerified < length) {
Preconditions.checkState(bytesVerified % bytesPerChecksum == 0,
"Unexpected partial chunk before EOF");
int bytesRead = fillBuffer(blockChannel, blockBuf);
if (bytesRead == -1) {
throw new IOException("checksum verification failed: premature EOF");
}
blockBuf.flip();
// Number of read chunks, including partial chunk at end
int chunks = (bytesRead+bytesPerChecksum-1) / bytesPerChecksum;
checksumBuf.limit(chunks*checksumSize);
fillBuffer(metaChannel, checksumBuf);
checksumBuf.flip();
checksum.verifyChunkedSums(blockBuf, checksumBuf, blockFileName,
bytesVerified);
// Success
bytesVerified += bytesRead;
blockBuf.clear();
checksumBuf.clear();
}
} finally {
IOUtils.closeQuietly(metaChannel);
}
}
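  // Worked example of the chunk accounting above (a sketch, assuming
  // bytesPerChecksum = 512 and checksumSize = 4): a full 8 MB read covers
  // 8*1024*1024 / 512 = 16384 chunks and consumes 16384 * 4 = 64 KB of
  // checksum data, while a final partial read of 1000 bytes covers
  // (1000 + 511) / 512 = 2 chunks (the last one partial) and consumes
  // 2 * 4 = 8 checksum bytes.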
/**
* Reads bytes into a buffer until EOF or the buffer's limit is reached
*/
private static int fillBuffer(FileChannel channel, ByteBuffer buf)
throws IOException {
int bytesRead = channel.read(buf);
if (bytesRead < 0) {
//EOF
return bytesRead;
}
while (buf.remaining() > 0) {
int n = channel.read(buf);
if (n < 0) {
//EOF
return bytesRead;
}
bytesRead += n;
}
return bytesRead;
}
@Override
public void close() {
if (mmap != null) {
NativeIO.POSIX.munmap(mmap);
mmap = null;
}
}
}
| 6,364 | 33.972527 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS_DEFAULT;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.time.DurationFormatUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Manages caching for an FsDatasetImpl by using the mmap(2) and mlock(2)
* system calls to lock blocks into memory. Block checksums are verified upon
* entry into the cache.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class FsDatasetCache {
/**
* MappableBlocks that we know about.
*/
private static final class Value {
final State state;
final MappableBlock mappableBlock;
Value(MappableBlock mappableBlock, State state) {
this.mappableBlock = mappableBlock;
this.state = state;
}
}
private enum State {
/**
* The MappableBlock is in the process of being cached.
*/
CACHING,
/**
* The MappableBlock was in the process of being cached, but it was
* cancelled. Only the FsDatasetCache#WorkerTask can remove cancelled
* MappableBlock objects.
*/
CACHING_CANCELLED,
/**
* The MappableBlock is in the cache.
*/
CACHED,
/**
* The MappableBlock is in the process of uncaching.
*/
UNCACHING;
/**
* Whether we should advertise this block as cached to the NameNode and
* clients.
*/
public boolean shouldAdvertise() {
return (this == CACHED);
}
}
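  // State transitions, as driven by cacheBlock()/uncacheBlock() and the
  // CachingTask/UncachingTask defined below:
  //   (absent)          -> CACHING            cacheBlock()
  //   CACHING           -> CACHED             CachingTask succeeds
  //   CACHING           -> CACHING_CANCELLED  uncacheBlock() before caching
  //                                           finishes
  //   CACHING_CANCELLED -> (removed)          CachingTask observes the
  //                                           cancellation
  //   CACHED            -> UNCACHING          uncacheBlock()
  //   UNCACHING         -> (removed)          UncachingTask
  // A failed CachingTask also removes the entry outright.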
private static final Logger LOG = LoggerFactory.getLogger(FsDatasetCache
.class);
/**
* Stores MappableBlock objects and the states they're in.
*/
private final HashMap<ExtendedBlockId, Value> mappableBlockMap =
new HashMap<ExtendedBlockId, Value>();
private final AtomicLong numBlocksCached = new AtomicLong(0);
private final FsDatasetImpl dataset;
private final ThreadPoolExecutor uncachingExecutor;
private final ScheduledThreadPoolExecutor deferredUncachingExecutor;
private final long revocationMs;
private final long revocationPollingMs;
/**
* The approximate amount of cache space in use.
*
* This number is an overestimate, counting bytes that will be used only
* if pending caching operations succeed. It does not take into account
* pending uncaching operations.
*
* This overestimate is more useful to the NameNode than an underestimate,
* since we don't want the NameNode to assign us more replicas than
* we can cache, because of the current batch of operations.
*/
private final UsedBytesCount usedBytesCount;
public static class PageRounder {
private final long osPageSize =
NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
/**
* Round up a number to the operating system page size.
*/
public long roundUp(long count) {
return (count + osPageSize - 1) & (~(osPageSize - 1));
}
/**
* Round down a number to the operating system page size.
*/
public long roundDown(long count) {
return count & (~(osPageSize - 1));
}
}
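  // Illustrative arithmetic for PageRounder (assuming a 4096-byte OS page
  // size): roundUp(1) == 4096, roundUp(4096) == 4096, roundUp(4097) == 8192;
  // roundDown(4095) == 0, roundDown(8191) == 4096. The bit masks are valid
  // because the page size is a power of two.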
private class UsedBytesCount {
private final AtomicLong usedBytes = new AtomicLong(0);
private final PageRounder rounder = new PageRounder();
/**
* Try to reserve more bytes.
*
* @param count The number of bytes to add. We will round this
* up to the page size.
*
* @return The new number of usedBytes if we succeeded;
* -1 if we failed.
*/
long reserve(long count) {
count = rounder.roundUp(count);
while (true) {
long cur = usedBytes.get();
long next = cur + count;
if (next > maxBytes) {
return -1;
}
if (usedBytes.compareAndSet(cur, next)) {
return next;
}
}
}
/**
* Release some bytes that we're using.
*
* @param count The number of bytes to release. We will round this
* up to the page size.
*
* @return The new number of usedBytes.
*/
long release(long count) {
count = rounder.roundUp(count);
return usedBytes.addAndGet(-count);
}
/**
* Release some bytes that we're using rounded down to the page size.
*
* @param count The number of bytes to release. We will round this
* down to the page size.
*
* @return The new number of usedBytes.
*/
long releaseRoundDown(long count) {
count = rounder.roundDown(count);
return usedBytes.addAndGet(-count);
}
long get() {
return usedBytes.get();
}
}
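  // Accounting sketch for UsedBytesCount (illustrative, assuming a 4096-byte
  // page size and maxBytes = 8192): reserve(100) rounds up to 4096 and
  // returns 4096; a following reserve(4097) needs another 8192 bytes and
  // returns -1 since 4096 + 8192 > 8192; release(100) rounds up to 4096 and
  // brings usedBytes back to 0.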
/**
* The total cache capacity in bytes.
*/
private final long maxBytes;
/**
* Number of cache commands that could not be completed successfully
*/
final AtomicLong numBlocksFailedToCache = new AtomicLong(0);
/**
* Number of uncache commands that could not be completed successfully
*/
final AtomicLong numBlocksFailedToUncache = new AtomicLong(0);
public FsDatasetCache(FsDatasetImpl dataset) {
this.dataset = dataset;
this.maxBytes = dataset.datanode.getDnConf().getMaxLockedMemory();
ThreadFactory workerFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("FsDatasetCache-%d-" + dataset.toString())
.build();
this.usedBytesCount = new UsedBytesCount();
this.uncachingExecutor = new ThreadPoolExecutor(
0, 1,
60, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(),
workerFactory);
this.uncachingExecutor.allowCoreThreadTimeOut(true);
this.deferredUncachingExecutor = new ScheduledThreadPoolExecutor(
1, workerFactory);
this.revocationMs = dataset.datanode.getConf().getLong(
DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS,
DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS_DEFAULT);
long confRevocationPollingMs = dataset.datanode.getConf().getLong(
DFS_DATANODE_CACHE_REVOCATION_POLLING_MS,
DFS_DATANODE_CACHE_REVOCATION_POLLING_MS_DEFAULT);
long minRevocationPollingMs = revocationMs / 2;
if (minRevocationPollingMs < confRevocationPollingMs) {
throw new RuntimeException("configured value " +
confRevocationPollingMs + "for " +
DFS_DATANODE_CACHE_REVOCATION_POLLING_MS +
" is too high. It must not be more than half of the " +
"value of " + DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS +
". Reconfigure this to " + minRevocationPollingMs);
}
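    // Illustrative constraint (not tied to the actual default values): with
    // a 10-minute revocation timeout the polling interval must be at most
    // 5 minutes, so an anchored replica is re-checked at least twice before
    // it is forcibly uncached.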
this.revocationPollingMs = confRevocationPollingMs;
}
/**
* @return List of cached blocks suitable for translation into a
* {@link BlockListAsLongs} for a cache report.
*/
synchronized List<Long> getCachedBlocks(String bpid) {
List<Long> blocks = new ArrayList<Long>();
for (Iterator<Entry<ExtendedBlockId, Value>> iter =
mappableBlockMap.entrySet().iterator(); iter.hasNext(); ) {
Entry<ExtendedBlockId, Value> entry = iter.next();
if (entry.getKey().getBlockPoolId().equals(bpid)) {
if (entry.getValue().state.shouldAdvertise()) {
blocks.add(entry.getKey().getBlockId());
}
}
}
return blocks;
}
/**
* Attempt to begin caching a block.
*/
synchronized void cacheBlock(long blockId, String bpid,
String blockFileName, long length, long genstamp,
Executor volumeExecutor) {
ExtendedBlockId key = new ExtendedBlockId(blockId, bpid);
Value prevValue = mappableBlockMap.get(key);
if (prevValue != null) {
LOG.debug("Block with id {}, pool {} already exists in the "
+ "FsDatasetCache with state {}", blockId, bpid, prevValue.state
);
numBlocksFailedToCache.incrementAndGet();
return;
}
mappableBlockMap.put(key, new Value(null, State.CACHING));
volumeExecutor.execute(
new CachingTask(key, blockFileName, length, genstamp));
LOG.debug("Initiating caching for Block with id {}, pool {}", blockId,
bpid);
}
synchronized void uncacheBlock(String bpid, long blockId) {
ExtendedBlockId key = new ExtendedBlockId(blockId, bpid);
Value prevValue = mappableBlockMap.get(key);
boolean deferred = false;
if (!dataset.datanode.getShortCircuitRegistry().
processBlockMunlockRequest(key)) {
deferred = true;
}
if (prevValue == null) {
LOG.debug("Block with id {}, pool {} does not need to be uncached, "
+ "because it is not currently in the mappableBlockMap.", blockId,
bpid);
numBlocksFailedToUncache.incrementAndGet();
return;
}
switch (prevValue.state) {
case CACHING:
LOG.debug("Cancelling caching for block with id {}, pool {}.", blockId,
bpid);
mappableBlockMap.put(key,
new Value(prevValue.mappableBlock, State.CACHING_CANCELLED));
break;
case CACHED:
mappableBlockMap.put(key,
new Value(prevValue.mappableBlock, State.UNCACHING));
if (deferred) {
if (LOG.isDebugEnabled()) {
LOG.debug("{} is anchored, and can't be uncached now. Scheduling it " +
"for uncaching in {} ",
key, DurationFormatUtils.formatDurationHMS(revocationPollingMs));
}
deferredUncachingExecutor.schedule(
new UncachingTask(key, revocationMs),
revocationPollingMs, TimeUnit.MILLISECONDS);
} else {
LOG.debug("{} has been scheduled for immediate uncaching.", key);
uncachingExecutor.execute(new UncachingTask(key, 0));
}
break;
default:
LOG.debug("Block with id {}, pool {} does not need to be uncached, "
+ "because it is in state {}.", blockId, bpid, prevValue.state);
numBlocksFailedToUncache.incrementAndGet();
break;
}
}
/**
* Try to reserve more bytes.
*
* @param count The number of bytes to add. We will round this
* up to the page size.
*
* @return The new number of usedBytes if we succeeded;
* -1 if we failed.
*/
long reserve(long count) {
return usedBytesCount.reserve(count);
}
/**
* Release some bytes that we're using.
*
* @param count The number of bytes to release. We will round this
* up to the page size.
*
* @return The new number of usedBytes.
*/
long release(long count) {
return usedBytesCount.release(count);
}
/**
* Release some bytes that we're using rounded down to the page size.
*
* @param count The number of bytes to release. We will round this
* down to the page size.
*
* @return The new number of usedBytes.
*/
long releaseRoundDown(long count) {
return usedBytesCount.releaseRoundDown(count);
}
/**
* Get the OS page size.
*
* @return the OS page size.
*/
long getOsPageSize() {
return usedBytesCount.rounder.osPageSize;
}
/**
* Round up to the OS page size.
*/
long roundUpPageSize(long count) {
return usedBytesCount.rounder.roundUp(count);
}
/**
* Background worker that mmaps, mlocks, and checksums a block
*/
private class CachingTask implements Runnable {
private final ExtendedBlockId key;
private final String blockFileName;
private final long length;
private final long genstamp;
CachingTask(ExtendedBlockId key, String blockFileName, long length, long genstamp) {
this.key = key;
this.blockFileName = blockFileName;
this.length = length;
this.genstamp = genstamp;
}
@Override
public void run() {
boolean success = false;
FileInputStream blockIn = null, metaIn = null;
MappableBlock mappableBlock = null;
ExtendedBlock extBlk = new ExtendedBlock(key.getBlockPoolId(),
key.getBlockId(), length, genstamp);
long newUsedBytes = reserve(length);
boolean reservedBytes = false;
try {
if (newUsedBytes < 0) {
LOG.warn("Failed to cache " + key + ": could not reserve " + length +
" more bytes in the cache: " +
DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY +
" of " + maxBytes + " exceeded.");
return;
}
reservedBytes = true;
try {
blockIn = (FileInputStream)dataset.getBlockInputStream(extBlk, 0);
metaIn = DatanodeUtil.getMetaDataInputStream(extBlk, dataset);
} catch (ClassCastException e) {
LOG.warn("Failed to cache " + key +
": Underlying blocks are not backed by files.", e);
return;
} catch (FileNotFoundException e) {
LOG.info("Failed to cache " + key + ": failed to find backing " +
"files.");
return;
} catch (IOException e) {
LOG.warn("Failed to cache " + key + ": failed to open file", e);
return;
}
try {
mappableBlock = MappableBlock.
load(length, blockIn, metaIn, blockFileName);
} catch (ChecksumException e) {
// Exception message is bogus since this wasn't caused by a file read
LOG.warn("Failed to cache " + key + ": checksum verification failed.");
return;
} catch (IOException e) {
LOG.warn("Failed to cache " + key, e);
return;
}
synchronized (FsDatasetCache.this) {
Value value = mappableBlockMap.get(key);
Preconditions.checkNotNull(value);
Preconditions.checkState(value.state == State.CACHING ||
value.state == State.CACHING_CANCELLED);
if (value.state == State.CACHING_CANCELLED) {
mappableBlockMap.remove(key);
LOG.warn("Caching of " + key + " was cancelled.");
return;
}
mappableBlockMap.put(key, new Value(mappableBlock, State.CACHED));
}
LOG.debug("Successfully cached {}. We are now caching {} bytes in"
+ " total.", key, newUsedBytes);
dataset.datanode.getShortCircuitRegistry().processBlockMlockEvent(key);
numBlocksCached.addAndGet(1);
dataset.datanode.getMetrics().incrBlocksCached(1);
success = true;
} finally {
IOUtils.closeQuietly(blockIn);
IOUtils.closeQuietly(metaIn);
if (!success) {
if (reservedBytes) {
release(length);
}
LOG.debug("Caching of {} was aborted. We are now caching only {} "
+ "bytes in total.", key, usedBytesCount.get());
if (mappableBlock != null) {
mappableBlock.close();
}
numBlocksFailedToCache.incrementAndGet();
synchronized (FsDatasetCache.this) {
mappableBlockMap.remove(key);
}
}
}
}
}
private class UncachingTask implements Runnable {
private final ExtendedBlockId key;
private final long revocationTimeMs;
UncachingTask(ExtendedBlockId key, long revocationDelayMs) {
this.key = key;
if (revocationDelayMs == 0) {
this.revocationTimeMs = 0;
} else {
this.revocationTimeMs = revocationDelayMs + Time.monotonicNow();
}
}
private boolean shouldDefer() {
/* If revocationTimeMs == 0, this is an immediate uncache request.
* No clients were anchored at the time we made the request. */
if (revocationTimeMs == 0) {
return false;
}
/* Let's check if any clients still have this block anchored. */
boolean anchored =
!dataset.datanode.getShortCircuitRegistry().
processBlockMunlockRequest(key);
if (!anchored) {
LOG.debug("Uncaching {} now that it is no longer in use " +
"by any clients.", key);
return false;
}
long delta = revocationTimeMs - Time.monotonicNow();
if (delta < 0) {
LOG.warn("Forcibly uncaching {} after {} " +
"because client(s) {} refused to stop using it.", key,
            DurationFormatUtils.formatDurationHMS(revocationMs),
dataset.datanode.getShortCircuitRegistry().getClientNames(key));
return false;
}
LOG.info("Replica {} still can't be uncached because some " +
"clients continue to use it. Will wait for {}", key,
DurationFormatUtils.formatDurationHMS(delta));
return true;
}
@Override
public void run() {
Value value;
if (shouldDefer()) {
deferredUncachingExecutor.schedule(
this, revocationPollingMs, TimeUnit.MILLISECONDS);
return;
}
synchronized (FsDatasetCache.this) {
value = mappableBlockMap.get(key);
}
Preconditions.checkNotNull(value);
Preconditions.checkArgument(value.state == State.UNCACHING);
IOUtils.closeQuietly(value.mappableBlock);
synchronized (FsDatasetCache.this) {
mappableBlockMap.remove(key);
}
long newUsedBytes = release(value.mappableBlock.getLength());
numBlocksCached.addAndGet(-1);
dataset.datanode.getMetrics().incrBlocksUncached(1);
      if (revocationTimeMs != 0) {
        LOG.debug("Deferred uncaching of {} completed. usedBytes = {}",
            key, newUsedBytes);
      } else {
        LOG.debug("Uncaching of {} completed. usedBytes = {}",
            key, newUsedBytes);
      }
}
}
// Stats related methods for FSDatasetMBean
/**
* Get the approximate amount of cache space used.
*/
public long getCacheUsed() {
return usedBytesCount.get();
}
/**
* Get the maximum amount of bytes we can cache. This is a constant.
*/
public long getCacheCapacity() {
return maxBytes;
}
public long getNumBlocksFailedToCache() {
return numBlocksFailedToCache.get();
}
public long getNumBlocksFailedToUncache() {
return numBlocksFailedToUncache.get();
}
public long getNumBlocksCached() {
return numBlocksCached.get();
}
public synchronized boolean isCached(String bpid, long blockId) {
ExtendedBlockId block = new ExtendedBlockId(blockId, bpid);
Value val = mappableBlockMap.get(block);
return (val != null) && val.state.shouldAdvertise();
}
}
| 20,713 | 32.248796 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.io.File;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.Time;
class FsVolumeList {
private final AtomicReference<FsVolumeImpl[]> volumes =
new AtomicReference<>(new FsVolumeImpl[0]);
// Tracks volume failures, sorted by volume path.
private final Map<String, VolumeFailureInfo> volumeFailureInfos =
Collections.synchronizedMap(new TreeMap<String, VolumeFailureInfo>());
  private final Object checkDirsMutex = new Object();
private final VolumeChoosingPolicy<FsVolumeImpl> blockChooser;
private final BlockScanner blockScanner;
FsVolumeList(List<VolumeFailureInfo> initialVolumeFailureInfos,
BlockScanner blockScanner,
VolumeChoosingPolicy<FsVolumeImpl> blockChooser) {
this.blockChooser = blockChooser;
this.blockScanner = blockScanner;
for (VolumeFailureInfo volumeFailureInfo: initialVolumeFailureInfos) {
volumeFailureInfos.put(volumeFailureInfo.getFailedStorageLocation(),
volumeFailureInfo);
}
}
/**
* Return an immutable list view of all the volumes.
*/
List<FsVolumeImpl> getVolumes() {
return Collections.unmodifiableList(Arrays.asList(volumes.get()));
}
private FsVolumeReference chooseVolume(List<FsVolumeImpl> list, long blockSize)
throws IOException {
while (true) {
FsVolumeImpl volume = blockChooser.chooseVolume(list, blockSize);
try {
return volume.obtainReference();
} catch (ClosedChannelException e) {
FsDatasetImpl.LOG.warn("Chosen a closed volume: " + volume);
        // blockChooser.chooseVolume throws DiskOutOfSpaceException when the
        // list is empty, indicating that all volumes are closed.
list.remove(volume);
}
}
}
/**
* Get next volume.
*
* @param blockSize free space needed on the volume
* @param storageType the desired {@link StorageType}
* @return next volume to store the block in.
*/
FsVolumeReference getNextVolume(StorageType storageType, long blockSize)
throws IOException {
// Get a snapshot of currently available volumes.
final FsVolumeImpl[] curVolumes = volumes.get();
final List<FsVolumeImpl> list = new ArrayList<>(curVolumes.length);
for(FsVolumeImpl v : curVolumes) {
if (v.getStorageType() == storageType) {
list.add(v);
}
}
return chooseVolume(list, blockSize);
}
/**
* Get next volume.
*
* @param blockSize free space needed on the volume
* @return next volume to store the block in.
*/
FsVolumeReference getNextTransientVolume(long blockSize) throws IOException {
// Get a snapshot of currently available volumes.
final List<FsVolumeImpl> curVolumes = getVolumes();
final List<FsVolumeImpl> list = new ArrayList<>(curVolumes.size());
for(FsVolumeImpl v : curVolumes) {
if (v.isTransientStorage()) {
list.add(v);
}
}
return chooseVolume(list, blockSize);
}
long getDfsUsed() throws IOException {
long dfsUsed = 0L;
for (FsVolumeImpl v : volumes.get()) {
try(FsVolumeReference ref = v.obtainReference()) {
dfsUsed += v.getDfsUsed();
} catch (ClosedChannelException e) {
// ignore.
}
}
return dfsUsed;
}
long getBlockPoolUsed(String bpid) throws IOException {
long dfsUsed = 0L;
for (FsVolumeImpl v : volumes.get()) {
try (FsVolumeReference ref = v.obtainReference()) {
dfsUsed += v.getBlockPoolUsed(bpid);
} catch (ClosedChannelException e) {
// ignore.
}
}
return dfsUsed;
}
long getCapacity() {
long capacity = 0L;
for (FsVolumeImpl v : volumes.get()) {
try (FsVolumeReference ref = v.obtainReference()) {
capacity += v.getCapacity();
} catch (IOException e) {
// ignore.
}
}
return capacity;
}
long getRemaining() throws IOException {
long remaining = 0L;
for (FsVolumeSpi vol : volumes.get()) {
try (FsVolumeReference ref = vol.obtainReference()) {
remaining += vol.getAvailable();
} catch (ClosedChannelException e) {
// ignore
}
}
return remaining;
}
void getAllVolumesMap(final String bpid,
final ReplicaMap volumeMap,
final RamDiskReplicaTracker ramDiskReplicaMap)
throws IOException {
long totalStartTime = Time.monotonicNow();
final List<IOException> exceptions = Collections.synchronizedList(
new ArrayList<IOException>());
List<Thread> replicaAddingThreads = new ArrayList<Thread>();
for (final FsVolumeImpl v : volumes.get()) {
Thread t = new Thread() {
public void run() {
try (FsVolumeReference ref = v.obtainReference()) {
FsDatasetImpl.LOG.info("Adding replicas to map for block pool " +
bpid + " on volume " + v + "...");
long startTime = Time.monotonicNow();
v.getVolumeMap(bpid, volumeMap, ramDiskReplicaMap);
long timeTaken = Time.monotonicNow() - startTime;
FsDatasetImpl.LOG.info("Time to add replicas to map for block pool"
+ " " + bpid + " on volume " + v + ": " + timeTaken + "ms");
} catch (ClosedChannelException e) {
FsDatasetImpl.LOG.info("The volume " + v + " is closed while " +
"addng replicas, ignored.");
} catch (IOException ioe) {
FsDatasetImpl.LOG.info("Caught exception while adding replicas " +
"from " + v + ". Will throw later.", ioe);
exceptions.add(ioe);
}
}
};
replicaAddingThreads.add(t);
t.start();
}
for (Thread t : replicaAddingThreads) {
try {
t.join();
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
if (!exceptions.isEmpty()) {
throw exceptions.get(0);
}
long totalTimeTaken = Time.monotonicNow() - totalStartTime;
FsDatasetImpl.LOG.info("Total time to add all replicas to map: "
+ totalTimeTaken + "ms");
}
/**
* Calls {@link FsVolumeImpl#checkDirs()} on each volume.
*
   * Use checkDirsMutex to allow only one instance of checkDirs() to run at
   * a time.
   * 
   * @return set of all the failed volumes.
*/
Set<File> checkDirs() {
synchronized(checkDirsMutex) {
Set<File> failedVols = null;
// Make a copy of volumes for performing modification
final List<FsVolumeImpl> volumeList = getVolumes();
for(Iterator<FsVolumeImpl> i = volumeList.iterator(); i.hasNext(); ) {
final FsVolumeImpl fsv = i.next();
try (FsVolumeReference ref = fsv.obtainReference()) {
fsv.checkDirs();
} catch (DiskErrorException e) {
FsDatasetImpl.LOG.warn("Removing failed volume " + fsv + ": ", e);
if (failedVols == null) {
failedVols = new HashSet<>(1);
}
failedVols.add(new File(fsv.getBasePath()).getAbsoluteFile());
addVolumeFailureInfo(fsv);
removeVolume(fsv);
} catch (ClosedChannelException e) {
FsDatasetImpl.LOG.debug("Caught exception when obtaining " +
"reference count on closed volume", e);
} catch (IOException e) {
FsDatasetImpl.LOG.error("Unexpected IOException", e);
}
}
if (failedVols != null && failedVols.size() > 0) {
FsDatasetImpl.LOG.warn("Completed checkDirs. Found " + failedVols.size()
+ " failure volumes.");
}
return failedVols;
}
}
@Override
public String toString() {
return Arrays.toString(volumes.get());
}
/**
* Dynamically add new volumes to the existing volumes that this DN manages.
*
* @param ref a reference to the new FsVolumeImpl instance.
*/
void addVolume(FsVolumeReference ref) {
FsVolumeImpl volume = (FsVolumeImpl) ref.getVolume();
while (true) {
final FsVolumeImpl[] curVolumes = volumes.get();
final List<FsVolumeImpl> volumeList = Lists.newArrayList(curVolumes);
volumeList.add(volume);
if (volumes.compareAndSet(curVolumes,
volumeList.toArray(new FsVolumeImpl[volumeList.size()]))) {
break;
} else {
if (FsDatasetImpl.LOG.isDebugEnabled()) {
FsDatasetImpl.LOG.debug(
"The volume list has been changed concurrently, " +
"retry to remove volume: " + ref.getVolume().getStorageID());
}
}
}
if (blockScanner != null) {
blockScanner.addVolumeScanner(ref);
} else {
// If the volume is not put into a volume scanner, it does not need to
// hold the reference.
IOUtils.cleanup(FsDatasetImpl.LOG, ref);
}
// If the volume is used to replace a failed volume, it needs to reset the
// volume failure info for this volume.
removeVolumeFailureInfo(new File(volume.getBasePath()));
FsDatasetImpl.LOG.info("Added new volume: " +
volume.getStorageID());
}
/**
* Dynamically remove a volume in the list.
* @param target the volume instance to be removed.
*/
private void removeVolume(FsVolumeImpl target) {
while (true) {
final FsVolumeImpl[] curVolumes = volumes.get();
final List<FsVolumeImpl> volumeList = Lists.newArrayList(curVolumes);
if (volumeList.remove(target)) {
if (volumes.compareAndSet(curVolumes,
volumeList.toArray(new FsVolumeImpl[volumeList.size()]))) {
if (blockScanner != null) {
blockScanner.removeVolumeScanner(target);
}
try {
target.closeAndWait();
} catch (IOException e) {
FsDatasetImpl.LOG.warn(
"Error occurs when waiting volume to close: " + target, e);
}
target.shutdown();
FsDatasetImpl.LOG.info("Removed volume: " + target);
break;
} else {
if (FsDatasetImpl.LOG.isDebugEnabled()) {
FsDatasetImpl.LOG.debug(
"The volume list has been changed concurrently, " +
"retry to remove volume: " + target);
}
}
} else {
if (FsDatasetImpl.LOG.isDebugEnabled()) {
FsDatasetImpl.LOG.debug("Volume " + target +
" does not exist or is removed by others.");
}
break;
}
}
}
/**
* Dynamically remove volume in the list.
* @param volume the volume to be removed.
* @param clearFailure set true to remove failure info for this volume.
*/
void removeVolume(File volume, boolean clearFailure) {
// Make a copy of volumes to remove one volume.
final FsVolumeImpl[] curVolumes = volumes.get();
final List<FsVolumeImpl> volumeList = Lists.newArrayList(curVolumes);
for (Iterator<FsVolumeImpl> it = volumeList.iterator(); it.hasNext(); ) {
FsVolumeImpl fsVolume = it.next();
String basePath, targetPath;
basePath = new File(fsVolume.getBasePath()).getAbsolutePath();
targetPath = volume.getAbsolutePath();
if (basePath.equals(targetPath)) {
// Make sure the removed volume is the one in the curVolumes.
removeVolume(fsVolume);
}
}
if (clearFailure) {
removeVolumeFailureInfo(volume);
}
}
VolumeFailureInfo[] getVolumeFailureInfos() {
Collection<VolumeFailureInfo> infos = volumeFailureInfos.values();
return infos.toArray(new VolumeFailureInfo[infos.size()]);
}
void addVolumeFailureInfo(VolumeFailureInfo volumeFailureInfo) {
volumeFailureInfos.put(volumeFailureInfo.getFailedStorageLocation(),
volumeFailureInfo);
}
private void addVolumeFailureInfo(FsVolumeImpl vol) {
addVolumeFailureInfo(new VolumeFailureInfo(
new File(vol.getBasePath()).getAbsolutePath(),
Time.now(),
vol.getCapacity()));
}
private void removeVolumeFailureInfo(File vol) {
volumeFailureInfos.remove(vol.getAbsolutePath());
}
void addBlockPool(final String bpid, final Configuration conf) throws IOException {
long totalStartTime = Time.monotonicNow();
final List<IOException> exceptions = Collections.synchronizedList(
new ArrayList<IOException>());
List<Thread> blockPoolAddingThreads = new ArrayList<Thread>();
for (final FsVolumeImpl v : volumes.get()) {
Thread t = new Thread() {
public void run() {
try (FsVolumeReference ref = v.obtainReference()) {
FsDatasetImpl.LOG.info("Scanning block pool " + bpid +
" on volume " + v + "...");
long startTime = Time.monotonicNow();
v.addBlockPool(bpid, conf);
long timeTaken = Time.monotonicNow() - startTime;
FsDatasetImpl.LOG.info("Time taken to scan block pool " + bpid +
" on " + v + ": " + timeTaken + "ms");
} catch (ClosedChannelException e) {
// ignore.
} catch (IOException ioe) {
FsDatasetImpl.LOG.info("Caught exception while scanning " + v +
". Will throw later.", ioe);
exceptions.add(ioe);
}
}
};
blockPoolAddingThreads.add(t);
t.start();
}
for (Thread t : blockPoolAddingThreads) {
try {
t.join();
} catch (InterruptedException ie) {
throw new IOException(ie);
}
}
if (!exceptions.isEmpty()) {
throw exceptions.get(0);
}
long totalTimeTaken = Time.monotonicNow() - totalStartTime;
FsDatasetImpl.LOG.info("Total time to scan all replicas for block pool " +
bpid + ": " + totalTimeTaken + "ms");
}
void removeBlockPool(String bpid, Map<DatanodeStorage, BlockListAsLongs>
blocksPerVolume) {
for (FsVolumeImpl v : volumes.get()) {
v.shutdownBlockPool(bpid, blocksPerVolume.get(v.toDatanodeStorage()));
}
}
void shutdown() {
for (FsVolumeImpl volume : volumes.get()) {
if(volume != null) {
volume.shutdown();
}
}
}
}
| 15,886 | 33.993392 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import com.google.common.collect.TreeMultimap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.Time;
import java.io.File;
import java.util.*;
/**
* An implementation of RamDiskReplicaTracker that uses an LRU
* eviction scheme.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class RamDiskReplicaLruTracker extends RamDiskReplicaTracker {
private class RamDiskReplicaLru extends RamDiskReplica {
long lastUsedTime;
private RamDiskReplicaLru(String bpid, long blockId,
FsVolumeImpl ramDiskVolume,
long lockedBytesReserved) {
super(bpid, blockId, ramDiskVolume, lockedBytesReserved);
}
@Override
public int hashCode() {
return super.hashCode();
}
@Override
public boolean equals(Object other) {
return super.equals(other);
}
}
/**
* Map of blockpool ID to <map of blockID to ReplicaInfo>.
*/
Map<String, Map<Long, RamDiskReplicaLru>> replicaMaps;
/**
* Queue of replicas that need to be written to disk.
* Stale entries are GC'd by dequeueNextReplicaToPersist.
*/
Queue<RamDiskReplicaLru> replicasNotPersisted;
/**
* Map of persisted replicas ordered by their last use times.
*/
TreeMultimap<Long, RamDiskReplicaLru> replicasPersisted;
RamDiskReplicaLruTracker() {
replicaMaps = new HashMap<>();
replicasNotPersisted = new LinkedList<>();
replicasPersisted = TreeMultimap.create();
}
@Override
synchronized void addReplica(final String bpid, final long blockId,
final FsVolumeImpl transientVolume,
long lockedBytesReserved) {
Map<Long, RamDiskReplicaLru> map = replicaMaps.get(bpid);
if (map == null) {
map = new HashMap<>();
replicaMaps.put(bpid, map);
}
RamDiskReplicaLru ramDiskReplicaLru =
new RamDiskReplicaLru(bpid, blockId, transientVolume,
lockedBytesReserved);
map.put(blockId, ramDiskReplicaLru);
replicasNotPersisted.add(ramDiskReplicaLru);
}
@Override
synchronized void touch(final String bpid,
final long blockId) {
Map<Long, RamDiskReplicaLru> map = replicaMaps.get(bpid);
RamDiskReplicaLru ramDiskReplicaLru = map.get(blockId);
if (ramDiskReplicaLru == null) {
return;
}
ramDiskReplicaLru.numReads.getAndIncrement();
// Reinsert the replica with its new timestamp.
if (replicasPersisted.remove(ramDiskReplicaLru.lastUsedTime, ramDiskReplicaLru)) {
ramDiskReplicaLru.lastUsedTime = Time.monotonicNow();
replicasPersisted.put(ramDiskReplicaLru.lastUsedTime, ramDiskReplicaLru);
}
}
@Override
synchronized void recordStartLazyPersist(
final String bpid, final long blockId, FsVolumeImpl checkpointVolume) {
Map<Long, RamDiskReplicaLru> map = replicaMaps.get(bpid);
RamDiskReplicaLru ramDiskReplicaLru = map.get(blockId);
ramDiskReplicaLru.setLazyPersistVolume(checkpointVolume);
}
@Override
synchronized void recordEndLazyPersist(
final String bpid, final long blockId, final File[] savedFiles) {
Map<Long, RamDiskReplicaLru> map = replicaMaps.get(bpid);
RamDiskReplicaLru ramDiskReplicaLru = map.get(blockId);
if (ramDiskReplicaLru == null) {
throw new IllegalStateException("Unknown replica bpid=" +
bpid + "; blockId=" + blockId);
}
ramDiskReplicaLru.recordSavedBlockFiles(savedFiles);
if (replicasNotPersisted.peek() == ramDiskReplicaLru) {
// Common case.
replicasNotPersisted.remove();
} else {
// Caller error? Fallback to O(n) removal.
replicasNotPersisted.remove(ramDiskReplicaLru);
}
ramDiskReplicaLru.lastUsedTime = Time.monotonicNow();
replicasPersisted.put(ramDiskReplicaLru.lastUsedTime, ramDiskReplicaLru);
ramDiskReplicaLru.isPersisted = true;
}
@Override
synchronized RamDiskReplicaLru dequeueNextReplicaToPersist() {
while (replicasNotPersisted.size() != 0) {
RamDiskReplicaLru ramDiskReplicaLru = replicasNotPersisted.remove();
Map<Long, RamDiskReplicaLru> replicaMap =
replicaMaps.get(ramDiskReplicaLru.getBlockPoolId());
if (replicaMap != null && replicaMap.get(ramDiskReplicaLru.getBlockId()) != null) {
return ramDiskReplicaLru;
}
// The replica no longer exists, look for the next one.
}
return null;
}
@Override
synchronized void reenqueueReplicaNotPersisted(final RamDiskReplica ramDiskReplicaLru) {
replicasNotPersisted.add((RamDiskReplicaLru) ramDiskReplicaLru);
}
@Override
synchronized int numReplicasNotPersisted() {
return replicasNotPersisted.size();
}
@Override
synchronized RamDiskReplicaLru getNextCandidateForEviction() {
final Iterator<RamDiskReplicaLru> it = replicasPersisted.values().iterator();
while (it.hasNext()) {
final RamDiskReplicaLru ramDiskReplicaLru = it.next();
it.remove();
Map<Long, RamDiskReplicaLru> replicaMap =
replicaMaps.get(ramDiskReplicaLru.getBlockPoolId());
if (replicaMap != null && replicaMap.get(ramDiskReplicaLru.getBlockId()) != null) {
return ramDiskReplicaLru;
}
// The replica no longer exists, look for the next one.
}
return null;
}
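  // Note on the eviction order above: replicasPersisted is a TreeMultimap
  // keyed by lastUsedTime, and its keys iterate in ascending order, so the
  // first still-live entry returned here is the least recently used
  // persisted replica.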
/**
* Discard any state we are tracking for the given replica. This could mean
* the block is either deleted from the block space or the replica is no longer
* on transient storage.
*
* @param deleteSavedCopies true if we should delete the saved copies on
* persistent storage. This should be set by the
* caller when the block is no longer needed.
*/
@Override
synchronized void discardReplica(
final String bpid, final long blockId,
boolean deleteSavedCopies) {
Map<Long, RamDiskReplicaLru> map = replicaMaps.get(bpid);
if (map == null) {
return;
}
RamDiskReplicaLru ramDiskReplicaLru = map.get(blockId);
if (ramDiskReplicaLru == null) {
return;
}
if (deleteSavedCopies) {
ramDiskReplicaLru.deleteSavedFiles();
}
map.remove(blockId);
replicasPersisted.remove(ramDiskReplicaLru.lastUsedTime, ramDiskReplicaLru);
// replicasNotPersisted will be lazily GC'ed.
}
@Override
synchronized RamDiskReplica getReplica(
final String bpid, final long blockId) {
Map<Long, RamDiskReplicaLru> map = replicaMaps.get(bpid);
if (map == null) {
return null;
}
return map.get(blockId);
}
}
| 7,585 | 30.608333 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.*;
import java.util.concurrent.Executor;
import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;
import javax.management.StandardMBean;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetricHelper;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.UnexpectedReplicaStateException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
/**************************************************
* FSDataset manages a set of data blocks. Each block
* has a unique name and an extent on disk.
*
***************************************************/
@InterfaceAudience.Private
class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
static final Log LOG = LogFactory.getLog(FsDatasetImpl.class);
private final static boolean isNativeIOAvailable;
static {
isNativeIOAvailable = NativeIO.isAvailable();
if (Path.WINDOWS && !isNativeIOAvailable) {
LOG.warn("Data node cannot fully support concurrent reading"
+ " and writing without native code extensions on Windows.");
}
}
@Override // FsDatasetSpi
public FsVolumeReferences getFsVolumeReferences() {
return new FsVolumeReferences(volumes.getVolumes());
}
@Override
public DatanodeStorage getStorage(final String storageUuid) {
return storageMap.get(storageUuid);
}
@Override // FsDatasetSpi
public StorageReport[] getStorageReports(String bpid)
throws IOException {
List<StorageReport> reports;
synchronized (statsLock) {
List<FsVolumeImpl> curVolumes = volumes.getVolumes();
reports = new ArrayList<>(curVolumes.size());
for (FsVolumeImpl volume : curVolumes) {
try (FsVolumeReference ref = volume.obtainReference()) {
StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
false,
volume.getCapacity(),
volume.getDfsUsed(),
volume.getAvailable(),
volume.getBlockPoolUsed(bpid));
reports.add(sr);
} catch (ClosedChannelException e) {
continue;
}
}
}
return reports.toArray(new StorageReport[reports.size()]);
}
@Override
public synchronized FsVolumeImpl getVolume(final ExtendedBlock b) {
final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
return r != null? (FsVolumeImpl)r.getVolume(): null;
}
@Override // FsDatasetSpi
public synchronized Block getStoredBlock(String bpid, long blkid)
throws IOException {
File blockfile = getFile(bpid, blkid, false);
if (blockfile == null) {
return null;
}
final File metafile = FsDatasetUtil.findMetaFile(blockfile);
final long gs = FsDatasetUtil.parseGenerationStamp(blockfile, metafile);
return new Block(blkid, blockfile.length(), gs);
}
/**
* This should be primarily used for testing.
   * @return a clone of the replica stored in datanode memory
*/
ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
ReplicaInfo r = volumeMap.get(bpid, blockId);
if(r == null)
return null;
switch(r.getState()) {
case FINALIZED:
return new FinalizedReplica((FinalizedReplica)r);
case RBW:
return new ReplicaBeingWritten((ReplicaBeingWritten)r);
case RWR:
return new ReplicaWaitingToBeRecovered((ReplicaWaitingToBeRecovered)r);
case RUR:
return new ReplicaUnderRecovery((ReplicaUnderRecovery)r);
case TEMPORARY:
return new ReplicaInPipeline((ReplicaInPipeline)r);
}
return null;
}
@Override // FsDatasetSpi
public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
throws IOException {
File meta = FsDatasetUtil.getMetaFile(getBlockFile(b), b.getGenerationStamp());
if (meta == null || !meta.exists()) {
return null;
}
if (isNativeIOAvailable) {
return new LengthInputStream(
NativeIO.getShareDeleteFileInputStream(meta),
meta.length());
}
return new LengthInputStream(new FileInputStream(meta), meta.length());
}
final DataNode datanode;
final DataStorage dataStorage;
private final FsVolumeList volumes;
final Map<String, DatanodeStorage> storageMap;
final FsDatasetAsyncDiskService asyncDiskService;
final Daemon lazyWriter;
final FsDatasetCache cacheManager;
private final Configuration conf;
private final int volFailuresTolerated;
private volatile boolean fsRunning;
final ReplicaMap volumeMap;
final Map<String, Set<Long>> deletingBlock;
final RamDiskReplicaTracker ramDiskReplicaTracker;
final RamDiskAsyncLazyPersistService asyncLazyPersistService;
private static final int MAX_BLOCK_EVICTIONS_PER_ITERATION = 3;
private final int smallBufferSize;
// Used for synchronizing access to usage stats
private final Object statsLock = new Object();
final LocalFileSystem localFS;
private boolean blockPinningEnabled;
/**
* An FSDataset has a directory where it loads its data files.
*/
FsDatasetImpl(DataNode datanode, DataStorage storage, Configuration conf
) throws IOException {
this.fsRunning = true;
this.datanode = datanode;
this.dataStorage = storage;
this.conf = conf;
this.smallBufferSize = DFSUtil.getSmallBufferSize(conf);
// The number of volumes required for operation is the total number
// of volumes minus the number of failed volumes we can tolerate.
volFailuresTolerated =
conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);
String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf);
List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
dataLocations, storage);
int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
int volsFailed = volumeFailureInfos.size();
if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
throw new DiskErrorException("Invalid value configured for "
+ "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
+ ". Value configured is either less than 0 or >= "
+ "to the number of configured volumes (" + volsConfigured + ").");
}
if (volsFailed > volFailuresTolerated) {
throw new DiskErrorException("Too many failed volumes - "
+ "current valid volumes: " + storage.getNumStorageDirs()
+ ", volumes configured: " + volsConfigured
+ ", volumes failed: " + volsFailed
+ ", volume failures tolerated: " + volFailuresTolerated);
}
storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
volumeMap = new ReplicaMap(this);
ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this);
@SuppressWarnings("unchecked")
final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
ReflectionUtils.newInstance(conf.getClass(
DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
RoundRobinVolumeChoosingPolicy.class,
VolumeChoosingPolicy.class), conf);
volumes = new FsVolumeList(volumeFailureInfos, datanode.getBlockScanner(),
blockChooserImpl);
asyncDiskService = new FsDatasetAsyncDiskService(datanode, this);
asyncLazyPersistService = new RamDiskAsyncLazyPersistService(datanode);
deletingBlock = new HashMap<String, Set<Long>>();
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
addVolume(dataLocations, storage.getStorageDir(idx));
}
setupAsyncLazyPersistThreads();
cacheManager = new FsDatasetCache(this);
// Start the lazy writer once we have built the replica maps.
// We need to start the lazy writer even if MaxLockedMemory is set to
// zero because we may have un-persisted replicas in memory from before
// the process restart. To minimize the chances of data loss we'll
// ensure they get written to disk now.
if (ramDiskReplicaTracker.numReplicasNotPersisted() > 0 ||
datanode.getDnConf().getMaxLockedMemory() > 0) {
lazyWriter = new Daemon(new LazyWriter(conf));
lazyWriter.start();
} else {
lazyWriter = null;
}
registerMBean(datanode.getDatanodeUuid());
    // Add a Metrics2 source interface. This is the same data as the MXBean.
    // We can remove the registerMBean call in a release where we can break
    // backward compatibility.
MetricsSystem ms = DefaultMetricsSystem.instance();
ms.register("FSDatasetState", "FSDatasetState", this);
localFS = FileSystem.getLocal(conf);
blockPinningEnabled = conf.getBoolean(
DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT);
}
/**
* Gets initial volume failure information for all volumes that failed
* immediately at startup. The method works by determining the set difference
* between all configured storage locations and the actual storage locations in
* use after attempting to put all of them into service.
*
* @return each storage location that has failed
*/
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
Collection<StorageLocation> dataLocations, DataStorage storage) {
Set<String> failedLocationSet = Sets.newHashSetWithExpectedSize(
dataLocations.size());
for (StorageLocation sl: dataLocations) {
failedLocationSet.add(sl.getFile().getAbsolutePath());
}
for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
it.hasNext(); ) {
Storage.StorageDirectory sd = it.next();
failedLocationSet.remove(sd.getRoot().getAbsolutePath());
}
List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(
failedLocationSet.size());
long failureDate = Time.now();
for (String failedStorageLocation: failedLocationSet) {
volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation,
failureDate));
}
return volumeFailureInfos;
}
private void addVolume(Collection<StorageLocation> dataLocations,
Storage.StorageDirectory sd) throws IOException {
final File dir = sd.getCurrentDir();
final StorageType storageType =
getStorageTypeFromLocations(dataLocations, sd.getRoot());
// If IOException raises from FsVolumeImpl() or getVolumeMap(), there is
// nothing needed to be rolled back to make various data structures, e.g.,
// storageMap and asyncDiskService, consistent.
FsVolumeImpl fsVolume = new FsVolumeImpl(
this, sd.getStorageUuid(), dir, this.conf, storageType);
FsVolumeReference ref = fsVolume.obtainReference();
ReplicaMap tempVolumeMap = new ReplicaMap(this);
fsVolume.getVolumeMap(tempVolumeMap, ramDiskReplicaTracker);
synchronized (this) {
volumeMap.addAll(tempVolumeMap);
storageMap.put(sd.getStorageUuid(),
new DatanodeStorage(sd.getStorageUuid(),
DatanodeStorage.State.NORMAL,
storageType));
asyncDiskService.addVolume(sd.getCurrentDir());
volumes.addVolume(ref);
}
LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
}
@VisibleForTesting
public FsVolumeImpl createFsVolume(String storageUuid, File currentDir,
StorageType storageType) throws IOException {
return new FsVolumeImpl(this, storageUuid, currentDir, conf, storageType);
}
@Override
public void addVolume(final StorageLocation location,
final List<NamespaceInfo> nsInfos)
throws IOException {
final File dir = location.getFile();
// Prepare volume in DataStorage
final DataStorage.VolumeBuilder builder;
try {
builder = dataStorage.prepareVolume(datanode, location.getFile(), nsInfos);
} catch (IOException e) {
volumes.addVolumeFailureInfo(new VolumeFailureInfo(
location.getFile().getAbsolutePath(), Time.now()));
throw e;
}
final Storage.StorageDirectory sd = builder.getStorageDirectory();
StorageType storageType = location.getStorageType();
final FsVolumeImpl fsVolume =
createFsVolume(sd.getStorageUuid(), sd.getCurrentDir(), storageType);
final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);
ArrayList<IOException> exceptions = Lists.newArrayList();
for (final NamespaceInfo nsInfo : nsInfos) {
String bpid = nsInfo.getBlockPoolID();
try {
fsVolume.addBlockPool(bpid, this.conf);
fsVolume.getVolumeMap(bpid, tempVolumeMap, ramDiskReplicaTracker);
} catch (IOException e) {
LOG.warn("Caught exception when adding " + fsVolume +
". Will throw later.", e);
exceptions.add(e);
}
}
if (!exceptions.isEmpty()) {
try {
sd.unlock();
} catch (IOException e) {
exceptions.add(e);
}
throw MultipleIOException.createIOException(exceptions);
}
final FsVolumeReference ref = fsVolume.obtainReference();
setupAsyncLazyPersistThread(fsVolume);
builder.build();
synchronized (this) {
volumeMap.addAll(tempVolumeMap);
storageMap.put(sd.getStorageUuid(),
new DatanodeStorage(sd.getStorageUuid(),
DatanodeStorage.State.NORMAL,
storageType));
asyncDiskService.addVolume(sd.getCurrentDir());
volumes.addVolume(ref);
}
LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
}
/**
* Removes a set of volumes from FsDataset.
* @param volumesToRemove a set of absolute root path of each volume.
* @param clearFailure set true to clear failure information.
*
* DataNode should call this function before calling
* {@link DataStorage#removeVolumes(java.util.Collection)}.
*/
@Override
public synchronized void removeVolumes(
Set<File> volumesToRemove, boolean clearFailure) {
// Make sure that all volumes are absolute path.
for (File vol : volumesToRemove) {
Preconditions.checkArgument(vol.isAbsolute(),
String.format("%s is not absolute path.", vol.getPath()));
}
for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
final File absRoot = sd.getRoot().getAbsoluteFile();
if (volumesToRemove.contains(absRoot)) {
LOG.info("Removing " + absRoot + " from FsDataset.");
// Disable the volume from the service.
asyncDiskService.removeVolume(sd.getCurrentDir());
volumes.removeVolume(absRoot, clearFailure);
        // Remove all replica information for the blocks on the volume. Unlike
// updating the volumeMap in addVolume(), this operation does not scan
// disks.
for (String bpid : volumeMap.getBlockPoolList()) {
for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
it.hasNext(); ) {
ReplicaInfo block = it.next();
final File absBasePath =
new File(block.getVolume().getBasePath()).getAbsoluteFile();
if (absBasePath.equals(absRoot)) {
invalidate(bpid, block);
it.remove();
}
}
}
storageMap.remove(sd.getStorageUuid());
}
}
setupAsyncLazyPersistThreads();
}
private StorageType getStorageTypeFromLocations(
Collection<StorageLocation> dataLocations, File dir) {
for (StorageLocation dataLocation : dataLocations) {
if (dataLocation.getFile().equals(dir)) {
return dataLocation.getStorageType();
}
}
return StorageType.DEFAULT;
}
/**
* Return the total space used by dfs datanode
*/
@Override // FSDatasetMBean
public long getDfsUsed() throws IOException {
synchronized(statsLock) {
return volumes.getDfsUsed();
}
}
/**
* Return the total space used by dfs datanode
*/
@Override // FSDatasetMBean
public long getBlockPoolUsed(String bpid) throws IOException {
synchronized(statsLock) {
return volumes.getBlockPoolUsed(bpid);
}
}
/**
* Return true - if there are still valid volumes on the DataNode.
*/
@Override // FsDatasetSpi
public boolean hasEnoughResource() {
return getNumFailedVolumes() <= volFailuresTolerated;
}
/**
* Return total capacity, used and unused
*/
@Override // FSDatasetMBean
public long getCapacity() {
synchronized(statsLock) {
return volumes.getCapacity();
}
}
/**
* Return how many bytes can still be stored in the FSDataset
*/
@Override // FSDatasetMBean
public long getRemaining() throws IOException {
synchronized(statsLock) {
return volumes.getRemaining();
}
}
/**
* Return the number of failed volumes in the FSDataset.
*/
@Override // FSDatasetMBean
public int getNumFailedVolumes() {
return volumes.getVolumeFailureInfos().length;
}
@Override // FSDatasetMBean
public String[] getFailedStorageLocations() {
VolumeFailureInfo[] infos = volumes.getVolumeFailureInfos();
List<String> failedStorageLocations = Lists.newArrayListWithCapacity(
infos.length);
for (VolumeFailureInfo info: infos) {
failedStorageLocations.add(info.getFailedStorageLocation());
}
return failedStorageLocations.toArray(
new String[failedStorageLocations.size()]);
}
@Override // FSDatasetMBean
public long getLastVolumeFailureDate() {
long lastVolumeFailureDate = 0;
for (VolumeFailureInfo info: volumes.getVolumeFailureInfos()) {
long failureDate = info.getFailureDate();
if (failureDate > lastVolumeFailureDate) {
lastVolumeFailureDate = failureDate;
}
}
return lastVolumeFailureDate;
}
@Override // FSDatasetMBean
public long getEstimatedCapacityLostTotal() {
long estimatedCapacityLostTotal = 0;
for (VolumeFailureInfo info: volumes.getVolumeFailureInfos()) {
estimatedCapacityLostTotal += info.getEstimatedCapacityLost();
}
return estimatedCapacityLostTotal;
}
@Override // FsDatasetSpi
public VolumeFailureSummary getVolumeFailureSummary() {
VolumeFailureInfo[] infos = volumes.getVolumeFailureInfos();
if (infos.length == 0) {
return null;
}
List<String> failedStorageLocations = Lists.newArrayListWithCapacity(
infos.length);
long lastVolumeFailureDate = 0;
long estimatedCapacityLostTotal = 0;
for (VolumeFailureInfo info: infos) {
failedStorageLocations.add(info.getFailedStorageLocation());
long failureDate = info.getFailureDate();
if (failureDate > lastVolumeFailureDate) {
lastVolumeFailureDate = failureDate;
}
estimatedCapacityLostTotal += info.getEstimatedCapacityLost();
}
return new VolumeFailureSummary(
failedStorageLocations.toArray(new String[failedStorageLocations.size()]),
lastVolumeFailureDate, estimatedCapacityLostTotal);
}
@Override // FSDatasetMBean
public long getCacheUsed() {
return cacheManager.getCacheUsed();
}
@Override // FSDatasetMBean
public long getCacheCapacity() {
return cacheManager.getCacheCapacity();
}
@Override // FSDatasetMBean
public long getNumBlocksFailedToCache() {
return cacheManager.getNumBlocksFailedToCache();
}
@Override // FSDatasetMBean
public long getNumBlocksFailedToUncache() {
return cacheManager.getNumBlocksFailedToUncache();
}
/**
* Get metrics from the metrics source
*
* @param collector to contain the resulting metrics snapshot
* @param all if true, return all metrics even if unchanged.
*/
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
try {
DataNodeMetricHelper.getMetrics(collector, this, "FSDatasetState");
} catch (Exception e) {
LOG.warn("Exception thrown while metric collection. Exception : "
+ e.getMessage());
}
}
@Override // FSDatasetMBean
public long getNumBlocksCached() {
return cacheManager.getNumBlocksCached();
}
/**
* Find the block's on-disk length
*/
@Override // FsDatasetSpi
public long getLength(ExtendedBlock b) throws IOException {
return getBlockFile(b).length();
}
/**
* Get File name for a given block.
*/
private File getBlockFile(ExtendedBlock b) throws IOException {
return getBlockFile(b.getBlockPoolId(), b.getBlockId());
}
/**
* Get File name for a given block.
*/
File getBlockFile(String bpid, long blockId) throws IOException {
File f = validateBlockFile(bpid, blockId);
if(f == null) {
throw new IOException("BlockId " + blockId + " is not valid.");
}
return f;
}
/**
* Return the File associated with a block, without first
* checking that it exists. This should be used when the
* next operation is going to open the file for read anyway,
* and thus the exists check is redundant.
*
* @param touch if true then update the last access timestamp of the
* block. Currently used for blocks on transient storage.
*/
private File getBlockFileNoExistsCheck(ExtendedBlock b,
boolean touch)
throws IOException {
final File f;
synchronized(this) {
f = getFile(b.getBlockPoolId(), b.getLocalBlock().getBlockId(), touch);
}
if (f == null) {
throw new IOException("Block " + b + " is not valid");
}
return f;
}
@Override // FsDatasetSpi
public InputStream getBlockInputStream(ExtendedBlock b,
long seekOffset) throws IOException {
File blockFile = getBlockFileNoExistsCheck(b, true);
if (isNativeIOAvailable) {
return NativeIO.getShareDeleteFileInputStream(blockFile, seekOffset);
} else {
try {
return openAndSeek(blockFile, seekOffset);
} catch (FileNotFoundException fnfe) {
throw new IOException("Block " + b + " is not valid. " +
"Expected block file at " + blockFile + " does not exist.");
}
}
}
/**
* Get the meta info of a block stored in volumeMap. To find a block,
* block pool Id, block Id and generation stamp must match.
* @param b extended block
* @return the meta replica information
* @throws ReplicaNotFoundException if no entry is in the map or
* there is a generation stamp mismatch
*/
ReplicaInfo getReplicaInfo(ExtendedBlock b)
throws ReplicaNotFoundException {
ReplicaInfo info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
if (info == null) {
throw new ReplicaNotFoundException(
ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
}
return info;
}
/**
* Get the meta info of a block stored in volumeMap. Block is looked up
* without matching the generation stamp.
* @param bpid block pool Id
* @param blkid block Id
   * @return the meta replica information
   * @throws ReplicaNotFoundException if no entry is in the map
*/
private ReplicaInfo getReplicaInfo(String bpid, long blkid)
throws ReplicaNotFoundException {
ReplicaInfo info = volumeMap.get(bpid, blkid);
if (info == null) {
throw new ReplicaNotFoundException(
ReplicaNotFoundException.NON_EXISTENT_REPLICA + bpid + ":" + blkid);
}
return info;
}
/**
* Returns handles to the block file and its metadata file
*/
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
long blkOffset, long metaOffset) throws IOException {
ReplicaInfo info = getReplicaInfo(b);
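    // Hold a reference to the replica's volume so it cannot be removed while
    // the caller is using the returned streams; the reference is handed off to
    // the ReplicaInputStreams and released when the streams are closed.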
FsVolumeReference ref = info.getVolume().obtainReference();
try {
InputStream blockInStream = openAndSeek(info.getBlockFile(), blkOffset);
try {
InputStream metaInStream = openAndSeek(info.getMetaFile(), metaOffset);
return new ReplicaInputStreams(blockInStream, metaInStream, ref);
} catch (IOException e) {
IOUtils.cleanup(null, blockInStream);
throw e;
}
} catch (IOException e) {
IOUtils.cleanup(null, ref);
throw e;
}
}
private static FileInputStream openAndSeek(File file, long offset)
throws IOException {
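    // Open with RandomAccessFile so we can seek to the requested offset, then
    // hand back a plain FileInputStream wrapping the already-positioned
    // file descriptor.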
RandomAccessFile raf = null;
try {
raf = new RandomAccessFile(file, "r");
if (offset > 0) {
raf.seek(offset);
}
return new FileInputStream(raf.getFD());
} catch(IOException ioe) {
IOUtils.cleanup(null, raf);
throw ioe;
}
}
static File moveBlockFiles(Block b, File srcfile, File destdir)
throws IOException {
final File dstfile = new File(destdir, b.getBlockName());
final File srcmeta = FsDatasetUtil.getMetaFile(srcfile, b.getGenerationStamp());
final File dstmeta = FsDatasetUtil.getMetaFile(dstfile, b.getGenerationStamp());
try {
NativeIO.renameTo(srcmeta, dstmeta);
} catch (IOException e) {
throw new IOException("Failed to move meta file for " + b
+ " from " + srcmeta + " to " + dstmeta, e);
}
try {
NativeIO.renameTo(srcfile, dstfile);
} catch (IOException e) {
throw new IOException("Failed to move block file for " + b
+ " from " + srcfile + " to " + dstfile.getAbsolutePath(), e);
}
if (LOG.isDebugEnabled()) {
LOG.debug("addFinalizedBlock: Moved " + srcmeta + " to " + dstmeta
+ " and " + srcfile + " to " + dstfile);
}
return dstfile;
}
/**
* Copy the block and meta files for the given block to the given destination.
* @return the new meta and block files.
* @throws IOException
*/
static File[] copyBlockFiles(long blockId, long genStamp, File srcMeta,
File srcFile, File destRoot, boolean calculateChecksum,
int smallBufferSize) throws IOException {
final File destDir = DatanodeUtil.idToBlockDir(destRoot, blockId);
final File dstFile = new File(destDir, srcFile.getName());
final File dstMeta = FsDatasetUtil.getMetaFile(dstFile, genStamp);
return copyBlockFiles(srcMeta, srcFile, dstMeta, dstFile, calculateChecksum,
smallBufferSize);
}
static File[] copyBlockFiles(File srcMeta, File srcFile, File dstMeta,
File dstFile, boolean calculateChecksum,
int smallBufferSize)
throws IOException {
if (calculateChecksum) {
computeChecksum(srcMeta, dstMeta, srcFile, smallBufferSize);
} else {
try {
Storage.nativeCopyFileUnbuffered(srcMeta, dstMeta, true);
} catch (IOException e) {
throw new IOException("Failed to copy " + srcMeta + " to " + dstMeta, e);
}
}
try {
Storage.nativeCopyFileUnbuffered(srcFile, dstFile, true);
} catch (IOException e) {
throw new IOException("Failed to copy " + srcFile + " to " + dstFile, e);
}
if (LOG.isDebugEnabled()) {
if (calculateChecksum) {
LOG.debug("Copied " + srcMeta + " to " + dstMeta
+ " and calculated checksum");
} else {
LOG.debug("Copied " + srcFile + " to " + dstFile);
}
}
return new File[] {dstMeta, dstFile};
}
/**
* Move block files from one storage to another storage.
   * @return the old replicaInfo
* @throws IOException
*/
@Override
public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block,
StorageType targetStorageType) throws IOException {
ReplicaInfo replicaInfo = getReplicaInfo(block);
if (replicaInfo.getState() != ReplicaState.FINALIZED) {
throw new ReplicaNotFoundException(
ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
}
if (replicaInfo.getNumBytes() != block.getNumBytes()) {
throw new IOException("Corrupted replica " + replicaInfo
+ " with a length of " + replicaInfo.getNumBytes()
+ " expected length is " + block.getNumBytes());
}
if (replicaInfo.getVolume().getStorageType() == targetStorageType) {
throw new ReplicaAlreadyExistsException("Replica " + replicaInfo
+ " already exists on storage " + targetStorageType);
}
if (replicaInfo.isOnTransientStorage()) {
// Block movement from RAM_DISK will be done by LazyPersist mechanism
throw new IOException("Replica " + replicaInfo
+ " cannot be moved from storageType : "
+ replicaInfo.getVolume().getStorageType());
}
try (FsVolumeReference volumeRef = volumes.getNextVolume(
targetStorageType, block.getNumBytes())) {
File oldBlockFile = replicaInfo.getBlockFile();
File oldMetaFile = replicaInfo.getMetaFile();
FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
// Copy files to temp dir first
File[] blockFiles = copyBlockFiles(block.getBlockId(),
block.getGenerationStamp(), oldMetaFile, oldBlockFile,
targetVolume.getTmpDir(block.getBlockPoolId()),
replicaInfo.isOnTransientStorage(), smallBufferSize);
ReplicaInfo newReplicaInfo = new ReplicaInPipeline(
replicaInfo.getBlockId(), replicaInfo.getGenerationStamp(),
targetVolume, blockFiles[0].getParentFile(), 0);
newReplicaInfo.setNumBytes(blockFiles[1].length());
// Finalize the copied files
newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
}
// Replace the old block if any to reschedule the scanning.
return replicaInfo;
}
/**
* Compute and store the checksum for a block file that does not already have
* its checksum computed.
*
* @param srcMeta source meta file, containing only the checksum header, not a
* calculated checksum
* @param dstMeta destination meta file, into which this method will write a
* full computed checksum
* @param blockFile block file for which the checksum will be computed
* @throws IOException
*/
private static void computeChecksum(File srcMeta, File dstMeta,
File blockFile, int smallBufferSize)
throws IOException {
final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(srcMeta);
final byte[] data = new byte[1 << 16];
final byte[] crcs = new byte[checksum.getChecksumSize(data.length)];
DataOutputStream metaOut = null;
try {
File parentFile = dstMeta.getParentFile();
if (parentFile != null) {
if (!parentFile.mkdirs() && !parentFile.isDirectory()) {
throw new IOException("Destination '" + parentFile
+ "' directory cannot be created");
}
}
metaOut = new DataOutputStream(new BufferedOutputStream(
new FileOutputStream(dstMeta), smallBufferSize));
BlockMetadataHeader.writeHeader(metaOut, checksum);
int offset = 0;
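      // 'offset' carries over the bytes of a partial checksum chunk from the
      // previous read: only whole chunks are checksummed and written here, and
      // the leftover bytes are copied to the front of the buffer for the next
      // pass. The final partial chunk is checksummed after the loop.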
try (InputStream dataIn = isNativeIOAvailable ?
NativeIO.getShareDeleteFileInputStream(blockFile) :
new FileInputStream(blockFile)) {
for (int n; (n = dataIn.read(data, offset, data.length - offset)) != -1; ) {
if (n > 0) {
n += offset;
offset = n % checksum.getBytesPerChecksum();
final int length = n - offset;
if (length > 0) {
checksum.calculateChunkedSums(data, 0, length, crcs, 0);
metaOut.write(crcs, 0, checksum.getChecksumSize(length));
System.arraycopy(data, length, data, 0, offset);
}
}
}
}
// calculate and write the last crc
checksum.calculateChunkedSums(data, 0, offset, crcs, 0);
metaOut.write(crcs, 0, 4);
} finally {
IOUtils.cleanup(LOG, metaOut);
}
}
static private void truncateBlock(File blockFile, File metaFile,
long oldlen, long newlen) throws IOException {
LOG.info("truncateBlock: blockFile=" + blockFile
+ ", metaFile=" + metaFile
+ ", oldlen=" + oldlen
+ ", newlen=" + newlen);
if (newlen == oldlen) {
return;
}
if (newlen > oldlen) {
throw new IOException("Cannot truncate block to from oldlen (=" + oldlen
+ ") to newlen (=" + newlen + ")");
}
DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum();
int checksumsize = dcs.getChecksumSize();
int bpc = dcs.getBytesPerChecksum();
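    // Number of checksum chunks needed to cover the new length, the resulting
    // meta file length, and the offset/size of the (possibly partial) last
    // chunk whose checksum must be recomputed after truncation.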
long n = (newlen - 1)/bpc + 1;
long newmetalen = BlockMetadataHeader.getHeaderSize() + n*checksumsize;
long lastchunkoffset = (n - 1)*bpc;
int lastchunksize = (int)(newlen - lastchunkoffset);
byte[] b = new byte[Math.max(lastchunksize, checksumsize)];
RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
try {
//truncate blockFile
blockRAF.setLength(newlen);
//read last chunk
blockRAF.seek(lastchunkoffset);
blockRAF.readFully(b, 0, lastchunksize);
} finally {
blockRAF.close();
}
//compute checksum
dcs.update(b, 0, lastchunksize);
dcs.writeValue(b, 0, false);
//update metaFile
RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
try {
metaRAF.setLength(newmetalen);
metaRAF.seek(newmetalen - checksumsize);
metaRAF.write(b, 0, checksumsize);
} finally {
metaRAF.close();
}
}
@Override // FsDatasetSpi
public synchronized ReplicaHandler append(ExtendedBlock b,
long newGS, long expectedBlockLen) throws IOException {
    // One reason we may get here: the block was successfully finalized because
    // all packets were processed at the Datanode, but the acks for some of the
    // packets were not received by the client, so the client re-opens the
    // connection and retries sending those packets.
    // The other reason is that an "append" is occurring to this block.
// check the validity of the parameter
if (newGS < b.getGenerationStamp()) {
throw new IOException("The new generation stamp " + newGS +
" should be greater than the replica " + b + "'s generation stamp");
}
ReplicaInfo replicaInfo = getReplicaInfo(b);
LOG.info("Appending to " + replicaInfo);
if (replicaInfo.getState() != ReplicaState.FINALIZED) {
throw new ReplicaNotFoundException(
ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
}
if (replicaInfo.getNumBytes() != expectedBlockLen) {
throw new IOException("Corrupted replica " + replicaInfo +
" with a length of " + replicaInfo.getNumBytes() +
" expected length is " + expectedBlockLen);
}
FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
ReplicaBeingWritten replica = null;
try {
replica = append(b.getBlockPoolId(), (FinalizedReplica)replicaInfo, newGS,
b.getNumBytes());
} catch (IOException e) {
IOUtils.cleanup(null, ref);
throw e;
}
return new ReplicaHandler(replica, ref);
}
/** Append to a finalized replica
* Change a finalized replica to be a RBW replica and
* bump its generation stamp to be the newGS
*
* @param bpid block pool Id
* @param replicaInfo a finalized replica
* @param newGS new generation stamp
   * @param estimateBlockLen estimated block length
* @return a RBW replica
* @throws IOException if moving the replica from finalized directory
* to rbw directory fails
*/
private synchronized ReplicaBeingWritten append(String bpid,
FinalizedReplica replicaInfo, long newGS, long estimateBlockLen)
throws IOException {
// If the block is cached, start uncaching it.
cacheManager.uncacheBlock(bpid, replicaInfo.getBlockId());
// unlink the finalized replica
replicaInfo.unlinkBlock(1);
// construct a RBW replica with the new GS
File blkfile = replicaInfo.getBlockFile();
FsVolumeImpl v = (FsVolumeImpl)replicaInfo.getVolume();
if (v.getAvailable() < estimateBlockLen - replicaInfo.getNumBytes()) {
throw new DiskOutOfSpaceException("Insufficient space for appending to "
+ replicaInfo);
}
File newBlkFile = new File(v.getRbwDir(bpid), replicaInfo.getBlockName());
File oldmeta = replicaInfo.getMetaFile();
ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(
replicaInfo.getBlockId(), replicaInfo.getNumBytes(), newGS,
v, newBlkFile.getParentFile(), Thread.currentThread(), estimateBlockLen);
File newmeta = newReplicaInfo.getMetaFile();
// rename meta file to rbw directory
if (LOG.isDebugEnabled()) {
LOG.debug("Renaming " + oldmeta + " to " + newmeta);
}
try {
NativeIO.renameTo(oldmeta, newmeta);
} catch (IOException e) {
throw new IOException("Block " + replicaInfo + " reopen failed. " +
" Unable to move meta file " + oldmeta +
" to rbw dir " + newmeta, e);
}
// rename block file to rbw directory
if (LOG.isDebugEnabled()) {
LOG.debug("Renaming " + blkfile + " to " + newBlkFile
+ ", file length=" + blkfile.length());
}
try {
NativeIO.renameTo(blkfile, newBlkFile);
} catch (IOException e) {
try {
NativeIO.renameTo(newmeta, oldmeta);
} catch (IOException ex) {
LOG.warn("Cannot move meta file " + newmeta +
"back to the finalized directory " + oldmeta, ex);
}
throw new IOException("Block " + replicaInfo + " reopen failed. " +
" Unable to move block file " + blkfile +
" to rbw dir " + newBlkFile, e);
}
// Replace finalized replica by a RBW replica in replicas map
volumeMap.add(bpid, newReplicaInfo);
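    // Reserve disk space on the volume for the bytes that may still be
    // appended beyond the current replica length.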
v.reserveSpaceForRbw(estimateBlockLen - replicaInfo.getNumBytes());
return newReplicaInfo;
}
private ReplicaInfo recoverCheck(ExtendedBlock b, long newGS,
long expectedBlockLen) throws IOException {
ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
// check state
if (replicaInfo.getState() != ReplicaState.FINALIZED &&
replicaInfo.getState() != ReplicaState.RBW) {
throw new ReplicaNotFoundException(
ReplicaNotFoundException.UNFINALIZED_AND_NONRBW_REPLICA + replicaInfo);
}
// check generation stamp
long replicaGenerationStamp = replicaInfo.getGenerationStamp();
if (replicaGenerationStamp < b.getGenerationStamp() ||
replicaGenerationStamp > newGS) {
throw new ReplicaNotFoundException(
ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + replicaGenerationStamp
+ ". Expected GS range is [" + b.getGenerationStamp() + ", " +
newGS + "].");
}
    // stop the previous writer before checking a replica's length
long replicaLen = replicaInfo.getNumBytes();
if (replicaInfo.getState() == ReplicaState.RBW) {
ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;
// kill the previous writer
rbw.stopWriter(datanode.getDnConf().getXceiverStopTimeout());
rbw.setWriter(Thread.currentThread());
// check length: bytesRcvd, bytesOnDisk, and bytesAcked should be the same
if (replicaLen != rbw.getBytesOnDisk()
|| replicaLen != rbw.getBytesAcked()) {
throw new ReplicaAlreadyExistsException("RBW replica " + replicaInfo +
"bytesRcvd(" + rbw.getNumBytes() + "), bytesOnDisk(" +
rbw.getBytesOnDisk() + "), and bytesAcked(" + rbw.getBytesAcked() +
") are not the same.");
}
}
// check block length
if (replicaLen != expectedBlockLen) {
throw new IOException("Corrupted replica " + replicaInfo +
" with a length of " + replicaLen +
" expected length is " + expectedBlockLen);
}
return replicaInfo;
}
@Override // FsDatasetSpi
public synchronized ReplicaHandler recoverAppend(
ExtendedBlock b, long newGS, long expectedBlockLen) throws IOException {
LOG.info("Recover failed append to " + b);
ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
ReplicaBeingWritten replica;
try {
// change the replica's state/gs etc.
if (replicaInfo.getState() == ReplicaState.FINALIZED) {
replica = append(b.getBlockPoolId(), (FinalizedReplica) replicaInfo,
newGS, b.getNumBytes());
} else { //RBW
bumpReplicaGS(replicaInfo, newGS);
replica = (ReplicaBeingWritten) replicaInfo;
}
} catch (IOException e) {
IOUtils.cleanup(null, ref);
throw e;
}
return new ReplicaHandler(replica, ref);
}
@Override // FsDatasetSpi
public synchronized String recoverClose(ExtendedBlock b, long newGS,
long expectedBlockLen) throws IOException {
LOG.info("Recover failed close " + b);
// check replica's state
ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
// bump the replica's GS
bumpReplicaGS(replicaInfo, newGS);
// finalize the replica if RBW
if (replicaInfo.getState() == ReplicaState.RBW) {
finalizeReplica(b.getBlockPoolId(), replicaInfo);
}
return replicaInfo.getStorageUuid();
}
/**
* Bump a replica's generation stamp to a new one.
* Its on-disk meta file name is renamed to be the new one too.
*
* @param replicaInfo a replica
* @param newGS new generation stamp
* @throws IOException if rename fails
*/
private void bumpReplicaGS(ReplicaInfo replicaInfo,
long newGS) throws IOException {
long oldGS = replicaInfo.getGenerationStamp();
File oldmeta = replicaInfo.getMetaFile();
replicaInfo.setGenerationStamp(newGS);
File newmeta = replicaInfo.getMetaFile();
// rename meta file to new GS
if (LOG.isDebugEnabled()) {
LOG.debug("Renaming " + oldmeta + " to " + newmeta);
}
try {
NativeIO.renameTo(oldmeta, newmeta);
} catch (IOException e) {
replicaInfo.setGenerationStamp(oldGS); // restore old GS
throw new IOException("Block " + replicaInfo + " reopen failed. " +
" Unable to move meta file " + oldmeta +
" to " + newmeta, e);
}
}
@Override // FsDatasetSpi
public synchronized ReplicaHandler createRbw(
StorageType storageType, ExtendedBlock b, boolean allowLazyPersist)
throws IOException {
ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
b.getBlockId());
if (replicaInfo != null) {
throw new ReplicaAlreadyExistsException("Block " + b +
" already exists in state " + replicaInfo.getState() +
" and thus cannot be created.");
}
// create a new block
FsVolumeReference ref = null;
// Use ramdisk only if block size is a multiple of OS page size.
// This simplifies reservation for partially used replicas
// significantly.
if (allowLazyPersist &&
lazyWriter != null &&
b.getNumBytes() % cacheManager.getOsPageSize() == 0 &&
reserveLockedMemory(b.getNumBytes())) {
try {
// First try to place the block on a transient volume.
ref = volumes.getNextTransientVolume(b.getNumBytes());
datanode.getMetrics().incrRamDiskBlocksWrite();
} catch(DiskOutOfSpaceException de) {
// Ignore the exception since we just fall back to persistent storage.
} finally {
if (ref == null) {
cacheManager.release(b.getNumBytes());
}
}
}
if (ref == null) {
ref = volumes.getNextVolume(storageType, b.getNumBytes());
}
FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
// create an rbw file to hold block in the designated volume
if (allowLazyPersist && !v.isTransientStorage()) {
datanode.getMetrics().incrRamDiskBlocksWriteFallback();
}
File f;
try {
f = v.createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
} catch (IOException e) {
IOUtils.cleanup(null, ref);
throw e;
}
ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(b.getBlockId(),
b.getGenerationStamp(), v, f.getParentFile(), b.getNumBytes());
volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
return new ReplicaHandler(newReplicaInfo, ref);
}
@Override // FsDatasetSpi
public synchronized ReplicaHandler recoverRbw(
ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
throws IOException {
LOG.info("Recover RBW replica " + b);
ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
// check the replica's state
if (replicaInfo.getState() != ReplicaState.RBW) {
throw new ReplicaNotFoundException(
ReplicaNotFoundException.NON_RBW_REPLICA + replicaInfo);
}
ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;
LOG.info("Recovering " + rbw);
// Stop the previous writer
rbw.stopWriter(datanode.getDnConf().getXceiverStopTimeout());
rbw.setWriter(Thread.currentThread());
// check generation stamp
long replicaGenerationStamp = rbw.getGenerationStamp();
if (replicaGenerationStamp < b.getGenerationStamp() ||
replicaGenerationStamp > newGS) {
throw new ReplicaNotFoundException(
ReplicaNotFoundException.UNEXPECTED_GS_REPLICA + b +
". Expected GS range is [" + b.getGenerationStamp() + ", " +
newGS + "].");
}
// check replica length
long bytesAcked = rbw.getBytesAcked();
long numBytes = rbw.getNumBytes();
if (bytesAcked < minBytesRcvd || numBytes > maxBytesRcvd){
throw new ReplicaNotFoundException("Unmatched length replica " +
replicaInfo + ": BytesAcked = " + bytesAcked +
" BytesRcvd = " + numBytes + " are not in the range of [" +
minBytesRcvd + ", " + maxBytesRcvd + "].");
}
FsVolumeReference ref = rbw.getVolume().obtainReference();
try {
// Truncate the potentially corrupt portion.
// If the source was client and the last node in the pipeline was lost,
// any corrupt data written after the acked length can go unnoticed.
if (numBytes > bytesAcked) {
final File replicafile = rbw.getBlockFile();
truncateBlock(replicafile, rbw.getMetaFile(), numBytes, bytesAcked);
rbw.setNumBytes(bytesAcked);
rbw.setLastChecksumAndDataLen(bytesAcked, null);
}
// bump the replica's generation stamp to newGS
bumpReplicaGS(rbw, newGS);
} catch (IOException e) {
IOUtils.cleanup(null, ref);
throw e;
}
return new ReplicaHandler(rbw, ref);
}
@Override // FsDatasetSpi
public synchronized ReplicaInPipeline convertTemporaryToRbw(
final ExtendedBlock b) throws IOException {
final long blockId = b.getBlockId();
final long expectedGs = b.getGenerationStamp();
final long visible = b.getNumBytes();
LOG.info("Convert " + b + " from Temporary to RBW, visible length="
+ visible);
final ReplicaInPipeline temp;
{
// get replica
final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
if (r == null) {
throw new ReplicaNotFoundException(
ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
}
// check the replica's state
if (r.getState() != ReplicaState.TEMPORARY) {
throw new ReplicaAlreadyExistsException(
"r.getState() != ReplicaState.TEMPORARY, r=" + r);
}
temp = (ReplicaInPipeline)r;
}
// check generation stamp
if (temp.getGenerationStamp() != expectedGs) {
throw new ReplicaAlreadyExistsException(
"temp.getGenerationStamp() != expectedGs = " + expectedGs
+ ", temp=" + temp);
}
// TODO: check writer?
// set writer to the current thread
// temp.setWriter(Thread.currentThread());
// check length
final long numBytes = temp.getNumBytes();
if (numBytes < visible) {
throw new IOException(numBytes + " = numBytes < visible = "
+ visible + ", temp=" + temp);
}
// check volume
final FsVolumeImpl v = (FsVolumeImpl)temp.getVolume();
if (v == null) {
throw new IOException("r.getVolume() = null, temp=" + temp);
}
// move block files to the rbw directory
BlockPoolSlice bpslice = v.getBlockPoolSlice(b.getBlockPoolId());
final File dest = moveBlockFiles(b.getLocalBlock(), temp.getBlockFile(),
bpslice.getRbwDir());
// create RBW
final ReplicaBeingWritten rbw = new ReplicaBeingWritten(
blockId, numBytes, expectedGs,
v, dest.getParentFile(), Thread.currentThread(), 0);
rbw.setBytesAcked(visible);
// overwrite the RBW in the volume map
volumeMap.add(b.getBlockPoolId(), rbw);
return rbw;
}
@Override // FsDatasetSpi
public ReplicaHandler createTemporary(
StorageType storageType, ExtendedBlock b) throws IOException {
long startTimeMs = Time.monotonicNow();
long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
ReplicaInfo lastFoundReplicaInfo = null;
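    // If an older-generation replica in pipeline already exists, record it,
    // stop its writer outside the lock, and retry. Seeing the same stale
    // replica again on the next iteration means its writer has been stopped,
    // so it is invalidated and replaced by the new temporary replica.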
do {
synchronized (this) {
ReplicaInfo currentReplicaInfo =
volumeMap.get(b.getBlockPoolId(), b.getBlockId());
if (currentReplicaInfo == lastFoundReplicaInfo) {
if (lastFoundReplicaInfo != null) {
invalidate(b.getBlockPoolId(), new Block[] { lastFoundReplicaInfo });
}
FsVolumeReference ref =
volumes.getNextVolume(storageType, b.getNumBytes());
FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
// create a temporary file to hold block in the designated volume
File f;
try {
f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
} catch (IOException e) {
IOUtils.cleanup(null, ref);
throw e;
}
ReplicaInPipeline newReplicaInfo =
new ReplicaInPipeline(b.getBlockId(), b.getGenerationStamp(), v,
f.getParentFile(), 0);
volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
return new ReplicaHandler(newReplicaInfo, ref);
} else {
if (!(currentReplicaInfo.getGenerationStamp() < b
.getGenerationStamp() && currentReplicaInfo instanceof ReplicaInPipeline)) {
throw new ReplicaAlreadyExistsException("Block " + b
+ " already exists in state " + currentReplicaInfo.getState()
+ " and thus cannot be created.");
}
lastFoundReplicaInfo = currentReplicaInfo;
}
}
// Hang too long, just bail out. This is not supposed to happen.
long writerStopMs = Time.monotonicNow() - startTimeMs;
if (writerStopMs > writerStopTimeoutMs) {
LOG.warn("Unable to stop existing writer for block " + b + " after "
+ writerStopMs + " miniseconds.");
throw new IOException("Unable to stop existing writer for block " + b
+ " after " + writerStopMs + " miniseconds.");
}
// Stop the previous writer
((ReplicaInPipeline) lastFoundReplicaInfo)
.stopWriter(writerStopTimeoutMs);
} while (true);
}
/**
* Sets the offset in the meta file so that the
* last checksum will be overwritten.
*/
@Override // FsDatasetSpi
public void adjustCrcChannelPosition(ExtendedBlock b, ReplicaOutputStreams streams,
int checksumSize) throws IOException {
FileOutputStream file = (FileOutputStream)streams.getChecksumOut();
FileChannel channel = file.getChannel();
long oldPos = channel.position();
long newPos = oldPos - checksumSize;
if (LOG.isDebugEnabled()) {
LOG.debug("Changing meta file offset of block " + b + " from " +
oldPos + " to " + newPos);
}
channel.position(newPos);
}
//
// REMIND - mjc - eventually we should have a timeout system
// in place to clean up block files left by abandoned clients.
// We should have some timer in place, so that if a blockfile
// is created but non-valid, and has been idle for >48 hours,
// we can GC it safely.
//
/**
* Complete the block write!
*/
@Override // FsDatasetSpi
public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
if (Thread.interrupted()) {
// Don't allow data modifications from interrupted threads
throw new IOException("Cannot finalize block from Interrupted Thread");
}
ReplicaInfo replicaInfo = getReplicaInfo(b);
if (replicaInfo.getState() == ReplicaState.FINALIZED) {
// this is legal, when recovery happens on a file that has
// been opened for append but never modified
return;
}
finalizeReplica(b.getBlockPoolId(), replicaInfo);
}
private synchronized FinalizedReplica finalizeReplica(String bpid,
ReplicaInfo replicaInfo) throws IOException {
FinalizedReplica newReplicaInfo = null;
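    // A replica under recovery whose original replica is already finalized can
    // simply be replaced by that original; otherwise the replica's files are
    // moved into the finalized directory on its volume.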
if (replicaInfo.getState() == ReplicaState.RUR &&
((ReplicaUnderRecovery)replicaInfo).getOriginalReplica().getState() ==
ReplicaState.FINALIZED) {
newReplicaInfo = (FinalizedReplica)
((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
} else {
FsVolumeImpl v = (FsVolumeImpl)replicaInfo.getVolume();
File f = replicaInfo.getBlockFile();
if (v == null) {
throw new IOException("No volume for temporary file " + f +
" for block " + replicaInfo);
}
File dest = v.addFinalizedBlock(
bpid, replicaInfo, f, replicaInfo.getBytesReserved());
newReplicaInfo = new FinalizedReplica(replicaInfo, v, dest.getParentFile());
if (v.isTransientStorage()) {
releaseLockedMemory(
replicaInfo.getOriginalBytesReserved() - replicaInfo.getNumBytes(),
false);
ramDiskReplicaTracker.addReplica(
bpid, replicaInfo.getBlockId(), v, replicaInfo.getNumBytes());
datanode.getMetrics().addRamDiskBytesWrite(replicaInfo.getNumBytes());
}
}
volumeMap.add(bpid, newReplicaInfo);
return newReplicaInfo;
}
/**
* Remove the temporary block file (if any)
*/
@Override // FsDatasetSpi
public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
b.getLocalBlock());
if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
// remove from volumeMap
volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
// delete the on-disk temp file
if (delBlockFromDisk(replicaInfo.getBlockFile(),
replicaInfo.getMetaFile(), b.getLocalBlock())) {
LOG.warn("Block " + b + " unfinalized and removed. " );
}
if (replicaInfo.getVolume().isTransientStorage()) {
ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(), b.getBlockId(), true);
}
}
}
/**
* Remove a block from disk
* @param blockFile block file
* @param metaFile block meta file
* @param b a block
* @return true if on-disk files are deleted; false otherwise
*/
private boolean delBlockFromDisk(File blockFile, File metaFile, Block b) {
if (blockFile == null) {
LOG.warn("No file exists for block: " + b);
return true;
}
if (!blockFile.delete()) {
LOG.warn("Not able to delete the block file: " + blockFile);
return false;
} else { // remove the meta file
if (metaFile != null && !metaFile.delete()) {
LOG.warn("Not able to delete the meta block file: " + metaFile);
return false;
}
}
return true;
}
@Override
public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
Map<DatanodeStorage, BlockListAsLongs> blockReportsMap =
new HashMap<DatanodeStorage, BlockListAsLongs>();
Map<String, BlockListAsLongs.Builder> builders =
new HashMap<String, BlockListAsLongs.Builder>();
List<FsVolumeImpl> curVolumes = volumes.getVolumes();
for (FsVolumeSpi v : curVolumes) {
builders.put(v.getStorageID(), BlockListAsLongs.builder());
}
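    // Snapshot the replica map under the dataset lock: replicas under recovery
    // are reported via their original replica, and TEMPORARY replicas are
    // skipped.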
synchronized(this) {
for (ReplicaInfo b : volumeMap.replicas(bpid)) {
switch(b.getState()) {
case FINALIZED:
case RBW:
case RWR:
builders.get(b.getVolume().getStorageID()).add(b);
break;
case RUR:
ReplicaUnderRecovery rur = (ReplicaUnderRecovery)b;
builders.get(rur.getVolume().getStorageID())
.add(rur.getOriginalReplica());
break;
case TEMPORARY:
break;
default:
assert false : "Illegal ReplicaInfo state.";
}
}
}
for (FsVolumeImpl v : curVolumes) {
blockReportsMap.put(v.toDatanodeStorage(),
builders.get(v.getStorageID()).build());
}
return blockReportsMap;
}
@Override // FsDatasetSpi
public List<Long> getCacheReport(String bpid) {
return cacheManager.getCachedBlocks(bpid);
}
/**
* Get the list of finalized blocks from in-memory blockmap for a block pool.
*/
@Override
public synchronized List<FinalizedReplica> getFinalizedBlocks(String bpid) {
ArrayList<FinalizedReplica> finalized =
new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
for (ReplicaInfo b : volumeMap.replicas(bpid)) {
if(b.getState() == ReplicaState.FINALIZED) {
finalized.add(new FinalizedReplica((FinalizedReplica)b));
}
}
return finalized;
}
/**
   * Get the list of finalized blocks on persistent storage from the
   * in-memory blockmap for a block pool.
*/
@Override
public synchronized List<FinalizedReplica> getFinalizedBlocksOnPersistentStorage(String bpid) {
ArrayList<FinalizedReplica> finalized =
new ArrayList<FinalizedReplica>(volumeMap.size(bpid));
for (ReplicaInfo b : volumeMap.replicas(bpid)) {
if(!b.getVolume().isTransientStorage() &&
b.getState() == ReplicaState.FINALIZED) {
finalized.add(new FinalizedReplica((FinalizedReplica)b));
}
}
return finalized;
}
/**
* Check if a block is valid.
*
* @param b The block to check.
* @param minLength The minimum length that the block must have. May be 0.
   * @param state The expected replica state; the check fails with
   *        UnexpectedReplicaStateException if the replica is not in this state.
*
* @throws ReplicaNotFoundException If the replica is not found
*
* @throws UnexpectedReplicaStateException If the replica is not in the
* expected state.
* @throws FileNotFoundException If the block file is not found or there
* was an error locating it.
* @throws EOFException If the replica length is too short.
*
* @throws IOException May be thrown from the methods called.
*/
public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
throws ReplicaNotFoundException, UnexpectedReplicaStateException,
FileNotFoundException, EOFException, IOException {
final ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
b.getLocalBlock());
if (replicaInfo == null) {
throw new ReplicaNotFoundException(b);
}
if (replicaInfo.getState() != state) {
throw new UnexpectedReplicaStateException(b,state);
}
if (!replicaInfo.getBlockFile().exists()) {
throw new FileNotFoundException(replicaInfo.getBlockFile().getPath());
}
long onDiskLength = getLength(b);
if (onDiskLength < minLength) {
throw new EOFException(b + "'s on-disk length " + onDiskLength
+ " is shorter than minLength " + minLength);
}
}
/**
* Check whether the given block is a valid one.
* valid means finalized
*/
@Override // FsDatasetSpi
public boolean isValidBlock(ExtendedBlock b) {
return isValid(b, ReplicaState.FINALIZED);
}
/**
* Check whether the given block is a valid RBW.
*/
  @Override // FsDatasetSpi
public boolean isValidRbw(final ExtendedBlock b) {
return isValid(b, ReplicaState.RBW);
}
/** Does the block exist and have the given state? */
private boolean isValid(final ExtendedBlock b, final ReplicaState state) {
try {
checkBlock(b, 0, state);
} catch (IOException e) {
return false;
}
return true;
}
/**
* Find the file corresponding to the block and return it if it exists.
*/
File validateBlockFile(String bpid, long blockId) {
//Should we check for metadata file too?
final File f;
synchronized(this) {
f = getFile(bpid, blockId, false);
}
if(f != null ) {
if(f.exists())
return f;
// if file is not null, but doesn't exist - possibly disk failed
datanode.checkDiskErrorAsync();
}
if (LOG.isDebugEnabled()) {
LOG.debug("blockId=" + blockId + ", f=" + f);
}
return null;
}
/** Check the files of a replica. */
static void checkReplicaFiles(final ReplicaInfo r) throws IOException {
//check replica's file
final File f = r.getBlockFile();
if (!f.exists()) {
throw new FileNotFoundException("File " + f + " not found, r=" + r);
}
if (r.getBytesOnDisk() != f.length()) {
throw new IOException("File length mismatched. The length of "
+ f + " is " + f.length() + " but r=" + r);
}
//check replica's meta file
final File metafile = FsDatasetUtil.getMetaFile(f, r.getGenerationStamp());
if (!metafile.exists()) {
throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
}
if (metafile.length() == 0) {
throw new IOException("Metafile " + metafile + " is empty, r=" + r);
}
}
/**
* We're informed that a block is no longer valid. Delete it.
*/
@Override // FsDatasetSpi
public void invalidate(String bpid, Block invalidBlks[]) throws IOException {
final List<String> errors = new ArrayList<String>();
for (int i = 0; i < invalidBlks.length; i++) {
final File f;
final FsVolumeImpl v;
synchronized (this) {
final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
if (info == null) {
// It is okay if the block is not found -- it may be deleted earlier.
LOG.info("Failed to delete replica " + invalidBlks[i]
+ ": ReplicaInfo not found.");
continue;
}
if (info.getGenerationStamp() != invalidBlks[i].getGenerationStamp()) {
errors.add("Failed to delete replica " + invalidBlks[i]
+ ": GenerationStamp not matched, info=" + info);
continue;
}
f = info.getBlockFile();
v = (FsVolumeImpl)info.getVolume();
if (v == null) {
errors.add("Failed to delete replica " + invalidBlks[i]
+ ". No volume for this replica, file=" + f);
continue;
}
File parent = f.getParentFile();
if (parent == null) {
errors.add("Failed to delete replica " + invalidBlks[i]
+ ". Parent not found for file " + f);
continue;
}
ReplicaInfo removing = volumeMap.remove(bpid, invalidBlks[i]);
addDeletingBlock(bpid, removing.getBlockId());
if (LOG.isDebugEnabled()) {
LOG.debug("Block file " + removing.getBlockFile().getName()
+ " is to be deleted");
}
}
if (v.isTransientStorage()) {
RamDiskReplica replicaInfo =
ramDiskReplicaTracker.getReplica(bpid, invalidBlks[i].getBlockId());
if (replicaInfo != null) {
if (!replicaInfo.getIsPersisted()) {
datanode.getMetrics().incrRamDiskBlocksDeletedBeforeLazyPersisted();
}
ramDiskReplicaTracker.discardReplica(replicaInfo.getBlockPoolId(),
replicaInfo.getBlockId(), true);
}
}
// If a DFSClient has the replica in its cache of short-circuit file
// descriptors (and the client is using ShortCircuitShm), invalidate it.
datanode.getShortCircuitRegistry().processBlockInvalidation(
new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));
// If the block is cached, start uncaching it.
cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId());
// Delete the block asynchronously to make sure we can do it fast enough.
// It's ok to unlink the block file before the uncache operation
// finishes.
try {
asyncDiskService.deleteAsync(v.obtainReference(), f,
FsDatasetUtil.getMetaFile(f, invalidBlks[i].getGenerationStamp()),
new ExtendedBlock(bpid, invalidBlks[i]),
dataStorage.getTrashDirectoryForBlockFile(bpid, f));
} catch (ClosedChannelException e) {
LOG.warn("Volume " + v + " is closed, ignore the deletion task for " +
"block " + invalidBlks[i]);
}
}
if (!errors.isEmpty()) {
StringBuilder b = new StringBuilder("Failed to delete ")
.append(errors.size()).append(" (out of ").append(invalidBlks.length)
.append(") replica(s):");
for(int i = 0; i < errors.size(); i++) {
b.append("\n").append(i).append(") ").append(errors.get(i));
}
throw new IOException(b.toString());
}
}
/**
   * Invalidate a block but do not delete the actual on-disk block file.
*
* It should only be used when deactivating disks.
*
* @param bpid the block pool ID.
* @param block The block to be invalidated.
*/
public void invalidate(String bpid, ReplicaInfo block) {
// If a DFSClient has the replica in its cache of short-circuit file
// descriptors (and the client is using ShortCircuitShm), invalidate it.
// The short-circuit registry is null in the unit tests, because the
    // datanode is a mock object.
if (datanode.getShortCircuitRegistry() != null) {
datanode.getShortCircuitRegistry().processBlockInvalidation(
new ExtendedBlockId(block.getBlockId(), bpid));
// If the block is cached, start uncaching it.
cacheManager.uncacheBlock(bpid, block.getBlockId());
}
datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, block),
block.getStorageUuid());
}
/**
* Asynchronously attempts to cache a single block via {@link FsDatasetCache}.
*/
private void cacheBlock(String bpid, long blockId) {
FsVolumeImpl volume;
String blockFileName;
long length, genstamp;
Executor volumeExecutor;
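    // Validate the replica and collect everything needed for caching while
    // holding the dataset lock; the actual caching work is submitted to the
    // cache manager outside the lock.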
synchronized (this) {
ReplicaInfo info = volumeMap.get(bpid, blockId);
boolean success = false;
try {
if (info == null) {
LOG.warn("Failed to cache block with id " + blockId + ", pool " +
bpid + ": ReplicaInfo not found.");
return;
}
if (info.getState() != ReplicaState.FINALIZED) {
LOG.warn("Failed to cache block with id " + blockId + ", pool " +
bpid + ": replica is not finalized; it is in state " +
info.getState());
return;
}
try {
volume = (FsVolumeImpl)info.getVolume();
if (volume == null) {
LOG.warn("Failed to cache block with id " + blockId + ", pool " +
bpid + ": volume not found.");
return;
}
} catch (ClassCastException e) {
LOG.warn("Failed to cache block with id " + blockId +
": volume was not an instance of FsVolumeImpl.");
return;
}
if (volume.isTransientStorage()) {
LOG.warn("Caching not supported on block with id " + blockId +
" since the volume is backed by RAM.");
return;
}
success = true;
} finally {
if (!success) {
cacheManager.numBlocksFailedToCache.incrementAndGet();
}
}
blockFileName = info.getBlockFile().getAbsolutePath();
length = info.getVisibleLength();
genstamp = info.getGenerationStamp();
volumeExecutor = volume.getCacheExecutor();
}
cacheManager.cacheBlock(blockId, bpid,
blockFileName, length, genstamp, volumeExecutor);
}
@Override // FsDatasetSpi
public void cache(String bpid, long[] blockIds) {
for (int i=0; i < blockIds.length; i++) {
cacheBlock(bpid, blockIds[i]);
}
}
@Override // FsDatasetSpi
public void uncache(String bpid, long[] blockIds) {
for (int i=0; i < blockIds.length; i++) {
cacheManager.uncacheBlock(bpid, blockIds[i]);
}
}
@Override
public boolean isCached(String bpid, long blockId) {
return cacheManager.isCached(bpid, blockId);
}
@Override // FsDatasetSpi
public synchronized boolean contains(final ExtendedBlock block) {
final long blockId = block.getLocalBlock().getBlockId();
return getFile(block.getBlockPoolId(), blockId, false) != null;
}
/**
   * Turn the block identifier into a filename.
   * @param bpid Block pool Id
   * @param blockId a block's id
   * @param touch if true, update the RAM disk replica's last-access time
   * @return on disk data file path; null if the replica does not exist
*/
File getFile(final String bpid, final long blockId, boolean touch) {
ReplicaInfo info = volumeMap.get(bpid, blockId);
if (info != null) {
if (touch && info.getVolume().isTransientStorage()) {
ramDiskReplicaTracker.touch(bpid, blockId);
datanode.getMetrics().incrRamDiskBlocksReadHits();
}
return info.getBlockFile();
}
return null;
}
/**
   * Check if the data directories are healthy.
   *
   * If some volumes failed, the caller must remove all the blocks that belong
   * to those failed volumes.
* @return the failed volumes. Returns null if no volume failed.
*/
@Override // FsDatasetSpi
public Set<File> checkDataDir() {
return volumes.checkDirs();
}
@Override // FsDatasetSpi
public String toString() {
return "FSDataset{dirpath='"+volumes+"'}";
}
private ObjectName mbeanName;
/**
* Register the FSDataset MBean using the name
* "hadoop:service=DataNode,name=FSDatasetState-<datanodeUuid>"
*/
void registerMBean(final String datanodeUuid) {
    // We wrap to bypass the standard MBean naming convention.
    // This wrapping can be removed in Java 6 as it is more flexible in
    // package naming for MBeans and their implementations.
try {
StandardMBean bean = new StandardMBean(this,FSDatasetMBean.class);
mbeanName = MBeans.register("DataNode", "FSDatasetState-" + datanodeUuid, bean);
} catch (NotCompliantMBeanException e) {
LOG.warn("Error registering FSDatasetState MBean", e);
}
LOG.info("Registered FSDatasetState MBean");
}
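  // Hedged example (not in the original source): the registered bean can be
  // inspected over JMX roughly as follows; the attribute name is an
  // assumption and depends on what FSDatasetMBean exposes, and the exact
  // domain string depends on the MBeans helper.
  //
  //   MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
  //   ObjectName name = new ObjectName(
  //       "hadoop:service=DataNode,name=FSDatasetState-" + datanodeUuid);
  //   Object capacity = mbs.getAttribute(name, "Capacity");  // assumed attribute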
@Override // FsDatasetSpi
public void shutdown() {
fsRunning = false;
if (lazyWriter != null) {
((LazyWriter) lazyWriter.getRunnable()).stop();
lazyWriter.interrupt();
}
if (mbeanName != null) {
MBeans.unregister(mbeanName);
}
if (asyncDiskService != null) {
asyncDiskService.shutdown();
}
if (asyncLazyPersistService != null) {
asyncLazyPersistService.shutdown();
}
if(volumes != null) {
volumes.shutdown();
}
if (lazyWriter != null) {
try {
lazyWriter.join();
} catch (InterruptedException ie) {
LOG.warn("FsDatasetImpl.shutdown ignoring InterruptedException " +
"from LazyWriter.join");
}
}
}
@Override // FSDatasetMBean
public String getStorageInfo() {
return toString();
}
/**
* Reconcile the difference between blocks on the disk and blocks in
* volumeMap
*
* Check the given block for inconsistencies. Look at the
* current state of the block and reconcile the differences as follows:
* <ul>
* <li>If the block file is missing, delete the block from volumeMap</li>
* <li>If the block file exists and the block is missing in volumeMap,
   * add the block to volumeMap</li>
* <li>If generation stamp does not match, then update the block with right
* generation stamp</li>
* <li>If the block length in memory does not match the actual block file length
* then mark the block as corrupt and update the block length in memory</li>
* <li>If the file in {@link ReplicaInfo} does not match the file on
* the disk, update {@link ReplicaInfo} with the correct file</li>
* </ul>
*
   * @param bpid Block pool ID of the block
   * @param blockId Block that differs
   * @param diskFile Block file on the disk
   * @param diskMetaFile Metadata file found on the disk
   * @param vol Volume of the block file
*/
@Override
public void checkAndUpdate(String bpid, long blockId, File diskFile,
File diskMetaFile, FsVolumeSpi vol) throws IOException {
Block corruptBlock = null;
ReplicaInfo memBlockInfo;
synchronized (this) {
memBlockInfo = volumeMap.get(bpid, blockId);
if (memBlockInfo != null && memBlockInfo.getState() != ReplicaState.FINALIZED) {
// Block is not finalized - ignore the difference
return;
}
final long diskGS = diskMetaFile != null && diskMetaFile.exists() ?
Block.getGenerationStamp(diskMetaFile.getName()) :
HdfsConstants.GRANDFATHER_GENERATION_STAMP;
if (diskFile == null || !diskFile.exists()) {
if (memBlockInfo == null) {
// Block file does not exist and block does not exist in memory
// If metadata file exists then delete it
if (diskMetaFile != null && diskMetaFile.exists()
&& diskMetaFile.delete()) {
LOG.warn("Deleted a metadata file without a block "
+ diskMetaFile.getAbsolutePath());
}
return;
}
if (!memBlockInfo.getBlockFile().exists()) {
// Block is in memory and not on the disk
// Remove the block from volumeMap
volumeMap.remove(bpid, blockId);
if (vol.isTransientStorage()) {
ramDiskReplicaTracker.discardReplica(bpid, blockId, true);
}
LOG.warn("Removed block " + blockId
+ " from memory with missing block file on the disk");
// Finally remove the metadata file
if (diskMetaFile != null && diskMetaFile.exists()
&& diskMetaFile.delete()) {
LOG.warn("Deleted a metadata file for the deleted block "
+ diskMetaFile.getAbsolutePath());
}
}
return;
}
/*
* Block file exists on the disk
*/
if (memBlockInfo == null) {
// Block is missing in memory - add the block to volumeMap
ReplicaInfo diskBlockInfo = new FinalizedReplica(blockId,
diskFile.length(), diskGS, vol, diskFile.getParentFile());
volumeMap.add(bpid, diskBlockInfo);
if (vol.isTransientStorage()) {
long lockedBytesReserved =
cacheManager.reserve(diskBlockInfo.getNumBytes()) > 0 ?
diskBlockInfo.getNumBytes() : 0;
ramDiskReplicaTracker.addReplica(
bpid, blockId, (FsVolumeImpl) vol, lockedBytesReserved);
}
LOG.warn("Added missing block to memory " + diskBlockInfo);
return;
}
/*
* Block exists in volumeMap and the block file exists on the disk
*/
// Compare block files
File memFile = memBlockInfo.getBlockFile();
if (memFile.exists()) {
if (memFile.compareTo(diskFile) != 0) {
if (diskMetaFile.exists()) {
if (memBlockInfo.getMetaFile().exists()) {
// We have two sets of block+meta files. Decide which one to
// keep.
ReplicaInfo diskBlockInfo = new FinalizedReplica(
blockId, diskFile.length(), diskGS, vol, diskFile.getParentFile());
((FsVolumeImpl) vol).getBlockPoolSlice(bpid).resolveDuplicateReplicas(
memBlockInfo, diskBlockInfo, volumeMap);
}
} else {
if (!diskFile.delete()) {
LOG.warn("Failed to delete " + diskFile + ". Will retry on next scan");
}
}
}
} else {
// Block refers to a block file that does not exist.
// Update the block with the file found on the disk. Since the block
// file and metadata file are found as a pair on the disk, update
// the block based on the metadata file found on the disk
LOG.warn("Block file in volumeMap "
+ memFile.getAbsolutePath()
+ " does not exist. Updating it to the file found during scan "
+ diskFile.getAbsolutePath());
memBlockInfo.setDir(diskFile.getParentFile());
memFile = diskFile;
LOG.warn("Updating generation stamp for block " + blockId
+ " from " + memBlockInfo.getGenerationStamp() + " to " + diskGS);
memBlockInfo.setGenerationStamp(diskGS);
}
// Compare generation stamp
if (memBlockInfo.getGenerationStamp() != diskGS) {
File memMetaFile = FsDatasetUtil.getMetaFile(diskFile,
memBlockInfo.getGenerationStamp());
if (memMetaFile.exists()) {
if (memMetaFile.compareTo(diskMetaFile) != 0) {
LOG.warn("Metadata file in memory "
+ memMetaFile.getAbsolutePath()
+ " does not match file found by scan "
+ (diskMetaFile == null? null: diskMetaFile.getAbsolutePath()));
}
} else {
// Metadata file corresponding to block in memory is missing
// If metadata file found during the scan is on the same directory
// as the block file, then use the generation stamp from it
long gs = diskMetaFile != null && diskMetaFile.exists()
&& diskMetaFile.getParent().equals(memFile.getParent()) ? diskGS
: HdfsConstants.GRANDFATHER_GENERATION_STAMP;
LOG.warn("Updating generation stamp for block " + blockId
+ " from " + memBlockInfo.getGenerationStamp() + " to " + gs);
memBlockInfo.setGenerationStamp(gs);
}
}
// Compare block size
if (memBlockInfo.getNumBytes() != memFile.length()) {
// Update the length based on the block file
corruptBlock = new Block(memBlockInfo);
LOG.warn("Updating size of block " + blockId + " from "
+ memBlockInfo.getNumBytes() + " to " + memFile.length());
memBlockInfo.setNumBytes(memFile.length());
}
}
// Send corrupt block report outside the lock
if (corruptBlock != null) {
LOG.warn("Reporting the block " + corruptBlock
+ " as corrupt due to length mismatch");
try {
datanode.reportBadBlocks(new ExtendedBlock(bpid, corruptBlock));
} catch (IOException e) {
LOG.warn("Failed to repot bad block " + corruptBlock, e);
}
}
}
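  // Summary of the reconciliation cases handled above (descriptive only,
  // derived from the method body; no new behavior):
  //
  //   disk file | volumeMap entry | action
  //   ----------+-----------------+------------------------------------------
  //   missing   | missing         | delete any orphaned meta file
  //   missing   | present         | remove entry from volumeMap, delete meta
  //   present   | missing         | add a FinalizedReplica to volumeMap
  //   present   | present         | resolve duplicate replicas, fix the
  //             |                 | generation stamp and length, and report a
  //             |                 | length mismatch as a corrupt block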
/**
* @deprecated use {@link #fetchReplicaInfo(String, long)} instead.
*/
@Override // FsDatasetSpi
@Deprecated
public ReplicaInfo getReplica(String bpid, long blockId) {
return volumeMap.get(bpid, blockId);
}
@Override
public synchronized String getReplicaString(String bpid, long blockId) {
final Replica r = volumeMap.get(bpid, blockId);
return r == null? "null": r.toString();
}
@Override // FsDatasetSpi
public synchronized ReplicaRecoveryInfo initReplicaRecovery(
RecoveringBlock rBlock) throws IOException {
return initReplicaRecovery(rBlock.getBlock().getBlockPoolId(), volumeMap,
rBlock.getBlock().getLocalBlock(), rBlock.getNewGenerationStamp(),
datanode.getDnConf().getXceiverStopTimeout());
}
/** static version of {@link #initReplicaRecovery(RecoveringBlock)}. */
static ReplicaRecoveryInfo initReplicaRecovery(String bpid, ReplicaMap map,
Block block, long recoveryId, long xceiverStopTimeout) throws IOException {
final ReplicaInfo replica = map.get(bpid, block.getBlockId());
LOG.info("initReplicaRecovery: " + block + ", recoveryId=" + recoveryId
+ ", replica=" + replica);
//check replica
if (replica == null) {
return null;
}
//stop writer if there is any
if (replica instanceof ReplicaInPipeline) {
final ReplicaInPipeline rip = (ReplicaInPipeline)replica;
rip.stopWriter(xceiverStopTimeout);
//check replica bytes on disk.
if (rip.getBytesOnDisk() < rip.getVisibleLength()) {
throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:"
+ " getBytesOnDisk() < getVisibleLength(), rip=" + rip);
}
//check the replica's files
checkReplicaFiles(rip);
}
//check generation stamp
if (replica.getGenerationStamp() < block.getGenerationStamp()) {
throw new IOException(
"replica.getGenerationStamp() < block.getGenerationStamp(), block="
+ block + ", replica=" + replica);
}
//check recovery id
if (replica.getGenerationStamp() >= recoveryId) {
throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:"
+ " replica.getGenerationStamp() >= recoveryId = " + recoveryId
+ ", block=" + block + ", replica=" + replica);
}
//check RUR
final ReplicaUnderRecovery rur;
if (replica.getState() == ReplicaState.RUR) {
rur = (ReplicaUnderRecovery)replica;
if (rur.getRecoveryID() >= recoveryId) {
throw new RecoveryInProgressException(
"rur.getRecoveryID() >= recoveryId = " + recoveryId
+ ", block=" + block + ", rur=" + rur);
}
final long oldRecoveryID = rur.getRecoveryID();
rur.setRecoveryID(recoveryId);
LOG.info("initReplicaRecovery: update recovery id for " + block
+ " from " + oldRecoveryID + " to " + recoveryId);
}
else {
rur = new ReplicaUnderRecovery(replica, recoveryId);
map.add(bpid, rur);
LOG.info("initReplicaRecovery: changing replica state for "
+ block + " from " + replica.getState()
+ " to " + rur.getState());
}
return rur.createInfo();
}
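  // Hedged sketch of how block recovery typically drives the two methods
  // above (variable names are hypothetical, not part of this class):
  //
  //   ReplicaRecoveryInfo info = dataset.initReplicaRecovery(rBlock);
  //   if (info != null) {
  //     // ... the recovery coordinator agrees on a new length, then:
  //     String storageID = dataset.updateReplicaUnderRecovery(
  //         oldBlock, recoveryId, newBlockId, newLength);
  //   }
  //
  // A null return means this datanode has no replica of the block.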
@Override // FsDatasetSpi
public synchronized String updateReplicaUnderRecovery(
final ExtendedBlock oldBlock,
final long recoveryId,
final long newBlockId,
final long newlength) throws IOException {
//get replica
final String bpid = oldBlock.getBlockPoolId();
final ReplicaInfo replica = volumeMap.get(bpid, oldBlock.getBlockId());
LOG.info("updateReplica: " + oldBlock
+ ", recoveryId=" + recoveryId
+ ", length=" + newlength
+ ", replica=" + replica);
//check replica
if (replica == null) {
throw new ReplicaNotFoundException(oldBlock);
}
//check replica state
if (replica.getState() != ReplicaState.RUR) {
throw new IOException("replica.getState() != " + ReplicaState.RUR
+ ", replica=" + replica);
}
//check replica's byte on disk
if (replica.getBytesOnDisk() != oldBlock.getNumBytes()) {
throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:"
+ " replica.getBytesOnDisk() != block.getNumBytes(), block="
+ oldBlock + ", replica=" + replica);
}
//check replica files before update
checkReplicaFiles(replica);
//update replica
final FinalizedReplica finalized = updateReplicaUnderRecovery(oldBlock
.getBlockPoolId(), (ReplicaUnderRecovery) replica, recoveryId,
newBlockId, newlength);
boolean copyTruncate = newBlockId != oldBlock.getBlockId();
if(!copyTruncate) {
assert finalized.getBlockId() == oldBlock.getBlockId()
&& finalized.getGenerationStamp() == recoveryId
&& finalized.getNumBytes() == newlength
: "Replica information mismatched: oldBlock=" + oldBlock
+ ", recoveryId=" + recoveryId + ", newlength=" + newlength
+ ", newBlockId=" + newBlockId + ", finalized=" + finalized;
} else {
assert finalized.getBlockId() == oldBlock.getBlockId()
&& finalized.getGenerationStamp() == oldBlock.getGenerationStamp()
&& finalized.getNumBytes() == oldBlock.getNumBytes()
: "Finalized and old information mismatched: oldBlock=" + oldBlock
+ ", genStamp=" + oldBlock.getGenerationStamp()
+ ", len=" + oldBlock.getNumBytes()
+ ", finalized=" + finalized;
}
//check replica files after update
checkReplicaFiles(finalized);
//return storage ID
return getVolume(new ExtendedBlock(bpid, finalized)).getStorageID();
}
private FinalizedReplica updateReplicaUnderRecovery(
String bpid,
ReplicaUnderRecovery rur,
long recoveryId,
long newBlockId,
long newlength) throws IOException {
//check recovery id
if (rur.getRecoveryID() != recoveryId) {
throw new IOException("rur.getRecoveryID() != recoveryId = " + recoveryId
+ ", rur=" + rur);
}
boolean copyOnTruncate = newBlockId > 0L && rur.getBlockId() != newBlockId;
File blockFile;
File metaFile;
// bump rur's GS to be recovery id
if(!copyOnTruncate) {
bumpReplicaGS(rur, recoveryId);
blockFile = rur.getBlockFile();
metaFile = rur.getMetaFile();
} else {
File[] copiedReplicaFiles =
copyReplicaWithNewBlockIdAndGS(rur, bpid, newBlockId, recoveryId);
blockFile = copiedReplicaFiles[1];
metaFile = copiedReplicaFiles[0];
}
//update length
if (rur.getNumBytes() < newlength) {
throw new IOException("rur.getNumBytes() < newlength = " + newlength
+ ", rur=" + rur);
}
if (rur.getNumBytes() > newlength) {
rur.unlinkBlock(1);
truncateBlock(blockFile, metaFile, rur.getNumBytes(), newlength);
if(!copyOnTruncate) {
// update RUR with the new length
rur.setNumBytes(newlength);
} else {
// Copying block to a new block with new blockId.
// Not truncating original block.
ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(
newBlockId, recoveryId, rur.getVolume(), blockFile.getParentFile(),
newlength);
newReplicaInfo.setNumBytes(newlength);
volumeMap.add(bpid, newReplicaInfo);
finalizeReplica(bpid, newReplicaInfo);
}
}
// finalize the block
return finalizeReplica(bpid, rur);
}
private File[] copyReplicaWithNewBlockIdAndGS(
ReplicaUnderRecovery replicaInfo, String bpid, long newBlkId, long newGS)
throws IOException {
String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
FsVolumeReference v = volumes.getNextVolume(
replicaInfo.getVolume().getStorageType(), replicaInfo.getNumBytes());
final File tmpDir = ((FsVolumeImpl) v.getVolume())
.getBlockPoolSlice(bpid).getTmpDir();
final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
final File dstBlockFile = new File(destDir, blockFileName);
final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
return copyBlockFiles(replicaInfo.getMetaFile(),
replicaInfo.getBlockFile(),
dstMetaFile, dstBlockFile, true, smallBufferSize);
}
@Override // FsDatasetSpi
public synchronized long getReplicaVisibleLength(final ExtendedBlock block)
throws IOException {
final Replica replica = getReplicaInfo(block.getBlockPoolId(),
block.getBlockId());
if (replica.getGenerationStamp() < block.getGenerationStamp()) {
throw new IOException(
"replica.getGenerationStamp() < block.getGenerationStamp(), block="
+ block + ", replica=" + replica);
}
return replica.getVisibleLength();
}
@Override
public void addBlockPool(String bpid, Configuration conf)
throws IOException {
LOG.info("Adding block pool " + bpid);
synchronized(this) {
volumes.addBlockPool(bpid, conf);
volumeMap.initBlockPool(bpid);
}
volumes.getAllVolumesMap(bpid, volumeMap, ramDiskReplicaTracker);
}
@Override
public synchronized void shutdownBlockPool(String bpid) {
LOG.info("Removing block pool " + bpid);
Map<DatanodeStorage, BlockListAsLongs> blocksPerVolume = getBlockReports(bpid);
volumeMap.cleanUpBlockPool(bpid);
volumes.removeBlockPool(bpid, blocksPerVolume);
}
/**
* Class for representing the Datanode volume information
*/
private static class VolumeInfo {
final String directory;
final long usedSpace; // size of space used by HDFS
final long freeSpace; // size of free space excluding reserved space
final long reservedSpace; // size of space reserved for non-HDFS
    final long reservedSpaceForRBW; // size of space reserved for RBW
VolumeInfo(FsVolumeImpl v, long usedSpace, long freeSpace) {
this.directory = v.toString();
this.usedSpace = usedSpace;
this.freeSpace = freeSpace;
this.reservedSpace = v.getReserved();
this.reservedSpaceForRBW = v.getReservedForRbw();
}
}
private Collection<VolumeInfo> getVolumeInfo() {
Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
for (FsVolumeImpl volume : volumes.getVolumes()) {
long used = 0;
long free = 0;
try (FsVolumeReference ref = volume.obtainReference()) {
used = volume.getDfsUsed();
free = volume.getAvailable();
} catch (ClosedChannelException e) {
continue;
} catch (IOException e) {
LOG.warn(e.getMessage());
used = 0;
free = 0;
}
info.add(new VolumeInfo(volume, used, free));
}
return info;
}
@Override
public Map<String, Object> getVolumeInfoMap() {
final Map<String, Object> info = new HashMap<String, Object>();
Collection<VolumeInfo> volumes = getVolumeInfo();
for (VolumeInfo v : volumes) {
final Map<String, Object> innerInfo = new HashMap<String, Object>();
innerInfo.put("usedSpace", v.usedSpace);
innerInfo.put("freeSpace", v.freeSpace);
innerInfo.put("reservedSpace", v.reservedSpace);
innerInfo.put("reservedSpaceForRBW", v.reservedSpaceForRBW);
info.put(v.directory, innerInfo);
}
return info;
}
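  // Illustrative shape of the map returned above (path and sizes are
  // hypothetical):
  //
  //   {
  //     "/data/1/dfs/dn/current" : {
  //       "usedSpace" : 1073741824,
  //       "freeSpace" : 53687091200,
  //       "reservedSpace" : 0,
  //       "reservedSpaceForRBW" : 0
  //     }
  //   }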
@Override //FsDatasetSpi
public synchronized void deleteBlockPool(String bpid, boolean force)
throws IOException {
List<FsVolumeImpl> curVolumes = volumes.getVolumes();
if (!force) {
for (FsVolumeImpl volume : curVolumes) {
try (FsVolumeReference ref = volume.obtainReference()) {
if (!volume.isBPDirEmpty(bpid)) {
LOG.warn(bpid + " has some block files, cannot delete unless forced");
throw new IOException("Cannot delete block pool, "
+ "it contains some block files");
}
} catch (ClosedChannelException e) {
// ignore.
}
}
}
for (FsVolumeImpl volume : curVolumes) {
try (FsVolumeReference ref = volume.obtainReference()) {
volume.deleteBPDirectories(bpid, force);
} catch (ClosedChannelException e) {
// ignore.
}
}
}
@Override // FsDatasetSpi
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
throws IOException {
synchronized(this) {
final Replica replica = volumeMap.get(block.getBlockPoolId(),
block.getBlockId());
if (replica == null) {
throw new ReplicaNotFoundException(block);
}
if (replica.getGenerationStamp() < block.getGenerationStamp()) {
throw new IOException(
"Replica generation stamp < block generation stamp, block="
+ block + ", replica=" + replica);
} else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
block.setGenerationStamp(replica.getGenerationStamp());
}
}
File datafile = getBlockFile(block);
File metafile = FsDatasetUtil.getMetaFile(datafile, block.getGenerationStamp());
BlockLocalPathInfo info = new BlockLocalPathInfo(block,
datafile.getAbsolutePath(), metafile.getAbsolutePath());
return info;
}
@Override // FsDatasetSpi
public HdfsBlocksMetadata getHdfsBlocksMetadata(String poolId,
long[] blockIds) throws IOException {
List<FsVolumeImpl> curVolumes = volumes.getVolumes();
// List of VolumeIds, one per volume on the datanode
List<byte[]> blocksVolumeIds = new ArrayList<>(curVolumes.size());
// List of indexes into the list of VolumeIds, pointing at the VolumeId of
// the volume that the block is on
List<Integer> blocksVolumeIndexes = new ArrayList<Integer>(blockIds.length);
// Initialize the list of VolumeIds simply by enumerating the volumes
for (int i = 0; i < curVolumes.size(); i++) {
blocksVolumeIds.add(ByteBuffer.allocate(4).putInt(i).array());
}
// Determine the index of the VolumeId of each block's volume, by comparing
// the block's volume against the enumerated volumes
for (int i = 0; i < blockIds.length; i++) {
long blockId = blockIds[i];
boolean isValid = false;
ReplicaInfo info = volumeMap.get(poolId, blockId);
int volumeIndex = 0;
if (info != null) {
FsVolumeSpi blockVolume = info.getVolume();
for (FsVolumeImpl volume : curVolumes) {
// This comparison of references should be safe
if (blockVolume == volume) {
isValid = true;
break;
}
volumeIndex++;
}
}
// Indicates that the block is not present, or not found in a data dir
if (!isValid) {
volumeIndex = Integer.MAX_VALUE;
}
blocksVolumeIndexes.add(volumeIndex);
}
return new HdfsBlocksMetadata(poolId, blockIds,
blocksVolumeIds, blocksVolumeIndexes);
}
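  // Hedged example of the encoding above (not in the original source): with
  // two volumes, blocksVolumeIds holds two 4-byte big-endian indexes,
  // [0,0,0,0] and [0,0,0,1]; a block stored on the second volume gets entry 1
  // in blocksVolumeIndexes, and a block that cannot be located gets
  // Integer.MAX_VALUE.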
@Override
public void enableTrash(String bpid) {
dataStorage.enableTrash(bpid);
}
@Override
public void clearTrash(String bpid) {
dataStorage.clearTrash(bpid);
}
@Override
public boolean trashEnabled(String bpid) {
return dataStorage.trashEnabled(bpid);
}
@Override
public void setRollingUpgradeMarker(String bpid) throws IOException {
dataStorage.setRollingUpgradeMarker(bpid);
}
@Override
public void clearRollingUpgradeMarker(String bpid) throws IOException {
dataStorage.clearRollingUpgradeMarker(bpid);
}
@Override
public void onCompleteLazyPersist(String bpId, long blockId,
long creationTime, File[] savedFiles, FsVolumeImpl targetVolume) {
synchronized (FsDatasetImpl.this) {
ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles);
targetVolume.incDfsUsed(bpId,
savedFiles[0].length() + savedFiles[1].length());
// Update metrics (ignore the metadata file size)
datanode.getMetrics().incrRamDiskBlocksLazyPersisted();
datanode.getMetrics().incrRamDiskBytesLazyPersisted(savedFiles[1].length());
datanode.getMetrics().addRamDiskBlocksLazyPersistWindowMs(
Time.monotonicNow() - creationTime);
if (LOG.isDebugEnabled()) {
LOG.debug("LazyWriter: Finish persisting RamDisk block: "
+ " block pool Id: " + bpId + " block id: " + blockId
+ " to block file " + savedFiles[1] + " and meta file " + savedFiles[0]
+ " on target volume " + targetVolume);
}
}
}
@Override
public void onFailLazyPersist(String bpId, long blockId) {
RamDiskReplica block = null;
block = ramDiskReplicaTracker.getReplica(bpId, blockId);
if (block != null) {
LOG.warn("Failed to save replica " + block + ". re-enqueueing it.");
ramDiskReplicaTracker.reenqueueReplicaNotPersisted(block);
}
}
@Override
public void submitBackgroundSyncFileRangeRequest(ExtendedBlock block,
FileDescriptor fd, long offset, long nbytes, int flags) {
FsVolumeImpl fsVolumeImpl = this.getVolume(block);
asyncDiskService.submitSyncFileRangeRequest(fsVolumeImpl, fd, offset,
nbytes, flags);
}
private boolean ramDiskConfigured() {
for (FsVolumeImpl v: volumes.getVolumes()){
if (v.isTransientStorage()) {
return true;
}
}
return false;
}
  // Add/remove the per-DISK-volume async lazy persist thread when a RAM disk
  // volume is added or removed.
  // This should only be called when the FsDataSetImpl#volumes list is finalized.
private void setupAsyncLazyPersistThreads() {
for (FsVolumeImpl v: volumes.getVolumes()){
setupAsyncLazyPersistThread(v);
}
}
private void setupAsyncLazyPersistThread(final FsVolumeImpl v) {
// Skip transient volumes
if (v.isTransientStorage()) {
return;
}
boolean ramDiskConfigured = ramDiskConfigured();
// Add thread for DISK volume if RamDisk is configured
if (ramDiskConfigured &&
asyncLazyPersistService != null &&
!asyncLazyPersistService.queryVolume(v.getCurrentDir())) {
asyncLazyPersistService.addVolume(v.getCurrentDir());
}
// Remove thread for DISK volume if RamDisk is not configured
if (!ramDiskConfigured &&
asyncLazyPersistService != null &&
asyncLazyPersistService.queryVolume(v.getCurrentDir())) {
asyncLazyPersistService.removeVolume(v.getCurrentDir());
}
}
private void removeOldReplica(ReplicaInfo replicaInfo,
ReplicaInfo newReplicaInfo, File blockFile, File metaFile,
long blockFileUsed, long metaFileUsed, final String bpid) {
// Before deleting the files from old storage we must notify the
// NN that the files are on the new storage. Else a blockReport from
// the transient storage might cause the NN to think the blocks are lost.
// Replicas must be evicted from client short-circuit caches, because the
// storage will no longer be same, and thus will require validating
// checksum. This also stops a client from holding file descriptors,
// which would prevent the OS from reclaiming the memory.
ExtendedBlock extendedBlock =
new ExtendedBlock(bpid, newReplicaInfo);
datanode.getShortCircuitRegistry().processBlockInvalidation(
ExtendedBlockId.fromExtendedBlock(extendedBlock));
datanode.notifyNamenodeReceivedBlock(
extendedBlock, null, newReplicaInfo.getStorageUuid());
// Remove the old replicas
if (blockFile.delete() || !blockFile.exists()) {
FsVolumeImpl volume = (FsVolumeImpl) replicaInfo.getVolume();
volume.onBlockFileDeletion(bpid, blockFileUsed);
if (metaFile.delete() || !metaFile.exists()) {
volume.onMetaFileDeletion(bpid, metaFileUsed);
}
}
// If deletion failed then the directory scanner will cleanup the blocks
// eventually.
}
class LazyWriter implements Runnable {
private volatile boolean shouldRun = true;
final int checkpointerInterval;
public LazyWriter(Configuration conf) {
this.checkpointerInterval = conf.getInt(
DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_DEFAULT_SEC);
}
/**
* Checkpoint a pending replica to persistent storage now.
* If we fail then move the replica to the end of the queue.
     * @return true if the save attempt completed without error (including
     *         when there was no replica to save); false if it failed.
*/
private boolean saveNextReplica() {
RamDiskReplica block = null;
FsVolumeReference targetReference;
FsVolumeImpl targetVolume;
ReplicaInfo replicaInfo;
boolean succeeded = false;
try {
block = ramDiskReplicaTracker.dequeueNextReplicaToPersist();
if (block != null) {
synchronized (FsDatasetImpl.this) {
replicaInfo = volumeMap.get(block.getBlockPoolId(), block.getBlockId());
// If replicaInfo is null, the block was either deleted before
// it could be checkpointed or it is already on persistent storage.
// This can occur if a second replica on persistent storage was found
// after the lazy write was scheduled.
if (replicaInfo != null &&
replicaInfo.getVolume().isTransientStorage()) {
// Pick a target volume to persist the block.
targetReference = volumes.getNextVolume(
StorageType.DEFAULT, replicaInfo.getNumBytes());
targetVolume = (FsVolumeImpl) targetReference.getVolume();
ramDiskReplicaTracker.recordStartLazyPersist(
block.getBlockPoolId(), block.getBlockId(), targetVolume);
if (LOG.isDebugEnabled()) {
LOG.debug("LazyWriter: Start persisting RamDisk block:"
+ " block pool Id: " + block.getBlockPoolId()
+ " block id: " + block.getBlockId()
+ " on target volume " + targetVolume);
}
asyncLazyPersistService.submitLazyPersistTask(
block.getBlockPoolId(), block.getBlockId(),
replicaInfo.getGenerationStamp(), block.getCreationTime(),
replicaInfo.getMetaFile(), replicaInfo.getBlockFile(),
targetReference);
}
}
}
succeeded = true;
} catch(IOException ioe) {
LOG.warn("Exception saving replica " + block, ioe);
} finally {
if (!succeeded && block != null) {
LOG.warn("Failed to save replica " + block + ". re-enqueueing it.");
onFailLazyPersist(block.getBlockPoolId(), block.getBlockId());
}
}
return succeeded;
}
/**
* Attempt to evict one or more transient block replicas until we
* have at least bytesNeeded bytes free.
*/
public void evictBlocks(long bytesNeeded) throws IOException {
int iterations = 0;
final long cacheCapacity = cacheManager.getCacheCapacity();
while (iterations++ < MAX_BLOCK_EVICTIONS_PER_ITERATION &&
(cacheCapacity - cacheManager.getCacheUsed()) < bytesNeeded) {
RamDiskReplica replicaState = ramDiskReplicaTracker.getNextCandidateForEviction();
if (replicaState == null) {
break;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Evicting block " + replicaState);
}
ReplicaInfo replicaInfo, newReplicaInfo;
File blockFile, metaFile;
long blockFileUsed, metaFileUsed;
final String bpid = replicaState.getBlockPoolId();
synchronized (FsDatasetImpl.this) {
replicaInfo = getReplicaInfo(replicaState.getBlockPoolId(),
replicaState.getBlockId());
Preconditions.checkState(replicaInfo.getVolume().isTransientStorage());
blockFile = replicaInfo.getBlockFile();
metaFile = replicaInfo.getMetaFile();
blockFileUsed = blockFile.length();
metaFileUsed = metaFile.length();
ramDiskReplicaTracker.discardReplica(replicaState.getBlockPoolId(),
replicaState.getBlockId(), false);
// Move the replica from lazyPersist/ to finalized/ on
// the target volume
BlockPoolSlice bpSlice =
replicaState.getLazyPersistVolume().getBlockPoolSlice(bpid);
File newBlockFile = bpSlice.activateSavedReplica(
replicaInfo, replicaState.getSavedMetaFile(),
replicaState.getSavedBlockFile());
newReplicaInfo =
new FinalizedReplica(replicaInfo.getBlockId(),
replicaInfo.getBytesOnDisk(),
replicaInfo.getGenerationStamp(),
replicaState.getLazyPersistVolume(),
newBlockFile.getParentFile());
// Update the volumeMap entry.
volumeMap.add(bpid, newReplicaInfo);
// Update metrics
datanode.getMetrics().incrRamDiskBlocksEvicted();
datanode.getMetrics().addRamDiskBlocksEvictionWindowMs(
Time.monotonicNow() - replicaState.getCreationTime());
if (replicaState.getNumReads() == 0) {
datanode.getMetrics().incrRamDiskBlocksEvictedWithoutRead();
}
// Delete the block+meta files from RAM disk and release locked
// memory.
removeOldReplica(replicaInfo, newReplicaInfo, blockFile, metaFile,
blockFileUsed, metaFileUsed, bpid);
}
}
}
@Override
public void run() {
int numSuccessiveFailures = 0;
while (fsRunning && shouldRun) {
try {
numSuccessiveFailures = saveNextReplica() ? 0 : (numSuccessiveFailures + 1);
// Sleep if we have no more work to do or if it looks like we are not
// making any forward progress. This is to ensure that if all persist
// operations are failing we don't keep retrying them in a tight loop.
if (numSuccessiveFailures >= ramDiskReplicaTracker.numReplicasNotPersisted()) {
Thread.sleep(checkpointerInterval * 1000);
numSuccessiveFailures = 0;
}
} catch (InterruptedException e) {
LOG.info("LazyWriter was interrupted, exiting");
break;
} catch (Exception e) {
LOG.warn("Ignoring exception in LazyWriter:", e);
}
}
}
public void stop() {
shouldRun = false;
}
}
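  // Hedged summary of the lazy-persist lifecycle implemented above
  // (descriptive only, no new behavior):
  //
  //   1. A replica written to a RAM disk volume is queued in
  //      ramDiskReplicaTracker.
  //   2. LazyWriter.saveNextReplica() picks a DISK volume and submits an
  //      async copy via asyncLazyPersistService.
  //   3. onCompleteLazyPersist() records the saved files and metrics;
  //      onFailLazyPersist() re-enqueues the replica.
  //   4. LazyWriter.evictBlocks() later promotes the saved copy to the
  //      finalized directory and deletes the RAM disk copy when locked
  //      memory is needed.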
@Override
public void setPinning(ExtendedBlock block) throws IOException {
if (!blockPinningEnabled) {
return;
}
File f = getBlockFile(block);
Path p = new Path(f.getAbsolutePath());
FsPermission oldPermission = localFS.getFileStatus(
new Path(f.getAbsolutePath())).getPermission();
//sticky bit is used for pinning purpose
FsPermission permission = new FsPermission(oldPermission.getUserAction(),
oldPermission.getGroupAction(), oldPermission.getOtherAction(), true);
localFS.setPermission(p, permission);
}
@Override
public boolean getPinning(ExtendedBlock block) throws IOException {
if (!blockPinningEnabled) {
return false;
}
File f = getBlockFile(block);
FileStatus fss = localFS.getFileStatus(new Path(f.getAbsolutePath()));
return fss.getPermission().getStickyBit();
}
@Override
public boolean isDeletingBlock(String bpid, long blockId) {
synchronized(deletingBlock) {
Set<Long> s = deletingBlock.get(bpid);
return s != null ? s.contains(blockId) : false;
}
}
public void removeDeletedBlocks(String bpid, Set<Long> blockIds) {
synchronized (deletingBlock) {
Set<Long> s = deletingBlock.get(bpid);
if (s != null) {
for (Long id : blockIds) {
s.remove(id);
}
}
}
}
private void addDeletingBlock(String bpid, Long blockId) {
synchronized(deletingBlock) {
Set<Long> s = deletingBlock.get(bpid);
if (s == null) {
s = new HashSet<Long>();
deletingBlock.put(bpid, s);
}
s.add(blockId);
}
}
void releaseLockedMemory(long count, boolean roundup) {
if (roundup) {
cacheManager.release(count);
} else {
cacheManager.releaseRoundDown(count);
}
}
/**
   * Attempt to evict blocks from the cache manager to free up the requested
   * number of bytes.
   *
   * @param bytesNeeded number of bytes to free
*/
@VisibleForTesting
public void evictLazyPersistBlocks(long bytesNeeded) {
try {
((LazyWriter) lazyWriter.getRunnable()).evictBlocks(bytesNeeded);
} catch(IOException ioe) {
LOG.info("Ignoring exception ", ioe);
}
}
/**
   * Attempt to reserve the given amount of memory with the cache manager.
   * @param bytesNeeded number of bytes to reserve
   * @return true if the reservation succeeded, false otherwise
*/
boolean reserveLockedMemory(long bytesNeeded) {
if (cacheManager.reserve(bytesNeeded) > 0) {
return true;
}
    // Round up bytes needed to the OS page size and attempt to evict
    // one or more blocks to free up the reservation.
bytesNeeded = cacheManager.roundUpPageSize(bytesNeeded);
evictLazyPersistBlocks(bytesNeeded);
return cacheManager.reserve(bytesNeeded) > 0;
}
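  // Hedged worked example (numbers are hypothetical): with a 4 KB OS page
  // size and bytesNeeded = 5000, a failed first reservation is rounded up to
  // 8192 bytes, lazy-persist blocks are evicted to free locked memory, and
  // the reservation is attempted once more.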
}
| 114,681 | 35.887102 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.io.OutputStreamWriter;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.CloseableReferenceCount;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.util.Time;
import org.codehaus.jackson.annotate.JsonProperty;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The underlying volume used to store replicas.
*
* It uses the {@link FsDatasetImpl} object for synchronization.
*/
@InterfaceAudience.Private
@VisibleForTesting
public class FsVolumeImpl implements FsVolumeSpi {
public static final Logger LOG =
LoggerFactory.getLogger(FsVolumeImpl.class);
private final FsDatasetImpl dataset;
private final String storageID;
private final StorageType storageType;
private final Map<String, BlockPoolSlice> bpSlices
= new ConcurrentHashMap<String, BlockPoolSlice>();
private final File currentDir; // <StorageDirectory>/current
private final DF usage;
private final long reserved;
private CloseableReferenceCount reference = new CloseableReferenceCount();
// Disk space reserved for open blocks.
private AtomicLong reservedForRbw;
// Capacity configured. This is useful when we want to
// limit the visible capacity for tests. If negative, then we just
// query from the filesystem.
protected volatile long configuredCapacity;
/**
* Per-volume worker pool that processes new blocks to cache.
* The maximum number of workers per volume is bounded (configurable via
* dfs.datanode.fsdatasetcache.max.threads.per.volume) to limit resource
* contention.
*/
protected ThreadPoolExecutor cacheExecutor;
FsVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
Configuration conf, StorageType storageType) throws IOException {
this.dataset = dataset;
this.storageID = storageID;
this.reserved = conf.getLong(
DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
this.reservedForRbw = new AtomicLong(0L);
this.currentDir = currentDir;
File parent = currentDir.getParentFile();
this.usage = new DF(parent, conf);
this.storageType = storageType;
this.configuredCapacity = -1;
cacheExecutor = initializeCacheExecutor(parent);
}
protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
if (storageType.isTransient()) {
return null;
}
if (dataset.datanode == null) {
// FsVolumeImpl is used in test.
return null;
}
final int maxNumThreads = dataset.datanode.getConf().getInt(
DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY,
DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT);
ThreadFactory workerFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("FsVolumeImplWorker-" + parent.toString() + "-%d")
.build();
ThreadPoolExecutor executor = new ThreadPoolExecutor(
1, maxNumThreads,
60, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(),
workerFactory);
executor.allowCoreThreadTimeOut(true);
return executor;
}
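  // Hedged configuration sketch (value is hypothetical): the pool size above
  // is controlled by the key documented on cacheExecutor, e.g.
  //
  //   <property>
  //     <name>dfs.datanode.fsdatasetcache.max.threads.per.volume</name>
  //     <value>4</value>
  //   </property>
  //
  // Transient (RAM-backed) volumes and test-only instances skip the pool.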
private void printReferenceTraceInfo(String op) {
StackTraceElement[] stack = Thread.currentThread().getStackTrace();
for (StackTraceElement ste : stack) {
switch (ste.getMethodName()) {
case "getDfsUsed":
case "getBlockPoolUsed":
case "getAvailable":
case "getVolumeMap":
return;
default:
break;
}
}
FsDatasetImpl.LOG.trace("Reference count: " + op + " " + this + ": " +
this.reference.getReferenceCount());
FsDatasetImpl.LOG.trace(
Joiner.on("\n").join(Thread.currentThread().getStackTrace()));
}
/**
* Increase the reference count. The caller must increase the reference count
* before issuing IOs.
*
   * @throws ClosedChannelException if the volume is already closed.
*/
private void reference() throws ClosedChannelException {
this.reference.reference();
if (FsDatasetImpl.LOG.isTraceEnabled()) {
printReferenceTraceInfo("incr");
}
}
/**
* Decrease the reference count.
*/
private void unreference() {
if (FsDatasetImpl.LOG.isTraceEnabled()) {
printReferenceTraceInfo("desc");
}
if (FsDatasetImpl.LOG.isDebugEnabled()) {
if (reference.getReferenceCount() <= 0) {
FsDatasetImpl.LOG.debug("Decrease reference count <= 0 on " + this +
Joiner.on("\n").join(Thread.currentThread().getStackTrace()));
}
}
checkReference();
this.reference.unreference();
}
private static class FsVolumeReferenceImpl implements FsVolumeReference {
private FsVolumeImpl volume;
FsVolumeReferenceImpl(FsVolumeImpl volume) throws ClosedChannelException {
this.volume = volume;
volume.reference();
}
/**
* Decreases the reference count.
     * @throws IOException never actually thrown by this implementation.
*/
@Override
public void close() throws IOException {
if (volume != null) {
volume.unreference();
volume = null;
}
}
@Override
public FsVolumeSpi getVolume() {
return this.volume;
}
}
@Override
public FsVolumeReference obtainReference() throws ClosedChannelException {
return new FsVolumeReferenceImpl(this);
}
private void checkReference() {
Preconditions.checkState(reference.getReferenceCount() > 0);
}
/**
   * Close this volume and wait for all other threads to release their
   * references to it.
* @throws IOException if the volume is closed or the waiting is interrupted.
*/
void closeAndWait() throws IOException {
try {
this.reference.setClosed();
} catch (ClosedChannelException e) {
throw new IOException("The volume has already closed.", e);
}
final int SLEEP_MILLIS = 500;
while (this.reference.getReferenceCount() > 0) {
if (FsDatasetImpl.LOG.isDebugEnabled()) {
FsDatasetImpl.LOG.debug(String.format(
"The reference count for %s is %d, wait to be 0.",
this, reference.getReferenceCount()));
}
try {
Thread.sleep(SLEEP_MILLIS);
} catch (InterruptedException e) {
throw new IOException(e);
}
}
}
File getCurrentDir() {
return currentDir;
}
File getRbwDir(String bpid) throws IOException {
return getBlockPoolSlice(bpid).getRbwDir();
}
File getLazyPersistDir(String bpid) throws IOException {
return getBlockPoolSlice(bpid).getLazypersistDir();
}
File getTmpDir(String bpid) throws IOException {
return getBlockPoolSlice(bpid).getTmpDir();
}
void onBlockFileDeletion(String bpid, long value) {
decDfsUsed(bpid, value);
if (isTransientStorage()) {
dataset.releaseLockedMemory(value, true);
}
}
void onMetaFileDeletion(String bpid, long value) {
decDfsUsed(bpid, value);
}
private void decDfsUsed(String bpid, long value) {
synchronized(dataset) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.decDfsUsed(value);
}
}
}
void incDfsUsed(String bpid, long value) {
synchronized(dataset) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.incDfsUsed(value);
}
}
}
@VisibleForTesting
public long getDfsUsed() throws IOException {
long dfsUsed = 0;
synchronized(dataset) {
for(BlockPoolSlice s : bpSlices.values()) {
dfsUsed += s.getDfsUsed();
}
}
return dfsUsed;
}
long getBlockPoolUsed(String bpid) throws IOException {
return getBlockPoolSlice(bpid).getDfsUsed();
}
/**
   * Return the configured capacity of the file system if one has been set;
   * otherwise, return the capacity of the file system excluding space
   * reserved for non-HDFS use.
*
* @return the unreserved number of bytes left in this filesystem. May be
* zero.
*/
@VisibleForTesting
public long getCapacity() {
if (configuredCapacity < 0) {
long remaining = usage.getCapacity() - reserved;
return remaining > 0 ? remaining : 0;
}
return configuredCapacity;
}
/**
* This function MUST NOT be used outside of tests.
*
* @param capacity
*/
@VisibleForTesting
public void setCapacityForTesting(long capacity) {
this.configuredCapacity = capacity;
}
  /**
* Calculate the available space of the filesystem, excluding space reserved
* for non-HDFS and space reserved for RBW
*
* @return the available number of bytes left in this filesystem. May be zero.
*/
@Override
public long getAvailable() throws IOException {
long remaining = getCapacity() - getDfsUsed() - reservedForRbw.get();
long available = usage.getAvailable() - reserved - reservedForRbw.get();
if (remaining > available) {
remaining = available;
}
return (remaining > 0) ? remaining : 0;
}
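  // Hedged worked example (numbers are hypothetical): on a 1 TB partition
  // with reserved = 100 GB, dfsUsed = 200 GB and no RBW reservation,
  // getCapacity() reports 900 GB and getAvailable() reports the smaller of
  // (900 GB - 200 GB) and (partition free space - 100 GB), floored at zero.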
@VisibleForTesting
public long getReservedForRbw() {
return reservedForRbw.get();
}
long getReserved(){
return reserved;
}
BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp == null) {
throw new IOException("block pool " + bpid + " is not found");
}
return bp;
}
@Override
public String getBasePath() {
return currentDir.getParent();
}
@Override
public boolean isTransientStorage() {
return storageType.isTransient();
}
@Override
public String getPath(String bpid) throws IOException {
return getBlockPoolSlice(bpid).getDirectory().getAbsolutePath();
}
@Override
public File getFinalizedDir(String bpid) throws IOException {
return getBlockPoolSlice(bpid).getFinalizedDir();
}
/**
   * Return a copy of the list of currently active block pool IDs.
*/
@Override
public String[] getBlockPoolList() {
return bpSlices.keySet().toArray(new String[bpSlices.keySet().size()]);
}
/**
* Temporary files. They get moved to the finalized block directory when
* the block is finalized.
*/
File createTmpFile(String bpid, Block b) throws IOException {
checkReference();
return getBlockPoolSlice(bpid).createTmpFile(b);
}
@Override
public void reserveSpaceForRbw(long bytesToReserve) {
if (bytesToReserve != 0) {
reservedForRbw.addAndGet(bytesToReserve);
}
}
@Override
public void releaseReservedSpace(long bytesToRelease) {
if (bytesToRelease != 0) {
long oldReservation, newReservation;
do {
oldReservation = reservedForRbw.get();
newReservation = oldReservation - bytesToRelease;
if (newReservation < 0) {
// Failsafe, this should never occur in practice, but if it does we don't
// want to start advertising more space than we have available.
newReservation = 0;
}
} while (!reservedForRbw.compareAndSet(oldReservation, newReservation));
}
}
@Override
public void releaseLockedMemory(long bytesToRelease) {
if (isTransientStorage()) {
dataset.releaseLockedMemory(bytesToRelease, false);
}
}
private enum SubdirFilter implements FilenameFilter {
INSTANCE;
@Override
public boolean accept(File dir, String name) {
return name.startsWith("subdir");
}
}
private enum BlockFileFilter implements FilenameFilter {
INSTANCE;
@Override
public boolean accept(File dir, String name) {
return !name.endsWith(".meta") &&
name.startsWith(Block.BLOCK_FILE_PREFIX);
}
}
@VisibleForTesting
public static String nextSorted(List<String> arr, String prev) {
int res = 0;
if (prev != null) {
res = Collections.binarySearch(arr, prev);
if (res < 0) {
res = -1 - res;
} else {
res++;
}
}
if (res >= arr.size()) {
return null;
}
return arr.get(res);
}
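  // Illustrative behavior of nextSorted (inputs are hypothetical):
  //   nextSorted(["a", "c", "e"], null) -> "a"
  //   nextSorted(["a", "c", "e"], "b")  -> "c"   (first entry after "b")
  //   nextSorted(["a", "c", "e"], "c")  -> "e"
  //   nextSorted(["a", "c", "e"], "e")  -> null  (no more entries)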
private static class BlockIteratorState {
BlockIteratorState() {
lastSavedMs = iterStartMs = Time.now();
curFinalizedDir = null;
curFinalizedSubDir = null;
curEntry = null;
atEnd = false;
}
// The wall-clock ms since the epoch at which this iterator was last saved.
@JsonProperty
private long lastSavedMs;
// The wall-clock ms since the epoch at which this iterator was created.
@JsonProperty
private long iterStartMs;
@JsonProperty
private String curFinalizedDir;
@JsonProperty
private String curFinalizedSubDir;
@JsonProperty
private String curEntry;
@JsonProperty
private boolean atEnd;
}
/**
* A BlockIterator implementation for FsVolumeImpl.
*/
private class BlockIteratorImpl implements FsVolumeSpi.BlockIterator {
private final File bpidDir;
private final String name;
private final String bpid;
private long maxStalenessMs = 0;
private List<String> cache;
private long cacheMs;
private BlockIteratorState state;
BlockIteratorImpl(String bpid, String name) {
this.bpidDir = new File(currentDir, bpid);
this.name = name;
this.bpid = bpid;
rewind();
}
/**
* Get the next subdirectory within the block pool slice.
*
* @return The next subdirectory within the block pool slice, or
* null if there are no more.
*/
private String getNextSubDir(String prev, File dir)
throws IOException {
List<String> children =
IOUtils.listDirectory(dir, SubdirFilter.INSTANCE);
cache = null;
cacheMs = 0;
if (children.size() == 0) {
LOG.trace("getNextSubDir({}, {}): no subdirectories found in {}",
storageID, bpid, dir.getAbsolutePath());
return null;
}
Collections.sort(children);
String nextSubDir = nextSorted(children, prev);
if (nextSubDir == null) {
LOG.trace("getNextSubDir({}, {}): no more subdirectories found in {}",
storageID, bpid, dir.getAbsolutePath());
} else {
LOG.trace("getNextSubDir({}, {}): picking next subdirectory {} " +
"within {}", storageID, bpid, nextSubDir, dir.getAbsolutePath());
}
return nextSubDir;
}
private String getNextFinalizedDir() throws IOException {
File dir = Paths.get(
bpidDir.getAbsolutePath(), "current", "finalized").toFile();
return getNextSubDir(state.curFinalizedDir, dir);
}
private String getNextFinalizedSubDir() throws IOException {
if (state.curFinalizedDir == null) {
return null;
}
File dir = Paths.get(
bpidDir.getAbsolutePath(), "current", "finalized",
state.curFinalizedDir).toFile();
return getNextSubDir(state.curFinalizedSubDir, dir);
}
private List<String> getSubdirEntries() throws IOException {
if (state.curFinalizedSubDir == null) {
return null; // There are no entries in the null subdir.
}
long now = Time.monotonicNow();
if (cache != null) {
long delta = now - cacheMs;
if (delta < maxStalenessMs) {
return cache;
} else {
LOG.trace("getSubdirEntries({}, {}): purging entries cache for {} " +
"after {} ms.", storageID, bpid, state.curFinalizedSubDir, delta);
cache = null;
}
}
File dir = Paths.get(bpidDir.getAbsolutePath(), "current", "finalized",
state.curFinalizedDir, state.curFinalizedSubDir).toFile();
List<String> entries =
IOUtils.listDirectory(dir, BlockFileFilter.INSTANCE);
if (entries.size() == 0) {
entries = null;
} else {
Collections.sort(entries);
}
if (entries == null) {
LOG.trace("getSubdirEntries({}, {}): no entries found in {}",
storageID, bpid, dir.getAbsolutePath());
} else {
LOG.trace("getSubdirEntries({}, {}): listed {} entries in {}",
storageID, bpid, entries.size(), dir.getAbsolutePath());
}
cache = entries;
cacheMs = now;
return cache;
}
/**
* Get the next block.<p/>
*
* Each volume has a hierarchical structure.<p/>
*
* <code>
* BPID B0
* finalized/
* subdir0
* subdir0
* blk_000
* blk_001
* ...
* subdir1
* subdir0
* ...
* rbw/
* </code>
*
* When we run out of entries at one level of the structure, we search
* progressively higher levels. For example, when we run out of blk_
* entries in a subdirectory, we search for the next subdirectory.
* And so on.
*/
@Override
public ExtendedBlock nextBlock() throws IOException {
if (state.atEnd) {
return null;
}
try {
while (true) {
List<String> entries = getSubdirEntries();
if (entries != null) {
state.curEntry = nextSorted(entries, state.curEntry);
if (state.curEntry == null) {
LOG.trace("nextBlock({}, {}): advancing from {} to next " +
"subdirectory.", storageID, bpid, state.curFinalizedSubDir);
} else {
ExtendedBlock block =
new ExtendedBlock(bpid, Block.filename2id(state.curEntry));
LOG.trace("nextBlock({}, {}): advancing to {}",
storageID, bpid, block);
return block;
}
}
state.curFinalizedSubDir = getNextFinalizedSubDir();
if (state.curFinalizedSubDir == null) {
state.curFinalizedDir = getNextFinalizedDir();
if (state.curFinalizedDir == null) {
state.atEnd = true;
return null;
}
}
}
} catch (IOException e) {
state.atEnd = true;
LOG.error("nextBlock({}, {}): I/O error", storageID, bpid, e);
throw e;
}
}
@Override
public boolean atEnd() {
return state.atEnd;
}
@Override
public void rewind() {
cache = null;
cacheMs = 0;
state = new BlockIteratorState();
}
@Override
public void save() throws IOException {
state.lastSavedMs = Time.now();
boolean success = false;
ObjectMapper mapper = new ObjectMapper();
try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
new FileOutputStream(getTempSaveFile(), false), "UTF-8"))) {
mapper.writerWithDefaultPrettyPrinter().writeValue(writer, state);
success = true;
} finally {
if (!success) {
          if (!getTempSaveFile().delete()) {
LOG.debug("save({}, {}): error deleting temporary file.",
storageID, bpid);
}
}
}
Files.move(getTempSaveFile().toPath(), getSaveFile().toPath(),
StandardCopyOption.ATOMIC_MOVE);
if (LOG.isTraceEnabled()) {
LOG.trace("save({}, {}): saved {}", storageID, bpid,
mapper.writerWithDefaultPrettyPrinter().writeValueAsString(state));
}
}
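    // Hedged example of the persisted cursor (field values are hypothetical):
    //
    //   {
    //     "lastSavedMs" : 1433459577690,
    //     "iterStartMs" : 1433459313421,
    //     "curFinalizedDir" : "subdir0",
    //     "curFinalizedSubDir" : "subdir1",
    //     "curEntry" : "blk_1073741825",
    //     "atEnd" : false
    //   }
    //
    // The state is written to <name>.cursor.tmp under the block pool
    // directory and then atomically moved to <name>.cursor.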
public void load() throws IOException {
ObjectMapper mapper = new ObjectMapper();
File file = getSaveFile();
this.state = mapper.reader(BlockIteratorState.class).readValue(file);
LOG.trace("load({}, {}): loaded iterator {} from {}: {}", storageID,
bpid, name, file.getAbsoluteFile(),
mapper.writerWithDefaultPrettyPrinter().writeValueAsString(state));
}
File getSaveFile() {
return new File(bpidDir, name + ".cursor");
}
File getTempSaveFile() {
return new File(bpidDir, name + ".cursor.tmp");
}
@Override
public void setMaxStalenessMs(long maxStalenessMs) {
this.maxStalenessMs = maxStalenessMs;
}
@Override
public void close() throws IOException {
// No action needed for this volume implementation.
}
@Override
public long getIterStartMs() {
return state.iterStartMs;
}
@Override
public long getLastSavedMs() {
return state.lastSavedMs;
}
@Override
public String getBlockPoolId() {
return bpid;
}
}
@Override
public BlockIterator newBlockIterator(String bpid, String name) {
return new BlockIteratorImpl(bpid, name);
}
@Override
public BlockIterator loadBlockIterator(String bpid, String name)
throws IOException {
BlockIteratorImpl iter = new BlockIteratorImpl(bpid, name);
iter.load();
return iter;
}
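  // Hedged usage sketch for the block iterator API above (the iterator name
  // and surrounding variables are hypothetical):
  //
  //   try (FsVolumeReference ref = volume.obtainReference()) {
  //     FsVolumeSpi.BlockIterator iter =
  //         ref.getVolume().newBlockIterator(bpid, "scanner");
  //     ExtendedBlock block;
  //     while ((block = iter.nextBlock()) != null) {
  //       // process the block ...
  //     }
  //     iter.save();   // persist the cursor so a restart can resume here
  //     iter.close();
  //   }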
@Override
public FsDatasetSpi getDataset() {
return dataset;
}
/**
* RBW files. They get moved to the finalized block directory when
* the block is finalized.
*/
File createRbwFile(String bpid, Block b) throws IOException {
checkReference();
reserveSpaceForRbw(b.getNumBytes());
try {
return getBlockPoolSlice(bpid).createRbwFile(b);
} catch (IOException exception) {
releaseReservedSpace(b.getNumBytes());
throw exception;
}
}
/**
   * Move a finalized replica's block file under the finalized directory of
   * the given block pool and release the space reserved for it while it was
   * being written.
   *
   * @param bytesReservedForRbw Space that was reserved during
   * block creation. Now that the block is being finalized we
   * can free up this space.
   * @return the block file in its finalized location
* @throws IOException
*/
File addFinalizedBlock(String bpid, Block b,
File f, long bytesReservedForRbw)
throws IOException {
releaseReservedSpace(bytesReservedForRbw);
return getBlockPoolSlice(bpid).addBlock(b, f);
}
Executor getCacheExecutor() {
return cacheExecutor;
}
void checkDirs() throws DiskErrorException {
// TODO:FEDERATION valid synchronization
for(BlockPoolSlice s : bpSlices.values()) {
s.checkDirs();
}
}
void getVolumeMap(ReplicaMap volumeMap,
final RamDiskReplicaTracker ramDiskReplicaMap)
throws IOException {
for(BlockPoolSlice s : bpSlices.values()) {
s.getVolumeMap(volumeMap, ramDiskReplicaMap);
}
}
void getVolumeMap(String bpid, ReplicaMap volumeMap,
final RamDiskReplicaTracker ramDiskReplicaMap)
throws IOException {
getBlockPoolSlice(bpid).getVolumeMap(volumeMap, ramDiskReplicaMap);
}
@Override
public String toString() {
return currentDir.getAbsolutePath();
}
void shutdown() {
if (cacheExecutor != null) {
cacheExecutor.shutdown();
}
Set<Entry<String, BlockPoolSlice>> set = bpSlices.entrySet();
for (Entry<String, BlockPoolSlice> entry : set) {
entry.getValue().shutdown(null);
}
}
void addBlockPool(String bpid, Configuration conf) throws IOException {
File bpdir = new File(currentDir, bpid);
BlockPoolSlice bp = new BlockPoolSlice(bpid, this, bpdir, conf);
bpSlices.put(bpid, bp);
}
void shutdownBlockPool(String bpid, BlockListAsLongs blocksListsAsLongs) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.shutdown(blocksListsAsLongs);
}
bpSlices.remove(bpid);
}
boolean isBPDirEmpty(String bpid) throws IOException {
File volumeCurrentDir = this.getCurrentDir();
File bpDir = new File(volumeCurrentDir, bpid);
File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
File finalizedDir = new File(bpCurrentDir,
DataStorage.STORAGE_DIR_FINALIZED);
File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
if (finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive(
finalizedDir)) {
return false;
}
if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
return false;
}
return true;
}
void deleteBPDirectories(String bpid, boolean force) throws IOException {
File volumeCurrentDir = this.getCurrentDir();
File bpDir = new File(volumeCurrentDir, bpid);
if (!bpDir.isDirectory()) {
// nothing to be deleted
return;
}
File tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
File finalizedDir = new File(bpCurrentDir,
DataStorage.STORAGE_DIR_FINALIZED);
File lazypersistDir = new File(bpCurrentDir,
DataStorage.STORAGE_DIR_LAZY_PERSIST);
File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
if (force) {
FileUtil.fullyDelete(bpDir);
} else {
if (!rbwDir.delete()) {
throw new IOException("Failed to delete " + rbwDir);
}
if (!DatanodeUtil.dirNoFilesRecursive(finalizedDir) ||
!FileUtil.fullyDelete(finalizedDir)) {
throw new IOException("Failed to delete " + finalizedDir);
}
if (lazypersistDir.exists() &&
((!DatanodeUtil.dirNoFilesRecursive(lazypersistDir) ||
!FileUtil.fullyDelete(lazypersistDir)))) {
throw new IOException("Failed to delete " + lazypersistDir);
}
FileUtil.fullyDelete(tmpDir);
for (File f : FileUtil.listFiles(bpCurrentDir)) {
if (!f.delete()) {
throw new IOException("Failed to delete " + f);
}
}
if (!bpCurrentDir.delete()) {
throw new IOException("Failed to delete " + bpCurrentDir);
}
for (File f : FileUtil.listFiles(bpDir)) {
if (!f.delete()) {
throw new IOException("Failed to delete " + f);
}
}
if (!bpDir.delete()) {
throw new IOException("Failed to delete " + bpDir);
}
}
}
@Override
public String getStorageID() {
return storageID;
}
@Override
public StorageType getStorageType() {
return storageType;
}
DatanodeStorage toDatanodeStorage() {
return new DatanodeStorage(storageID, DatanodeStorage.State.NORMAL, storageType);
}
}
| 28,467 | 29.252922 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/SimpleHttpProxyHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web;
import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION;
import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpRequestEncoder;
import io.netty.handler.codec.http.HttpResponseEncoder;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
/**
* Dead simple session-layer HTTP proxy. It gets the HTTP responses
* inside the context, assuming that the remote peer is reasonably fast and
* the response is small. The upper layer should filter out malicious
* inputs.
*/
class SimpleHttpProxyHandler extends SimpleChannelInboundHandler<HttpRequest> {
private String uri;
private Channel proxiedChannel;
private final InetSocketAddress host;
static final Log LOG = DatanodeHttpServer.LOG;
SimpleHttpProxyHandler(InetSocketAddress host) {
this.host = host;
}
private static class Forwarder extends ChannelInboundHandlerAdapter {
private final String uri;
private final Channel client;
private Forwarder(String uri, Channel client) {
this.uri = uri;
this.client = client;
}
@Override
public void channelInactive(ChannelHandlerContext ctx) {
closeOnFlush(client);
}
@Override
public void channelRead(final ChannelHandlerContext ctx, Object msg) {
client.writeAndFlush(msg).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) {
if (future.isSuccess()) {
ctx.channel().read();
} else {
LOG.debug("Proxy failed. Cause: ", future.cause());
future.channel().close();
}
}
});
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
LOG.debug("Proxy for " + uri + " failed. cause: ", cause);
closeOnFlush(ctx.channel());
}
}
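// Data-flow sketch (illustrative): the client channel feeds channelRead0()
// below, which opens a second channel to the proxied server and re-sends the
// request through an HttpRequestEncoder; this Forwarder then copies the raw
// response bytes from the proxied channel back to the client unchanged (the
// client's HttpResponseEncoder is removed first), and both sides are closed
// once either end goes inactive.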
@Override
public void channelRead0
(final ChannelHandlerContext ctx, final HttpRequest req) {
uri = req.uri();
final Channel client = ctx.channel();
Bootstrap proxiedServer = new Bootstrap()
.group(client.eventLoop())
.channel(NioSocketChannel.class)
.handler(new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
p.addLast(new HttpRequestEncoder(), new Forwarder(uri, client));
}
});
ChannelFuture f = proxiedServer.connect(host);
proxiedChannel = f.channel();
f.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if (future.isSuccess()) {
ctx.channel().pipeline().remove(HttpResponseEncoder.class);
HttpRequest newReq = new DefaultFullHttpRequest(HTTP_1_1,
req.method(), req.uri());
newReq.headers().add(req.headers());
newReq.headers().set(CONNECTION, HttpHeaderValues.CLOSE);
future.channel().writeAndFlush(newReq);
} else {
DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1,
INTERNAL_SERVER_ERROR);
resp.headers().set(CONNECTION, HttpHeaderValues.CLOSE);
LOG.info("Proxy " + uri + " failed. Cause: ", future.cause());
ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
client.close();
}
}
});
}
@Override
public void channelInactive(ChannelHandlerContext ctx) {
if (proxiedChannel != null) {
proxiedChannel.close();
proxiedChannel = null;
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
LOG.info("Proxy for " + uri + " failed. cause: ", cause);
if (proxiedChannel != null) {
proxiedChannel.close();
proxiedChannel = null;
}
ctx.close();
}
private static void closeOnFlush(Channel ch) {
if (ch.isActive()) {
ch.writeAndFlush(Unpooled.EMPTY_BUFFER)
.addListener(ChannelFutureListener.CLOSE);
}
}
}
| 5,780 | 34.685185 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/URLDispatcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web;
import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.WEBHDFS_PREFIX;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.HttpRequest;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler;
class URLDispatcher extends SimpleChannelInboundHandler<HttpRequest> {
private final InetSocketAddress proxyHost;
private final Configuration conf;
private final Configuration confForCreate;
URLDispatcher(InetSocketAddress proxyHost, Configuration conf,
Configuration confForCreate) {
this.proxyHost = proxyHost;
this.conf = conf;
this.confForCreate = confForCreate;
}
@Override
protected void channelRead0(ChannelHandlerContext ctx, HttpRequest req)
throws Exception {
String uri = req.uri();
ChannelPipeline p = ctx.pipeline();
if (uri.startsWith(WEBHDFS_PREFIX)) {
WebHdfsHandler h = new WebHdfsHandler(conf, confForCreate);
p.replace(this, WebHdfsHandler.class.getSimpleName(), h);
h.channelRead0(ctx, req);
} else {
SimpleHttpProxyHandler h = new SimpleHttpProxyHandler(proxyHost);
p.replace(this, SimpleHttpProxyHandler.class.getSimpleName(), h);
h.channelRead0(ctx, req);
}
}
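// Dispatch sketch (illustrative): URIs under "/webhdfs/v1" are served
// in-process by WebHdfsHandler, while everything else (for example the HTML
// web UI or the "/jmx" and "/conf" servlets) is forwarded to the internal
// Jetty server through SimpleHttpProxyHandler.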
}
| 2,293 | 37.881356 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/PortUnificationServerHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web;
import java.net.InetSocketAddress;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.web.dtp.DtpHttp2Handler;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;
import io.netty.handler.codec.http.HttpServerCodec;
import io.netty.handler.codec.http2.Http2CodecUtil;
import io.netty.handler.stream.ChunkedWriteHandler;
/**
* A port unification handler to support HTTP/1.1 and HTTP/2 on the same port.
*/
@InterfaceAudience.Private
public class PortUnificationServerHandler extends ByteToMessageDecoder {
private static final ByteBuf HTTP2_CLIENT_CONNECTION_PREFACE = Http2CodecUtil
.connectionPrefaceBuf();
// we only want to support HTTP/1.1 and HTTP/2, so the first 3 bytes are
// enough: no valid HTTP/1.1 request can start with "PRI"
private static final int MAGIC_HEADER_LENGTH = 3;
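// For reference: the preface buffer above wraps the 24-octet HTTP/2 client
// connection preface "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" (RFC 7540, section
// 3.5); only its first MAGIC_HEADER_LENGTH octets are compared in decode().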
private final InetSocketAddress proxyHost;
private final Configuration conf;
private final Configuration confForCreate;
public PortUnificationServerHandler(InetSocketAddress proxyHost,
Configuration conf, Configuration confForCreate) {
this.proxyHost = proxyHost;
this.conf = conf;
this.confForCreate = confForCreate;
}
private void configureHttp1(ChannelHandlerContext ctx) {
ctx.pipeline().addLast(new HttpServerCodec(), new ChunkedWriteHandler(),
new URLDispatcher(proxyHost, conf, confForCreate));
}
private void configureHttp2(ChannelHandlerContext ctx) {
ctx.pipeline().addLast(new DtpHttp2Handler());
}
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in,
List<Object> out) throws Exception {
if (in.readableBytes() < MAGIC_HEADER_LENGTH) {
return;
}
if (ByteBufUtil.equals(in, 0, HTTP2_CLIENT_CONNECTION_PREFACE, 0,
MAGIC_HEADER_LENGTH)) {
configureHttp2(ctx);
} else {
configureHttp1(ctx);
}
ctx.pipeline().remove(this);
}
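// Behavior sketch (illustrative): a stream beginning with
//   "PRI * HTTP/2.0..."            selects configureHttp2(), while
//   "GET /webhdfs/v1/... HTTP/1.1" selects configureHttp1();
// in both cases this decoder removes itself, and ByteToMessageDecoder then
// hands the already-buffered bytes on to the newly installed handlers.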
}
| 2,975 | 33.604651 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFactory;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;
import io.netty.handler.ssl.SslHandler;
import io.netty.handler.stream.ChunkedWriteHandler;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets;
import org.apache.hadoop.hdfs.server.namenode.StreamFile;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.ssl.SSLFactory;
import java.io.Closeable;
import java.io.IOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.net.SocketException;
import java.net.URI;
import java.nio.channels.ServerSocketChannel;
import java.security.GeneralSecurityException;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
public class DatanodeHttpServer implements Closeable {
private final HttpServer2 infoServer;
private final EventLoopGroup bossGroup;
private final EventLoopGroup workerGroup;
private final ServerSocketChannel externalHttpChannel;
private final ServerBootstrap httpServer;
private final SSLFactory sslFactory;
private final ServerBootstrap httpsServer;
private final Configuration conf;
private final Configuration confForCreate;
private InetSocketAddress httpAddress;
private InetSocketAddress httpsAddress;
static final Log LOG = LogFactory.getLog(DatanodeHttpServer.class);
public DatanodeHttpServer(final Configuration conf,
final DataNode datanode,
final ServerSocketChannel externalHttpChannel)
throws IOException {
this.conf = conf;
Configuration confForInfoServer = new Configuration(conf);
confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
HttpServer2.Builder builder = new HttpServer2.Builder()
.setName("datanode")
.setConf(confForInfoServer)
.setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
.hostName(getHostnameForSpnegoPrincipal(confForInfoServer))
.addEndpoint(URI.create("http://localhost:0"))
.setFindPort(true);
this.infoServer = builder.build();
this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
this.infoServer.addInternalServlet(null, "/getFileChecksum/*",
FileChecksumServlets.GetServlet.class);
this.infoServer.setAttribute("datanode", datanode);
this.infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
this.infoServer.addServlet(null, "/blockScannerReport",
BlockScanner.Servlet.class);
this.infoServer.start();
final InetSocketAddress jettyAddr = infoServer.getConnectorAddress(0);
this.confForCreate = new Configuration(conf);
confForCreate.set(FsPermission.UMASK_LABEL, "000");
this.bossGroup = new NioEventLoopGroup();
this.workerGroup = new NioEventLoopGroup();
this.externalHttpChannel = externalHttpChannel;
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
if (policy.isHttpEnabled()) {
this.httpServer = new ServerBootstrap().group(bossGroup, workerGroup)
.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel ch) throws Exception {
ch.pipeline().addLast(new PortUnificationServerHandler(jettyAddr,
conf, confForCreate));
}
});
if (externalHttpChannel == null) {
httpServer.channel(NioServerSocketChannel.class);
} else {
httpServer.channelFactory(new ChannelFactory<NioServerSocketChannel>() {
@Override
public NioServerSocketChannel newChannel() {
return new NioServerSocketChannel(externalHttpChannel) {
// The channel has already been bound externally via JSVC,
// so bind() becomes a no-op.
@Override
protected void doBind(SocketAddress localAddress) throws Exception {}
};
}
});
}
} else {
this.httpServer = null;
}
if (policy.isHttpsEnabled()) {
this.sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
try {
sslFactory.init();
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
this.httpsServer = new ServerBootstrap().group(bossGroup, workerGroup)
.channel(NioServerSocketChannel.class)
.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
p.addLast(
new SslHandler(sslFactory.createSSLEngine()),
new HttpRequestDecoder(),
new HttpResponseEncoder(),
new ChunkedWriteHandler(),
new URLDispatcher(jettyAddr, conf, confForCreate));
}
});
} else {
this.httpsServer = null;
this.sslFactory = null;
}
}
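// Configuration sketch (key names are standard DataNode settings; the values
// shown are illustrative):
//   dfs.http.policy            = HTTP_ONLY | HTTPS_ONLY | HTTP_AND_HTTPS
//   dfs.datanode.http.address  = 0.0.0.0:50075
//   dfs.datanode.https.address = 0.0.0.0:50475
// The Netty servers bound in start() front the internal Jetty instance, which
// is only bound to localhost (see the "http://localhost:0" endpoint above).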
public InetSocketAddress getHttpAddress() {
return httpAddress;
}
public InetSocketAddress getHttpsAddress() {
return httpsAddress;
}
public void start() throws IOException {
if (httpServer != null) {
InetSocketAddress infoAddr = DataNode.getInfoAddr(conf);
ChannelFuture f = httpServer.bind(infoAddr);
try {
f.syncUninterruptibly();
} catch (Throwable e) {
if (e instanceof BindException) {
throw NetUtils.wrapException(null, 0, infoAddr.getHostName(),
infoAddr.getPort(), (SocketException) e);
} else {
throw e;
}
}
httpAddress = (InetSocketAddress) f.channel().localAddress();
LOG.info("Listening for HTTP traffic on " + httpAddress);
}
if (httpsServer != null) {
InetSocketAddress secInfoSocAddr =
NetUtils.createSocketAddr(conf.getTrimmed(
DFS_DATANODE_HTTPS_ADDRESS_KEY,
DFS_DATANODE_HTTPS_ADDRESS_DEFAULT));
ChannelFuture f = httpsServer.bind(secInfoSocAddr);
try {
f.syncUninterruptibly();
} catch (Throwable e) {
if (e instanceof BindException) {
throw NetUtils.wrapException(null, 0, secInfoSocAddr.getHostName(),
secInfoSocAddr.getPort(), (SocketException) e);
} else {
throw e;
}
}
httpsAddress = (InetSocketAddress) f.channel().localAddress();
LOG.info("Listening for HTTPS traffic on " + httpsAddress);
}
}
@Override
public void close() throws IOException {
bossGroup.shutdownGracefully();
workerGroup.shutdownGracefully();
if (sslFactory != null) {
sslFactory.destroy();
}
if (externalHttpChannel != null) {
externalHttpChannel.close();
}
try {
infoServer.stop();
} catch (Exception e) {
throw new IOException(e);
}
}
private static String getHostnameForSpnegoPrincipal(Configuration conf) {
String addr = conf.getTrimmed(DFS_DATANODE_HTTP_ADDRESS_KEY, null);
if (addr == null) {
addr = conf.getTrimmed(DFS_DATANODE_HTTPS_ADDRESS_KEY,
DFS_DATANODE_HTTPS_ADDRESS_DEFAULT);
}
InetSocketAddress inetSocker = NetUtils.createSocketAddr(addr);
return inetSocker.getHostString();
}
}
| 9,286 | 36.59919 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/dtp/DtpHttp2FrameListener.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web.dtp;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http2.DefaultHttp2Headers;
import io.netty.handler.codec.http2.Http2ConnectionEncoder;
import io.netty.handler.codec.http2.Http2Exception;
import io.netty.handler.codec.http2.Http2FrameAdapter;
import io.netty.handler.codec.http2.Http2Headers;
import java.nio.charset.StandardCharsets;
class DtpHttp2FrameListener extends Http2FrameAdapter {
private Http2ConnectionEncoder encoder;
public void encoder(Http2ConnectionEncoder encoder) {
this.encoder = encoder;
}
@Override
public void onHeadersRead(ChannelHandlerContext ctx, int streamId,
Http2Headers headers, int streamDependency, short weight,
boolean exclusive, int padding, boolean endStream) throws Http2Exception {
encoder.writeHeaders(ctx, streamId,
new DefaultHttp2Headers().status(HttpResponseStatus.OK.codeAsText()), 0,
false, ctx.newPromise());
encoder.writeData(
ctx,
streamId,
ctx.alloc().buffer()
.writeBytes("HTTP/2 DTP".getBytes(StandardCharsets.UTF_8)), 0, true,
ctx.newPromise());
}
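// Behavior sketch (illustrative): every HTTP/2 request stream is answered
// immediately with a 200 response whose body is the literal string
// "HTTP/2 DTP" and END_STREAM set; request DATA frames are left to the
// default Http2FrameAdapter handling.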
}
| 2,037 | 37.45283 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/dtp/DtpHttp2Handler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web.dtp;
import org.apache.hadoop.classification.InterfaceAudience;
import io.netty.handler.codec.http2.Http2ConnectionHandler;
/**
* The HTTP/2 handler.
*/
@InterfaceAudience.Private
public class DtpHttp2Handler extends Http2ConnectionHandler {
public DtpHttp2Handler() {
super(true, new DtpHttp2FrameListener());
((DtpHttp2FrameListener) decoder().listener()).encoder(encoder());
}
}
| 1,258 | 34.971429 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH;
import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE;
import static io.netty.handler.codec.http.HttpResponseStatus.BAD_REQUEST;
import static io.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN;
import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
import static io.netty.handler.codec.http.HttpResponseStatus.NOT_FOUND;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.APPLICATION_JSON_UTF8;
import io.netty.buffer.Unpooled;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.token.SecretManager;
import com.google.common.base.Charsets;
import com.sun.jersey.api.ParamException;
import com.sun.jersey.api.container.ContainerException;
class ExceptionHandler {
static Log LOG = WebHdfsHandler.LOG;
static DefaultFullHttpResponse exceptionCaught(Throwable cause) {
Exception e = cause instanceof Exception ? (Exception) cause : new Exception(cause);
if (LOG.isTraceEnabled()) {
LOG.trace("GOT EXCEPTION", e);
}
//Convert exception
if (e instanceof ParamException) {
final ParamException paramexception = (ParamException)e;
e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
+ paramexception.getParameterName() + "\": "
+ e.getCause().getMessage(), e);
} else if (e instanceof ContainerException || e instanceof SecurityException) {
e = toCause(e);
} else if (e instanceof RemoteException) {
e = ((RemoteException)e).unwrapRemoteException();
}
//Map response status
final HttpResponseStatus s;
if (e instanceof SecurityException) {
s = FORBIDDEN;
} else if (e instanceof AuthorizationException) {
s = FORBIDDEN;
} else if (e instanceof FileNotFoundException) {
s = NOT_FOUND;
} else if (e instanceof IOException) {
s = FORBIDDEN;
} else if (e instanceof UnsupportedOperationException) {
s = BAD_REQUEST;
} else if (e instanceof IllegalArgumentException) {
s = BAD_REQUEST;
} else {
LOG.warn("INTERNAL_SERVER_ERROR", e);
s = INTERNAL_SERVER_ERROR;
}
final byte[] js = JsonUtil.toJsonString(e).getBytes(Charsets.UTF_8);
DefaultFullHttpResponse resp =
new DefaultFullHttpResponse(HTTP_1_1, s, Unpooled.wrappedBuffer(js));
resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
resp.headers().set(CONTENT_LENGTH, js.length);
return resp;
}
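// Response sketch (illustrative): a FileNotFoundException becomes a 404 whose
// JSON body follows the WebHDFS RemoteException convention, roughly
//   {"RemoteException":{"exception":"FileNotFoundException",
//     "javaClassName":"java.io.FileNotFoundException","message":"..."}}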
private static Exception toCause(Exception e) {
final Throwable t = e.getCause();
if (e instanceof SecurityException) {
// For the issue reported in HDFS-6475, if SecurityException's cause
// is InvalidToken, and the InvalidToken's cause is StandbyException,
// return StandbyException; Otherwise, leave the exception as is,
// since they are handled elsewhere. See HDFS-6588.
if (t != null && t instanceof SecretManager.InvalidToken) {
final Throwable t1 = t.getCause();
if (t1 != null && t1 instanceof StandbyException) {
e = (StandbyException)t1;
}
}
} else {
if (t != null && t instanceof Exception) {
e = (Exception)t;
}
}
return e;
}
}
| 4,689 | 38.745763 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/HdfsWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION;
import static io.netty.handler.codec.http.HttpHeaderValues.CLOSE;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.HttpContent;
import io.netty.handler.codec.http.LastHttpContent;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.commons.logging.Log;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.io.IOUtils;
class HdfsWriter extends SimpleChannelInboundHandler<HttpContent> {
private final DFSClient client;
private final OutputStream out;
private final DefaultHttpResponse response;
private static final Log LOG = WebHdfsHandler.LOG;
HdfsWriter(DFSClient client, OutputStream out, DefaultHttpResponse response) {
this.client = client;
this.out = out;
this.response = response;
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
ctx.flush();
}
@Override
protected void channelRead0(ChannelHandlerContext ctx, HttpContent chunk)
throws IOException {
chunk.content().readBytes(out, chunk.content().readableBytes());
if (chunk instanceof LastHttpContent) {
response.headers().set(CONNECTION, CLOSE);
ctx.write(response).addListener(ChannelFutureListener.CLOSE);
releaseDfsResources();
}
}
@Override
public void channelInactive(ChannelHandlerContext ctx) {
releaseDfsResources();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
releaseDfsResources();
DefaultHttpResponse resp = ExceptionHandler.exceptionCaught(cause);
resp.headers().set(CONNECTION, CLOSE);
ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}
private void releaseDfsResources() {
IOUtils.cleanup(LOG, out);
IOUtils.cleanup(LOG, client);
}
}
| 2,902 | 33.975904 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
import static io.netty.handler.codec.http.HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS;
import static io.netty.handler.codec.http.HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN;
import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION;
import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH;
import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE;
import static io.netty.handler.codec.http.HttpHeaderNames.LOCATION;
import static io.netty.handler.codec.http.HttpHeaderValues.CLOSE;
import static io.netty.handler.codec.http.HttpMethod.GET;
import static io.netty.handler.codec.http.HttpMethod.POST;
import static io.netty.handler.codec.http.HttpMethod.PUT;
import static io.netty.handler.codec.http.HttpResponseStatus.CONTINUE;
import static io.netty.handler.codec.http.HttpResponseStatus.CREATED;
import static io.netty.handler.codec.http.HttpResponseStatus.OK;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HDFS_URI_SCHEME;
import static org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier.HDFS_DELEGATION_KIND;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.QueryStringDecoder;
import io.netty.handler.stream.ChunkedStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.EnumSet;
import org.apache.commons.io.Charsets;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.PostOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.LimitInputStream;
import com.google.common.base.Preconditions;
public class WebHdfsHandler extends SimpleChannelInboundHandler<HttpRequest> {
static final Log LOG = LogFactory.getLog(WebHdfsHandler.class);
public static final String WEBHDFS_PREFIX = WebHdfsFileSystem.PATH_PREFIX;
public static final int WEBHDFS_PREFIX_LENGTH = WEBHDFS_PREFIX.length();
public static final String APPLICATION_OCTET_STREAM =
"application/octet-stream";
public static final String APPLICATION_JSON_UTF8 =
"application/json; charset=utf-8";
private final Configuration conf;
private final Configuration confForCreate;
private String path;
private ParameterParser params;
private UserGroupInformation ugi;
public WebHdfsHandler(Configuration conf, Configuration confForCreate)
throws IOException {
this.conf = conf;
this.confForCreate = confForCreate;
}
@Override
public void channelRead0(final ChannelHandlerContext ctx,
final HttpRequest req) throws Exception {
Preconditions.checkArgument(req.uri().startsWith(WEBHDFS_PREFIX));
QueryStringDecoder queryString = new QueryStringDecoder(req.uri());
params = new ParameterParser(queryString, conf);
DataNodeUGIProvider ugiProvider = new DataNodeUGIProvider(params);
ugi = ugiProvider.ugi();
path = params.path();
injectToken();
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
handle(ctx, req);
return null;
}
});
}
public void handle(ChannelHandlerContext ctx, HttpRequest req)
throws IOException, URISyntaxException {
String op = params.op();
HttpMethod method = req.method();
if (PutOpParam.Op.CREATE.name().equalsIgnoreCase(op)
&& method == PUT) {
onCreate(ctx);
} else if (PostOpParam.Op.APPEND.name().equalsIgnoreCase(op)
&& method == POST) {
onAppend(ctx);
} else if (GetOpParam.Op.OPEN.name().equalsIgnoreCase(op)
&& method == GET) {
onOpen(ctx);
} else if(GetOpParam.Op.GETFILECHECKSUM.name().equalsIgnoreCase(op)
&& method == GET) {
onGetFileChecksum(ctx);
} else {
throw new IllegalArgumentException("Invalid operation " + op);
}
}
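// Request shape sketch (paths and parameter values are illustrative):
//   PUT  /webhdfs/v1/tmp/f?op=CREATE&namenoderpcaddress=nn:8020&overwrite=true
//   POST /webhdfs/v1/tmp/f?op=APPEND&namenoderpcaddress=nn:8020
//   GET  /webhdfs/v1/tmp/f?op=OPEN&offset=0&length=1024
//   GET  /webhdfs/v1/tmp/f?op=GETFILECHECKSUM&namenoderpcaddress=nn:8020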
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
LOG.debug("Error ", cause);
DefaultHttpResponse resp = ExceptionHandler.exceptionCaught(cause);
resp.headers().set(CONNECTION, CLOSE);
ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}
private void onCreate(ChannelHandlerContext ctx)
throws IOException, URISyntaxException {
writeContinueHeader(ctx);
final String nnId = params.namenodeId();
final int bufferSize = params.bufferSize();
final short replication = params.replication();
final long blockSize = params.blockSize();
final FsPermission permission = params.permission();
EnumSet<CreateFlag> flags = params.overwrite() ?
EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
: EnumSet.of(CreateFlag.CREATE);
final DFSClient dfsClient = newDfsClient(nnId, confForCreate);
OutputStream out = dfsClient.createWrappedOutputStream(dfsClient.create(
path, permission, flags, replication,
blockSize, null, bufferSize, null), null);
DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1, CREATED);
final URI uri = new URI(HDFS_URI_SCHEME, nnId, path, null, null);
resp.headers().set(LOCATION, uri.toString());
resp.headers().set(CONTENT_LENGTH, 0);
ctx.pipeline().replace(this, HdfsWriter.class.getSimpleName(),
new HdfsWriter(dfsClient, out, resp));
}
private void onAppend(ChannelHandlerContext ctx) throws IOException {
writeContinueHeader(ctx);
final String nnId = params.namenodeId();
final int bufferSize = params.bufferSize();
DFSClient dfsClient = newDfsClient(nnId, conf);
OutputStream out = dfsClient.append(path, bufferSize,
EnumSet.of(CreateFlag.APPEND), null, null);
DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1, OK);
resp.headers().set(CONTENT_LENGTH, 0);
ctx.pipeline().replace(this, HdfsWriter.class.getSimpleName(),
new HdfsWriter(dfsClient, out, resp));
}
private void onOpen(ChannelHandlerContext ctx) throws IOException {
final String nnId = params.namenodeId();
final int bufferSize = params.bufferSize();
final long offset = params.offset();
final long length = params.length();
DefaultHttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
HttpHeaders headers = response.headers();
// Allow the UI to access the file
headers.set(ACCESS_CONTROL_ALLOW_METHODS, GET);
headers.set(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
headers.set(CONTENT_TYPE, APPLICATION_OCTET_STREAM);
headers.set(CONNECTION, CLOSE);
final DFSClient dfsclient = newDfsClient(nnId, conf);
HdfsDataInputStream in = dfsclient.createWrappedInputStream(
dfsclient.open(path, bufferSize, true));
in.seek(offset);
long contentLength = in.getVisibleLength() - offset;
if (length >= 0) {
contentLength = Math.min(contentLength, length);
}
final InputStream data;
if (contentLength >= 0) {
headers.set(CONTENT_LENGTH, contentLength);
data = new LimitInputStream(in, contentLength);
} else {
data = in;
}
ctx.write(response);
ctx.writeAndFlush(new ChunkedStream(data) {
@Override
public void close() throws Exception {
super.close();
dfsclient.close();
}
}).addListener(ChannelFutureListener.CLOSE);
}
private void onGetFileChecksum(ChannelHandlerContext ctx) throws IOException {
MD5MD5CRC32FileChecksum checksum = null;
final String nnId = params.namenodeId();
DFSClient dfsclient = newDfsClient(nnId, conf);
try {
checksum = dfsclient.getFileChecksum(path, Long.MAX_VALUE);
dfsclient.close();
dfsclient = null;
} finally {
IOUtils.cleanup(LOG, dfsclient);
}
final byte[] js = JsonUtil.toJsonString(checksum).getBytes(Charsets.UTF_8);
DefaultFullHttpResponse resp =
new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(js));
resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
resp.headers().set(CONTENT_LENGTH, js.length);
resp.headers().set(CONNECTION, CLOSE);
ctx.writeAndFlush(resp).addListener(ChannelFutureListener.CLOSE);
}
private static void writeContinueHeader(ChannelHandlerContext ctx) {
DefaultHttpResponse r = new DefaultFullHttpResponse(HTTP_1_1, CONTINUE,
Unpooled.EMPTY_BUFFER);
ctx.writeAndFlush(r);
}
private static DFSClient newDfsClient
(String nnId, Configuration conf) throws IOException {
URI uri = URI.create(HDFS_URI_SCHEME + "://" + nnId);
return new DFSClient(uri, conf);
}
private void injectToken() throws IOException {
if (UserGroupInformation.isSecurityEnabled()) {
Token<DelegationTokenIdentifier> token = params.delegationToken();
token.setKind(HDFS_DELEGATION_KIND);
ugi.addToken(token);
}
}
}
| 10,885 | 39.022059 | 110 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/DataNodeUGIProvider.java
|
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
/**
* Creates the UGI for WebHDFS requests handled by the DN. Note that
* the DN does not authenticate the UGI -- the NN will authenticate it in
* subsequent operations.
*/
class DataNodeUGIProvider {
private final ParameterParser params;
DataNodeUGIProvider(ParameterParser params) {
this.params = params;
}
UserGroupInformation ugi() throws IOException {
if (UserGroupInformation.isSecurityEnabled()) {
return tokenUGI();
}
final String usernameFromQuery = params.userName();
final String doAsUserFromQuery = params.doAsUser();
final String remoteUser = usernameFromQuery == null
? JspHelper.getDefaultWebUserName(params.conf()) // not specified in request
: usernameFromQuery;
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(remoteUser);
JspHelper.checkUsername(ugi.getShortUserName(), usernameFromQuery);
if (doAsUserFromQuery != null) {
// create and attempt to authorize a proxy user
ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi);
}
return ugi;
}
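// Resolution sketch (illustrative): with security disabled, a query string
// carrying "user.name=alice&doas=bob" yields a proxy-user UGI for "bob"
// backed by the remote user "alice"; with neither parameter present, the
// configured default web user name is used instead.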
private UserGroupInformation tokenUGI() throws IOException {
Token<DelegationTokenIdentifier> token = params.delegationToken();
ByteArrayInputStream buf =
new ByteArrayInputStream(token.getIdentifier());
DataInputStream in = new DataInputStream(buf);
DelegationTokenIdentifier id = new DelegationTokenIdentifier();
id.readFields(in);
UserGroupInformation ugi = id.getUser();
ugi.addToken(token);
return ugi;
}
}
| 2,559 | 35.056338 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.web.webhdfs;
import io.netty.handler.codec.http.QueryStringDecoder;
import org.apache.commons.io.Charsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.HAUtilClient;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.hdfs.web.resources.DoAsParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
import org.apache.hadoop.hdfs.web.resources.PermissionParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
import java.io.IOException;
import java.net.URI;
import java.nio.charset.Charset;
import java.util.List;
import java.util.Map;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HDFS_URI_SCHEME;
import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.WEBHDFS_PREFIX_LENGTH;
class ParameterParser {
private final Configuration conf;
private final String path;
private final Map<String, List<String>> params;
ParameterParser(QueryStringDecoder decoder, Configuration conf) {
this.path = decodeComponent(decoder.path().substring
(WEBHDFS_PREFIX_LENGTH), Charsets.UTF_8);
this.params = decoder.parameters();
this.conf = conf;
}
String path() { return path; }
String op() {
return param(HttpOpParam.NAME);
}
long offset() {
return new OffsetParam(param(OffsetParam.NAME)).getOffset();
}
long length() {
return new LengthParam(param(LengthParam.NAME)).getLength();
}
String namenodeId() {
return new NamenodeAddressParam(param(NamenodeAddressParam.NAME))
.getValue();
}
String doAsUser() {
return new DoAsParam(param(DoAsParam.NAME)).getValue();
}
String userName() {
return new UserParam(param(UserParam.NAME)).getValue();
}
int bufferSize() {
return new BufferSizeParam(param(BufferSizeParam.NAME)).getValue(conf);
}
long blockSize() {
return new BlockSizeParam(param(BlockSizeParam.NAME)).getValue(conf);
}
short replication() {
return new ReplicationParam(param(ReplicationParam.NAME)).getValue(conf);
}
FsPermission permission() {
return new PermissionParam(param(PermissionParam.NAME)).getFsPermission();
}
boolean overwrite() {
return new OverwriteParam(param(OverwriteParam.NAME)).getValue();
}
Token<DelegationTokenIdentifier> delegationToken() throws IOException {
String delegation = param(DelegationParam.NAME);
final Token<DelegationTokenIdentifier> token = new
Token<DelegationTokenIdentifier>();
token.decodeFromUrlString(delegation);
URI nnUri = URI.create(HDFS_URI_SCHEME + "://" + namenodeId());
boolean isLogical = HAUtilClient.isLogicalUri(conf, nnUri);
if (isLogical) {
token.setService(
HAUtilClient.buildTokenServiceForLogicalUri(nnUri, HDFS_URI_SCHEME));
} else {
token.setService(SecurityUtil.buildTokenService(nnUri));
}
return token;
}
Configuration conf() {
return conf;
}
private String param(String key) {
List<String> p = params.get(key);
return p == null ? null : p.get(0);
}
/**
* The following function behaves exactly the same as netty's
* <code>QueryStringDecoder#decodeComponent</code> except that it
* does not decode the '+' character as a space. WebHDFS keeps this behavior
* to maintain backward-compatibility with pre-2.7 releases.
*/
private static String decodeComponent(final String s, final Charset charset) {
if (s == null) {
return "";
}
final int size = s.length();
boolean modified = false;
for (int i = 0; i < size; i++) {
final char c = s.charAt(i);
if (c == '%' || c == '+') {
modified = true;
break;
}
}
if (!modified) {
return s;
}
final byte[] buf = new byte[size];
int pos = 0; // position in `buf'.
for (int i = 0; i < size; i++) {
char c = s.charAt(i);
if (c == '%') {
if (i == size - 1) {
throw new IllegalArgumentException("unterminated escape sequence at" +
" end of string: " + s);
}
c = s.charAt(++i);
if (c == '%') {
buf[pos++] = '%'; // "%%" -> "%"
continue;
}
if (i == size - 1) {
throw new IllegalArgumentException("partial escape sequence at end " +
"of string: " + s);
}
c = decodeHexNibble(c);
final char c2 = decodeHexNibble(s.charAt(++i));
if (c == Character.MAX_VALUE || c2 == Character.MAX_VALUE) {
throw new IllegalArgumentException(
"invalid escape sequence `%" + s.charAt(i - 1) + s.charAt(
i) + "' at index " + (i - 2) + " of: " + s);
}
c = (char) (c * 16 + c2);
// Fall through.
}
buf[pos++] = (byte) c;
}
return new String(buf, 0, pos, charset);
}
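// Decoding examples for the method above (illustrative): "a%20b" -> "a b" and
// "%2Fuser%2Ffoo" -> "/user/foo", while "a+b" stays "a+b" because '+' is
// deliberately not treated as an encoded space here.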
/**
* Helper to decode half of a hexadecimal number from a string.
* @param c The ASCII character of the hexadecimal number to decode.
* Must be in the range {@code [0-9a-fA-F]}.
* @return The hexadecimal value represented in the ASCII character
* given, or {@link Character#MAX_VALUE} if the character is invalid.
*/
private static char decodeHexNibble(final char c) {
if ('0' <= c && c <= '9') {
return (char) (c - '0');
} else if ('a' <= c && c <= 'f') {
return (char) (c - 'a' + 10);
} else if ('A' <= c && c <= 'F') {
return (char) (c - 'A' + 10);
} else {
return Character.MAX_VALUE;
}
}
}
| 7,074 | 33.014423 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.metrics;
import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableQuantiles;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import java.util.concurrent.ThreadLocalRandom;
/**
*
* This class is for maintaining the various DataNode statistics
* and publishing them through the metrics interfaces.
* This also registers the JMX MBean for RPC.
* <p>
* This class has a number of metrics variables that are publicly accessible;
* these variables (objects) have methods to update their values;
* for example:
* <p> {@link #blocksRead}.incr()
*
*/
@InterfaceAudience.Private
@Metrics(about="DataNode metrics", context="dfs")
public class DataNodeMetrics {
@Metric MutableCounterLong bytesWritten;
@Metric("Milliseconds spent writing")
MutableCounterLong totalWriteTime;
@Metric MutableCounterLong bytesRead;
@Metric("Milliseconds spent reading")
MutableCounterLong totalReadTime;
@Metric MutableCounterLong blocksWritten;
@Metric MutableCounterLong blocksRead;
@Metric MutableCounterLong blocksReplicated;
@Metric MutableCounterLong blocksRemoved;
@Metric MutableCounterLong blocksVerified;
@Metric MutableCounterLong blockVerificationFailures;
@Metric MutableCounterLong blocksCached;
@Metric MutableCounterLong blocksUncached;
@Metric MutableCounterLong readsFromLocalClient;
@Metric MutableCounterLong readsFromRemoteClient;
@Metric MutableCounterLong writesFromLocalClient;
@Metric MutableCounterLong writesFromRemoteClient;
@Metric MutableCounterLong blocksGetLocalPathInfo;
@Metric("Bytes read by remote client")
MutableCounterLong remoteBytesRead;
@Metric("Bytes written by remote client")
MutableCounterLong remoteBytesWritten;
// RamDisk metrics on read/write
@Metric MutableCounterLong ramDiskBlocksWrite;
@Metric MutableCounterLong ramDiskBlocksWriteFallback;
@Metric MutableCounterLong ramDiskBytesWrite;
@Metric MutableCounterLong ramDiskBlocksReadHits;
// RamDisk metrics on eviction
@Metric MutableCounterLong ramDiskBlocksEvicted;
@Metric MutableCounterLong ramDiskBlocksEvictedWithoutRead;
@Metric MutableRate ramDiskBlocksEvictionWindowMs;
final MutableQuantiles[] ramDiskBlocksEvictionWindowMsQuantiles;
// RamDisk metrics on lazy persist
@Metric MutableCounterLong ramDiskBlocksLazyPersisted;
@Metric MutableCounterLong ramDiskBlocksDeletedBeforeLazyPersisted;
@Metric MutableCounterLong ramDiskBytesLazyPersisted;
@Metric MutableRate ramDiskBlocksLazyPersistWindowMs;
final MutableQuantiles[] ramDiskBlocksLazyPersistWindowMsQuantiles;
@Metric MutableCounterLong fsyncCount;
@Metric MutableCounterLong volumeFailures;
@Metric("Count of network errors on the datanode")
MutableCounterLong datanodeNetworkErrors;
@Metric MutableRate readBlockOp;
@Metric MutableRate writeBlockOp;
@Metric MutableRate blockChecksumOp;
@Metric MutableRate copyBlockOp;
@Metric MutableRate replaceBlockOp;
@Metric MutableRate heartbeats;
@Metric MutableRate blockReports;
@Metric MutableRate incrementalBlockReports;
@Metric MutableRate cacheReports;
@Metric MutableRate packetAckRoundTripTimeNanos;
final MutableQuantiles[] packetAckRoundTripTimeNanosQuantiles;
@Metric MutableRate flushNanos;
final MutableQuantiles[] flushNanosQuantiles;
@Metric MutableRate fsyncNanos;
final MutableQuantiles[] fsyncNanosQuantiles;
@Metric MutableRate sendDataPacketBlockedOnNetworkNanos;
final MutableQuantiles[] sendDataPacketBlockedOnNetworkNanosQuantiles;
@Metric MutableRate sendDataPacketTransferNanos;
final MutableQuantiles[] sendDataPacketTransferNanosQuantiles;
final MetricsRegistry registry = new MetricsRegistry("datanode");
final String name;
JvmMetrics jvmMetrics = null;
public DataNodeMetrics(String name, String sessionId, int[] intervals,
final JvmMetrics jvmMetrics) {
this.name = name;
this.jvmMetrics = jvmMetrics;
registry.tag(SessionId, sessionId);
final int len = intervals.length;
packetAckRoundTripTimeNanosQuantiles = new MutableQuantiles[len];
flushNanosQuantiles = new MutableQuantiles[len];
fsyncNanosQuantiles = new MutableQuantiles[len];
sendDataPacketBlockedOnNetworkNanosQuantiles = new MutableQuantiles[len];
sendDataPacketTransferNanosQuantiles = new MutableQuantiles[len];
ramDiskBlocksEvictionWindowMsQuantiles = new MutableQuantiles[len];
ramDiskBlocksLazyPersistWindowMsQuantiles = new MutableQuantiles[len];
for (int i = 0; i < len; i++) {
int interval = intervals[i];
packetAckRoundTripTimeNanosQuantiles[i] = registry.newQuantiles(
"packetAckRoundTripTimeNanos" + interval + "s",
"Packet Ack RTT in ns", "ops", "latency", interval);
flushNanosQuantiles[i] = registry.newQuantiles(
"flushNanos" + interval + "s",
"Disk flush latency in ns", "ops", "latency", interval);
fsyncNanosQuantiles[i] = registry.newQuantiles(
"fsyncNanos" + interval + "s", "Disk fsync latency in ns",
"ops", "latency", interval);
sendDataPacketBlockedOnNetworkNanosQuantiles[i] = registry.newQuantiles(
"sendDataPacketBlockedOnNetworkNanos" + interval + "s",
"Time blocked on network while sending a packet in ns",
"ops", "latency", interval);
sendDataPacketTransferNanosQuantiles[i] = registry.newQuantiles(
"sendDataPacketTransferNanos" + interval + "s",
"Time reading from disk and writing to network while sending " +
"a packet in ns", "ops", "latency", interval);
ramDiskBlocksEvictionWindowMsQuantiles[i] = registry.newQuantiles(
"ramDiskBlocksEvictionWindows" + interval + "s",
"Time between the RamDisk block write and eviction in ms",
"ops", "latency", interval);
ramDiskBlocksLazyPersistWindowMsQuantiles[i] = registry.newQuantiles(
"ramDiskBlocksLazyPersistWindows" + interval + "s",
"Time between the RamDisk block write and disk persist in ms",
"ops", "latency", interval);
}
}
public static DataNodeMetrics create(Configuration conf, String dnName) {
String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
MetricsSystem ms = DefaultMetricsSystem.instance();
JvmMetrics jm = JvmMetrics.create("DataNode", sessionId, ms);
String name = "DataNodeActivity-"+ (dnName.isEmpty()
? "UndefinedDataNodeName"+ ThreadLocalRandom.current().nextInt()
: dnName.replace(':', '-'));
    // Percentile measurement is off by default; it is enabled by
    // configuring one or more intervals.
int[] intervals =
conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
return ms.register(name, null, new DataNodeMetrics(name, sessionId,
intervals, jm));
}
public String name() { return name; }
public JvmMetrics getJvmMetrics() {
return jvmMetrics;
}
public void addHeartbeat(long latency) {
heartbeats.add(latency);
}
public void addBlockReport(long latency) {
blockReports.add(latency);
}
public void addIncrementalBlockReport(long latency) {
incrementalBlockReports.add(latency);
}
public void addCacheReport(long latency) {
cacheReports.add(latency);
}
public void incrBlocksReplicated() {
blocksReplicated.incr();
}
public void incrBlocksWritten() {
blocksWritten.incr();
}
public void incrBlocksRemoved(int delta) {
blocksRemoved.incr(delta);
}
public void incrBytesWritten(int delta) {
bytesWritten.incr(delta);
}
public void incrBlockVerificationFailures() {
blockVerificationFailures.incr();
}
public void incrBlocksVerified() {
blocksVerified.incr();
}
public void incrBlocksCached(int delta) {
blocksCached.incr(delta);
}
public void incrBlocksUncached(int delta) {
blocksUncached.incr(delta);
}
public void addReadBlockOp(long latency) {
readBlockOp.add(latency);
}
public void addWriteBlockOp(long latency) {
writeBlockOp.add(latency);
}
public void addReplaceBlockOp(long latency) {
replaceBlockOp.add(latency);
}
public void addCopyBlockOp(long latency) {
copyBlockOp.add(latency);
}
public void addBlockChecksumOp(long latency) {
blockChecksumOp.add(latency);
}
public void incrBytesRead(int delta) {
bytesRead.incr(delta);
}
public void incrBlocksRead() {
blocksRead.incr();
}
public void incrFsyncCount() {
fsyncCount.incr();
}
public void incrTotalWriteTime(long timeTaken) {
totalWriteTime.incr(timeTaken);
}
public void incrTotalReadTime(long timeTaken) {
totalReadTime.incr(timeTaken);
}
public void addPacketAckRoundTripTimeNanos(long latencyNanos) {
packetAckRoundTripTimeNanos.add(latencyNanos);
for (MutableQuantiles q : packetAckRoundTripTimeNanosQuantiles) {
q.add(latencyNanos);
}
}
public void addFlushNanos(long latencyNanos) {
flushNanos.add(latencyNanos);
for (MutableQuantiles q : flushNanosQuantiles) {
q.add(latencyNanos);
}
}
public void addFsyncNanos(long latencyNanos) {
fsyncNanos.add(latencyNanos);
for (MutableQuantiles q : fsyncNanosQuantiles) {
q.add(latencyNanos);
}
}
public void shutdown() {
DefaultMetricsSystem.shutdown();
}
public void incrWritesFromClient(boolean local, long size) {
if(local) {
writesFromLocalClient.incr();
} else {
writesFromRemoteClient.incr();
remoteBytesWritten.incr(size);
}
}
public void incrReadsFromClient(boolean local, long size) {
if (local) {
readsFromLocalClient.incr();
} else {
readsFromRemoteClient.incr();
remoteBytesRead.incr(size);
}
}
public void incrVolumeFailures() {
volumeFailures.incr();
}
public void incrDatanodeNetworkErrors() {
datanodeNetworkErrors.incr();
}
/** Increment for getBlockLocalPathInfo calls */
public void incrBlocksGetLocalPathInfo() {
blocksGetLocalPathInfo.incr();
}
public void addSendDataPacketBlockedOnNetworkNanos(long latencyNanos) {
sendDataPacketBlockedOnNetworkNanos.add(latencyNanos);
for (MutableQuantiles q : sendDataPacketBlockedOnNetworkNanosQuantiles) {
q.add(latencyNanos);
}
}
public void addSendDataPacketTransferNanos(long latencyNanos) {
sendDataPacketTransferNanos.add(latencyNanos);
for (MutableQuantiles q : sendDataPacketTransferNanosQuantiles) {
q.add(latencyNanos);
}
}
public void incrRamDiskBlocksWrite() {
ramDiskBlocksWrite.incr();
}
public void incrRamDiskBlocksWriteFallback() {
ramDiskBlocksWriteFallback.incr();
}
public void addRamDiskBytesWrite(long bytes) {
ramDiskBytesWrite.incr(bytes);
}
public void incrRamDiskBlocksReadHits() {
ramDiskBlocksReadHits.incr();
}
public void incrRamDiskBlocksEvicted() {
ramDiskBlocksEvicted.incr();
}
public void incrRamDiskBlocksEvictedWithoutRead() {
ramDiskBlocksEvictedWithoutRead.incr();
}
public void addRamDiskBlocksEvictionWindowMs(long latencyMs) {
ramDiskBlocksEvictionWindowMs.add(latencyMs);
for (MutableQuantiles q : ramDiskBlocksEvictionWindowMsQuantiles) {
q.add(latencyMs);
}
}
public void incrRamDiskBlocksLazyPersisted() {
ramDiskBlocksLazyPersisted.incr();
}
public void incrRamDiskBlocksDeletedBeforeLazyPersisted() {
ramDiskBlocksDeletedBeforeLazyPersisted.incr();
}
public void incrRamDiskBytesLazyPersisted(long bytes) {
ramDiskBytesLazyPersisted.incr(bytes);
}
public void addRamDiskBlocksLazyPersistWindowMs(long latencyMs) {
ramDiskBlocksLazyPersistWindowMs.add(latencyMs);
for (MutableQuantiles q : ramDiskBlocksLazyPersistWindowMsQuantiles) {
q.add(latencyMs);
}
}
}
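/**
 * Illustrative usage sketch, not part of the original class: creates the
 * metrics source with percentile intervals enabled and records a few sample
 * values. The interval string "60,300" and the datanode name are assumed
 * example values.
 */
class DataNodeMetricsUsageExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Enables the 60s and 300s MutableQuantiles created in the constructor.
    conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "60,300");
    DataNodeMetrics metrics = DataNodeMetrics.create(conf, "dn-example");
    metrics.addHeartbeat(5);                           // heartbeat latency in ms
    metrics.addPacketAckRoundTripTimeNanos(1000000L);  // 1 ms expressed in ns
    metrics.incrBlocksWritten();
    metrics.shutdown();
  }
}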
| 13,246 | 31.388753 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetricHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.metrics;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.lib.Interns;
import java.io.IOException;
public class DataNodeMetricHelper {
/**
   * Helper function for the metrics2 interface, allowing an
   * FSDatasetMBean implementation to act as a metrics source.
   *
   * @param collector the MetricsCollector that is passed in
   * @param beanClass the class that currently implements the metric functions
   * @param context a string that identifies the context
*
* @throws IOException
*/
public static void getMetrics(MetricsCollector collector,
FSDatasetMBean beanClass, String context)
throws IOException {
if (beanClass == null) {
throw new IOException("beanClass cannot be null");
}
String className = beanClass.getClass().getName();
collector.addRecord(className)
.setContext(context)
.addGauge(Interns.info("Capacity", "Total storage capacity"),
beanClass.getCapacity())
.addGauge(Interns.info("DfsUsed", "Total bytes used by dfs datanode"),
beanClass.getDfsUsed())
.addGauge(Interns.info("Remaining", "Total bytes of free storage"),
beanClass.getRemaining())
.add(new MetricsTag(Interns.info("StorageInfo", "Storage ID"),
beanClass.getStorageInfo()))
.addGauge(Interns.info("NumFailedVolumes", "Number of failed Volumes" +
" in the data Node"), beanClass.getNumFailedVolumes())
.addGauge(Interns.info("LastVolumeFailureDate", "Last Volume failure in" +
" milliseconds from epoch"), beanClass.getLastVolumeFailureDate())
.addGauge(Interns.info("EstimatedCapacityLostTotal", "Total capacity lost"
+ " due to volume failure"), beanClass.getEstimatedCapacityLostTotal())
.addGauge(Interns.info("CacheUsed", "Datanode cache used in bytes"),
beanClass.getCacheUsed())
.addGauge(Interns.info("CacheCapacity", "Datanode cache capacity"),
beanClass.getCacheCapacity())
.addGauge(Interns.info("NumBlocksCached", "Datanode number" +
" of blocks cached"), beanClass.getNumBlocksCached())
.addGauge(Interns.info("NumBlocksFailedToCache", "Datanode number of " +
"blocks failed to cache"), beanClass.getNumBlocksFailedToCache())
.addGauge(Interns.info("NumBlocksFailedToUnCache", "Datanode number of" +
" blocks failed in cache eviction"),
beanClass.getNumBlocksFailedToUncache());
}
}
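// Illustrative sketch, not part of the original file: an FSDatasetMBean
// implementation can satisfy the MetricsSource contract by delegating to the
// helper above. The context name "FSDatasetState" is an assumed example value.
//
//   @Override
//   public void getMetrics(MetricsCollector collector, boolean all) {
//     try {
//       DataNodeMetricHelper.getMetrics(collector, this, "FSDatasetState");
//     } catch (IOException e) {
//       // a real implementation would log the failure and continue
//     }
//   }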
| 3,429 | 41.875 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.metrics;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsSource;
/**
*
 * This interface defines the methods to get the status of the FSDataset of
 * a data node.
 * It is also used for publishing via JMX (hence we follow the JMX naming
 * convention.)
 *
 * Note we have not used the MetricsDynamicMBeanBase to implement this
 * because the interface for the FSDatasetMBean is stable and should
 * be published as an interface.
*
* <p>
 * Data Node runtime statistic info is reported in another MBean
* @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics
*
*/
@InterfaceAudience.Private
public interface FSDatasetMBean extends MetricsSource {
/**
* Returns the total space (in bytes) used by a block pool
* @return the total space used by a block pool
* @throws IOException
*/
public long getBlockPoolUsed(String bpid) throws IOException;
/**
* Returns the total space (in bytes) used by dfs datanode
* @return the total space used by dfs datanode
* @throws IOException
*/
public long getDfsUsed() throws IOException;
/**
* Returns total capacity (in bytes) of storage (used and unused)
* @return total capacity of storage (used and unused)
*/
public long getCapacity();
/**
* Returns the amount of free storage space (in bytes)
* @return The amount of free storage space
* @throws IOException
*/
public long getRemaining() throws IOException;
/**
* Returns the storage id of the underlying storage
*/
public String getStorageInfo();
/**
* Returns the number of failed volumes in the datanode.
* @return The number of failed volumes in the datanode.
*/
public int getNumFailedVolumes();
/**
* Returns each storage location that has failed, sorted.
* @return each storage location that has failed, sorted
*/
String[] getFailedStorageLocations();
/**
* Returns the date/time of the last volume failure in milliseconds since
* epoch.
* @return date/time of last volume failure in milliseconds since epoch
*/
long getLastVolumeFailureDate();
/**
* Returns an estimate of total capacity lost due to volume failures in bytes.
* @return estimate of total capacity lost in bytes
*/
long getEstimatedCapacityLostTotal();
/**
* Returns the amount of cache used by the datanode (in bytes).
*/
public long getCacheUsed();
/**
* Returns the total cache capacity of the datanode (in bytes).
*/
public long getCacheCapacity();
/**
* Returns the number of blocks cached.
*/
public long getNumBlocksCached();
/**
* Returns the number of blocks that the datanode was unable to cache
*/
public long getNumBlocksFailedToCache();
/**
* Returns the number of blocks that the datanode was unable to uncache
*/
public long getNumBlocksFailedToUncache();
}
| 3,803 | 29.432 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import java.util.Collections;
import java.util.List;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
/**
* An enumeration of logs available on a remote NameNode.
*/
public class RemoteEditLogManifest {
private List<RemoteEditLog> logs;
public RemoteEditLogManifest() {
}
public RemoteEditLogManifest(List<RemoteEditLog> logs) {
this.logs = logs;
checkState();
}
/**
* Check that the logs are non-overlapping sequences of transactions,
* in sorted order. They do not need to be contiguous.
* @throws IllegalStateException if incorrect
*/
private void checkState() {
Preconditions.checkNotNull(logs);
RemoteEditLog prev = null;
for (RemoteEditLog log : logs) {
if (prev != null) {
if (log.getStartTxId() <= prev.getEndTxId()) {
throw new IllegalStateException(
"Invalid log manifest (log " + log + " overlaps " + prev + ")\n"
+ this);
}
}
prev = log;
}
}
public List<RemoteEditLog> getLogs() {
return Collections.unmodifiableList(logs);
}
@Override
public String toString() {
return "[" + Joiner.on(", ").join(logs) + "]";
}
}
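/**
 * Illustrative sketch, not part of the original file: builds a manifest from
 * two finalized, non-overlapping edit log segments. The transaction ids are
 * assumed example values; overlapping segments would trigger the
 * IllegalStateException thrown by checkState().
 */
class RemoteEditLogManifestExample {
  public static void main(String[] args) {
    RemoteEditLogManifest manifest = new RemoteEditLogManifest(
        java.util.Arrays.asList(
            new RemoteEditLog(1, 100),
            new RemoteEditLog(101, 200)));
    // Expected to print "[[1,100], [101,200]]".
    System.out.println(manifest);
  }
}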
| 2,096 | 26.96 | 78 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/HeartbeatResponse.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
@InterfaceAudience.Private
@InterfaceStability.Evolving
/**
* Response to {@link DatanodeProtocol#sendHeartbeat}
*/
public class HeartbeatResponse {
/** Commands returned from the namenode to the datanode */
private final DatanodeCommand[] commands;
/** Information about the current HA-related state of the NN */
private final NNHAStatusHeartbeat haStatus;
private final RollingUpgradeStatus rollingUpdateStatus;
private final long fullBlockReportLeaseId;
public HeartbeatResponse(DatanodeCommand[] cmds,
NNHAStatusHeartbeat haStatus, RollingUpgradeStatus rollingUpdateStatus,
long fullBlockReportLeaseId) {
commands = cmds;
this.haStatus = haStatus;
this.rollingUpdateStatus = rollingUpdateStatus;
this.fullBlockReportLeaseId = fullBlockReportLeaseId;
}
public DatanodeCommand[] getCommands() {
return commands;
}
public NNHAStatusHeartbeat getNameNodeHaState() {
return haStatus;
}
public RollingUpgradeStatus getRollingUpdateStatus() {
return rollingUpdateStatus;
}
public long getFullBlockReportLeaseId() {
return fullBlockReportLeaseId;
}
}
| 2,178 | 32.523077 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* The context of the block report.
*
* This is a set of fields that the Datanode sends to provide context about a
* block report RPC. The context includes a unique 64-bit ID which
* identifies the block report as a whole. It also includes the total number
* of RPCs which this block report is split into, and the index into that
* total for the current RPC.
*/
@InterfaceAudience.Private
public class BlockReportContext {
/**
* The total number of RPCs contained in the block report.
*/
private final int totalRpcs;
/**
* The index of this particular RPC.
*/
private final int curRpc;
/**
* A 64-bit ID which identifies the block report as a whole.
*/
private final long reportId;
/**
* The lease ID which this block report is using, or 0 if this block report is
* bypassing rate-limiting.
*/
private final long leaseId;
public BlockReportContext(int totalRpcs, int curRpc,
long reportId, long leaseId) {
this.totalRpcs = totalRpcs;
this.curRpc = curRpc;
this.reportId = reportId;
this.leaseId = leaseId;
}
public int getTotalRpcs() {
return totalRpcs;
}
public int getCurRpc() {
return curRpc;
}
public long getReportId() {
return reportId;
}
public long getLeaseId() {
return leaseId;
}
}
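/**
 * Illustrative sketch, not part of the original file: builds one
 * BlockReportContext per RPC when a report is split across three RPCs.
 * The report id and lease id are assumed example values.
 */
class BlockReportContextExample {
  public static void main(String[] args) {
    final int totalRpcs = 3;
    final long reportId = 42L;  // normally a unique 64-bit id per report
    final long leaseId = 7L;    // 0 would bypass rate-limiting
    for (int curRpc = 0; curRpc < totalRpcs; curRpc++) {
      BlockReportContext ctx =
          new BlockReportContext(totalRpcs, curRpc, reportId, leaseId);
      System.out.println("rpc " + ctx.getCurRpc() + " of " + ctx.getTotalRpcs());
    }
  }
}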
| 2,253 | 27.531646 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
/**
* This exception is thrown when a datanode tries to register or communicate
* with the namenode when it does not appear on the list of included nodes,
* or has been specifically excluded.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DisallowedDatanodeException extends IOException {
/** for java.io.Serializable */
private static final long serialVersionUID = 1L;
public DisallowedDatanodeException(DatanodeID nodeID, String reason) {
super("Datanode denied communication with namenode because "
+ reason + ": " + nodeID);
}
public DisallowedDatanodeException(DatanodeID nodeID) {
this(nodeID, "the host is not in the include-list");
}
}
| 1,766 | 35.061224 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import com.google.common.annotations.VisibleForTesting;
/**
* DatanodeRegistration class contains all information the name-node needs
* to identify and verify a data-node when it contacts the name-node.
* This information is sent by data-node with each communication request.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DatanodeRegistration extends DatanodeID
implements NodeRegistration {
private final StorageInfo storageInfo;
private ExportedBlockKeys exportedKeys;
private final String softwareVersion;
private NamespaceInfo nsInfo;
@VisibleForTesting
public DatanodeRegistration(String uuid, DatanodeRegistration dnr) {
this(new DatanodeID(uuid, dnr),
dnr.getStorageInfo(),
dnr.getExportedKeys(),
dnr.getSoftwareVersion());
}
public DatanodeRegistration(DatanodeID dn, StorageInfo info,
ExportedBlockKeys keys, String softwareVersion) {
super(dn);
this.storageInfo = info;
this.exportedKeys = keys;
this.softwareVersion = softwareVersion;
}
public StorageInfo getStorageInfo() {
return storageInfo;
}
public void setExportedKeys(ExportedBlockKeys keys) {
this.exportedKeys = keys;
}
public ExportedBlockKeys getExportedKeys() {
return exportedKeys;
}
public String getSoftwareVersion() {
return softwareVersion;
}
@Override // NodeRegistration
public int getVersion() {
return storageInfo.getLayoutVersion();
}
public void setNamespaceInfo(NamespaceInfo nsInfo) {
this.nsInfo = nsInfo;
}
public NamespaceInfo getNamespaceInfo() {
return nsInfo;
}
@Override // NodeRegistration
public String getRegistrationID() {
return Storage.getRegistrationID(storageInfo);
}
@Override // NodeRegistration
public String getAddress() {
return getXferAddr();
}
@Override
public String toString() {
return getClass().getSimpleName()
+ "(" + super.toString()
+ ", datanodeUuid=" + getDatanodeUuid()
+ ", infoPort=" + getInfoPort()
+ ", infoSecurePort=" + getInfoSecurePort()
+ ", ipcPort=" + getIpcPort()
+ ", storageInfo=" + storageInfo
+ ")";
}
@Override
public boolean equals(Object to) {
return super.equals(to);
}
@Override
public int hashCode() {
return super.hashCode();
}
}
| 3,554 | 28.380165 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLog.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import com.google.common.base.Function;
import com.google.common.collect.ComparisonChain;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
public class RemoteEditLog implements Comparable<RemoteEditLog> {
private long startTxId = HdfsServerConstants.INVALID_TXID;
private long endTxId = HdfsServerConstants.INVALID_TXID;
private boolean isInProgress = false;
public RemoteEditLog() {
}
public RemoteEditLog(long startTxId, long endTxId) {
this.startTxId = startTxId;
this.endTxId = endTxId;
this.isInProgress = (endTxId == HdfsServerConstants.INVALID_TXID);
}
public RemoteEditLog(long startTxId, long endTxId, boolean inProgress) {
this.startTxId = startTxId;
this.endTxId = endTxId;
this.isInProgress = inProgress;
}
public long getStartTxId() {
return startTxId;
}
public long getEndTxId() {
return endTxId;
}
public boolean isInProgress() {
return isInProgress;
}
@Override
public String toString() {
if (!isInProgress) {
return "[" + startTxId + "," + endTxId + "]";
} else {
return "[" + startTxId + "-? (in-progress)]";
}
}
@Override
public int compareTo(RemoteEditLog log) {
return ComparisonChain.start()
.compare(startTxId, log.startTxId)
.compare(endTxId, log.endTxId)
.result();
}
@Override
public boolean equals(Object o) {
if (!(o instanceof RemoteEditLog)) return false;
return this.compareTo((RemoteEditLog)o) == 0;
}
@Override
public int hashCode() {
return (int) (startTxId * endTxId);
}
/**
* Guava <code>Function</code> which applies {@link #getStartTxId()}
*/
public static final Function<RemoteEditLog, Long> GET_START_TXID =
new Function<RemoteEditLog, Long>() {
@Override
public Long apply(RemoteEditLog log) {
if (null == log) {
return HdfsServerConstants.INVALID_TXID;
}
return log.getStartTxId();
}
};
}
| 2,844 | 28.030612 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
import org.apache.hadoop.ipc.GenericRefreshProtocol;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.tracing.TraceAdminProtocol;
/** The full set of RPC methods implemented by the Namenode. */
@InterfaceAudience.Private
public interface NamenodeProtocols
extends ClientProtocol,
DatanodeProtocol,
NamenodeProtocol,
RefreshAuthorizationPolicyProtocol,
RefreshUserMappingsProtocol,
RefreshCallQueueProtocol,
GenericRefreshProtocol,
GetUserMappingsProtocol,
HAServiceProtocol,
TraceAdminProtocol {
}
| 1,840 | 39.911111 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageBlockReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
/**
* Block report for a Datanode storage
*/
public class StorageBlockReport {
private final DatanodeStorage storage;
private final BlockListAsLongs blocks;
public StorageBlockReport(DatanodeStorage storage, BlockListAsLongs blocks) {
this.storage = storage;
this.blocks = blocks;
}
public DatanodeStorage getStorage() {
return storage;
}
public BlockListAsLongs getBlocks() {
return blocks;
}
}
| 1,350 | 30.418605 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import org.apache.hadoop.hdfs.protocol.Block;
/**
* A data structure to store the blocks in an incremental block report.
*/
public class ReceivedDeletedBlockInfo {
Block block;
BlockStatus status;
String delHints;
public static enum BlockStatus {
RECEIVING_BLOCK(1),
RECEIVED_BLOCK(2),
DELETED_BLOCK(3);
private final int code;
BlockStatus(int code) {
this.code = code;
}
public int getCode() {
return code;
}
public static BlockStatus fromCode(int code) {
for (BlockStatus bs : BlockStatus.values()) {
if (bs.code == code) {
return bs;
}
}
return null;
}
}
public ReceivedDeletedBlockInfo() {
}
public ReceivedDeletedBlockInfo(
Block blk, BlockStatus status, String delHints) {
this.block = blk;
this.status = status;
this.delHints = delHints;
}
public Block getBlock() {
return this.block;
}
public void setBlock(Block blk) {
this.block = blk;
}
public String getDelHints() {
return this.delHints;
}
public void setDelHints(String hints) {
this.delHints = hints;
}
public BlockStatus getStatus() {
return status;
}
@Override
public boolean equals(Object o) {
if (!(o instanceof ReceivedDeletedBlockInfo)) {
return false;
}
ReceivedDeletedBlockInfo other = (ReceivedDeletedBlockInfo) o;
return this.block.equals(other.getBlock())
&& this.status == other.status
&& this.delHints != null
&& this.delHints.equals(other.delHints);
}
@Override
public int hashCode() {
assert false : "hashCode not designed";
return 0;
}
public boolean blockEquals(Block b) {
return this.block.equals(b);
}
public boolean isDeletedBlock() {
return status == BlockStatus.DELETED_BLOCK;
}
@Override
public String toString() {
return block.toString() + ", status: " + status +
", delHint: " + delHints;
}
}
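/**
 * Illustrative sketch, not part of the original file: describes a newly
 * received block for an incremental block report. The block id, length and
 * generation stamp are assumed example values; a null delHints means no
 * deletion hint is attached.
 */
class ReceivedDeletedBlockInfoExample {
  public static void main(String[] args) {
    Block blk = new Block(1073741825L, 134217728L, 1001L);
    ReceivedDeletedBlockInfo info = new ReceivedDeletedBlockInfo(
        blk, ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null);
    System.out.println(info);
  }
}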
| 2,842 | 23.299145 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A RegisterCommand is an instruction to a datanode to register with the namenode.
* This command can't be combined with other commands in the same response.
* This is because after the datanode processes RegisterCommand, it will skip
* the rest of the DatanodeCommands in the same HeartbeatResponse.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class RegisterCommand extends DatanodeCommand {
public static final DatanodeCommand REGISTER = new RegisterCommand();
public RegisterCommand() {
super(DatanodeProtocol.DNA_REGISTER);
}
}
| 1,552 | 39.868421 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReplicaRecoveryInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
/**
* Replica recovery information.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ReplicaRecoveryInfo extends Block {
private final ReplicaState originalState;
public ReplicaRecoveryInfo(long blockId, long diskLen, long gs, ReplicaState rState) {
set(blockId, diskLen, gs);
originalState = rState;
}
public ReplicaState getOriginalReplicaState() {
return originalState;
}
@Override
public boolean equals(Object o) {
return super.equals(o);
}
@Override
public int hashCode() {
return super.hashCode();
}
}
| 1,680 | 30.716981 | 88 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
/**
* Maintains an array of blocks and their corresponding storage IDs.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class BlocksWithLocations {
/**
* A class to keep track of a block and its locations
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public static class BlockWithLocations {
final Block block;
final String[] datanodeUuids;
final String[] storageIDs;
final StorageType[] storageTypes;
/** constructor */
public BlockWithLocations(Block block, String[] datanodeUuids,
String[] storageIDs, StorageType[] storageTypes) {
this.block = block;
this.datanodeUuids = datanodeUuids;
this.storageIDs = storageIDs;
this.storageTypes = storageTypes;
}
/** get the block */
public Block getBlock() {
return block;
}
/** get the block's datanode locations */
public String[] getDatanodeUuids() {
return datanodeUuids;
}
/** get the block's storage locations */
public String[] getStorageIDs() {
return storageIDs;
}
/** @return the storage types */
public StorageType[] getStorageTypes() {
return storageTypes;
}
@Override
public String toString() {
final StringBuilder b = new StringBuilder();
b.append(block);
if (datanodeUuids.length == 0) {
return b.append("[]").toString();
}
appendString(0, b.append("["));
for(int i = 1; i < datanodeUuids.length; i++) {
appendString(i, b.append(","));
}
return b.append("]").toString();
}
private StringBuilder appendString(int i, StringBuilder b) {
return b.append("[").append(storageTypes[i]).append("]")
.append(storageIDs[i])
.append("@").append(datanodeUuids[i]);
}
}
private final BlockWithLocations[] blocks;
/** Constructor with one parameter */
public BlocksWithLocations(BlockWithLocations[] blocks) {
this.blocks = blocks;
}
/** getter */
public BlockWithLocations[] getBlocks() {
return blocks;
}
}
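/**
 * Illustrative sketch, not part of the original file: one block with replicas
 * on two datanode storages. All ids and storage types are assumed example
 * values.
 */
class BlocksWithLocationsExample {
  public static void main(String[] args) {
    BlocksWithLocations.BlockWithLocations bwl =
        new BlocksWithLocations.BlockWithLocations(
            new Block(1073741825L, 134217728L, 1001L),
            new String[] {"dn-uuid-1", "dn-uuid-2"},
            new String[] {"storage-1", "storage-2"},
            new StorageType[] {StorageType.DISK, StorageType.SSD});
    System.out.println(bwl);
  }
}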
| 3,173 | 28.943396 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.util.VersionInfo;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* NamespaceInfo is returned by the name-node in reply
* to a data-node handshake.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class NamespaceInfo extends StorageInfo {
final String buildVersion;
String blockPoolID = ""; // id of the block pool
String softwareVersion;
long capabilities;
  // Only authoritative on the server side, to determine what is advertised to
  // clients. The Capability enum constants update the supported values.
private static long CAPABILITIES_SUPPORTED = 0;
public enum Capability {
UNKNOWN(false),
STORAGE_BLOCK_REPORT_BUFFERS(true); // use optimized ByteString buffers
private final long mask;
Capability(boolean isSupported) {
int bits = ordinal() - 1;
mask = (bits < 0) ? 0 : (1L << bits);
if (isSupported) {
CAPABILITIES_SUPPORTED |= mask;
}
}
public long getMask() {
return mask;
}
}
  // defaults to enabled capabilities since this ctor is for the server
public NamespaceInfo() {
super(NodeType.NAME_NODE);
buildVersion = null;
capabilities = CAPABILITIES_SUPPORTED;
}
  // defaults to enabled capabilities since this ctor is for the server
public NamespaceInfo(int nsID, String clusterID, String bpID,
long cT, String buildVersion, String softwareVersion) {
this(nsID, clusterID, bpID, cT, buildVersion, softwareVersion,
CAPABILITIES_SUPPORTED);
}
// for use by server and/or client
public NamespaceInfo(int nsID, String clusterID, String bpID,
long cT, String buildVersion, String softwareVersion,
long capabilities) {
super(HdfsServerConstants.NAMENODE_LAYOUT_VERSION, nsID, clusterID, cT,
NodeType.NAME_NODE);
blockPoolID = bpID;
this.buildVersion = buildVersion;
this.softwareVersion = softwareVersion;
this.capabilities = capabilities;
}
public NamespaceInfo(int nsID, String clusterID, String bpID,
long cT) {
this(nsID, clusterID, bpID, cT, Storage.getBuildVersion(),
VersionInfo.getVersion());
}
public long getCapabilities() {
return capabilities;
}
@VisibleForTesting
public void setCapabilities(long capabilities) {
this.capabilities = capabilities;
}
public boolean isCapabilitySupported(Capability capability) {
Preconditions.checkArgument(capability != Capability.UNKNOWN,
"cannot test for unknown capability");
long mask = capability.getMask();
return (capabilities & mask) == mask;
}
public String getBuildVersion() {
return buildVersion;
}
public String getBlockPoolID() {
return blockPoolID;
}
public String getSoftwareVersion() {
return softwareVersion;
}
@Override
public String toString(){
return super.toString() + ";bpid=" + blockPoolID;
}
public void validateStorage(NNStorage storage) throws IOException {
if (layoutVersion != storage.getLayoutVersion() ||
namespaceID != storage.getNamespaceID() ||
cTime != storage.cTime ||
!clusterID.equals(storage.getClusterID()) ||
!blockPoolID.equals(storage.getBlockPoolID())) {
throw new IOException("Inconsistent namespace information:\n" +
"NamespaceInfo has:\n" +
"LV=" + layoutVersion + ";" +
"NS=" + namespaceID + ";" +
"cTime=" + cTime + ";" +
"CID=" + clusterID + ";" +
"BPID=" + blockPoolID +
".\nStorage has:\n" +
"LV=" + storage.getLayoutVersion() + ";" +
"NS=" + storage.getNamespaceID() + ";" +
"cTime=" + storage.getCTime() + ";" +
"CID=" + storage.getClusterID() + ";" +
"BPID=" + storage.getBlockPoolID() + ".");
}
}
}
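/**
 * Illustrative sketch, not part of the original file: checks whether a
 * NamespaceInfo advertises the STORAGE_BLOCK_REPORT_BUFFERS capability.
 * The namespace id, cluster id and block pool id are assumed example values.
 */
class NamespaceInfoCapabilityExample {
  public static void main(String[] args) {
    NamespaceInfo info = new NamespaceInfo(1, "cluster-1", "bp-1", 0L);
    boolean supported = info.isCapabilitySupported(
        NamespaceInfo.Capability.STORAGE_BLOCK_REPORT_BUFFERS);
    System.out.println("storage block report buffers supported: " + supported);
  }
}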
| 5,148 | 32.219355 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/VolumeFailureSummary.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
/**
* Summarizes information about data volume failures on a DataNode.
*/
public class VolumeFailureSummary {
private final String[] failedStorageLocations;
private final long lastVolumeFailureDate;
private final long estimatedCapacityLostTotal;
/**
* Creates a new VolumeFailureSummary.
*
* @param failedStorageLocations storage locations that have failed
* @param lastVolumeFailureDate date/time of last volume failure in
* milliseconds since epoch
* @param estimatedCapacityLostTotal estimate of capacity lost in bytes
*/
public VolumeFailureSummary(String[] failedStorageLocations,
long lastVolumeFailureDate, long estimatedCapacityLostTotal) {
this.failedStorageLocations = failedStorageLocations;
this.lastVolumeFailureDate = lastVolumeFailureDate;
this.estimatedCapacityLostTotal = estimatedCapacityLostTotal;
}
/**
* Returns each storage location that has failed, sorted.
*
* @return each storage location that has failed, sorted
*/
public String[] getFailedStorageLocations() {
return this.failedStorageLocations;
}
/**
* Returns the date/time of the last volume failure in milliseconds since
* epoch.
*
* @return date/time of last volume failure in milliseconds since epoch
*/
public long getLastVolumeFailureDate() {
return this.lastVolumeFailureDate;
}
/**
* Returns estimate of capacity lost. This is said to be an estimate, because
* in some cases it's impossible to know the capacity of the volume, such as if
* we never had a chance to query its capacity before the failure occurred.
*
* @return estimate of capacity lost in bytes
*/
public long getEstimatedCapacityLostTotal() {
return this.estimatedCapacityLostTotal;
}
}
| 2,633 | 35.082192 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BalancerBandwidthCommand.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
/*
* A system administrator can tune the balancer bandwidth parameter
* (dfs.balance.bandwidthPerSec) dynamically by calling
* "dfsadmin -setBalanacerBandwidth newbandwidth".
* This class is to define the command which sends the new bandwidth value to
* each datanode.
*/
/**
* Balancer bandwidth command instructs each datanode to change its value for
* the max amount of network bandwidth it may use during the block balancing
* operation.
*
* The Balancer Bandwidth Command contains the new bandwidth value as its
* payload. The bandwidth value is in bytes per second.
*/
public class BalancerBandwidthCommand extends DatanodeCommand {
private final static long BBC_DEFAULTBANDWIDTH = 0L;
private final long bandwidth;
/**
* Balancer Bandwidth Command constructor. Sets bandwidth to 0.
*/
BalancerBandwidthCommand() {
this(BBC_DEFAULTBANDWIDTH);
}
/**
* Balancer Bandwidth Command constructor.
*
   * @param bandwidth Balancer bandwidth in bytes per second.
*/
public BalancerBandwidthCommand(long bandwidth) {
super(DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE);
this.bandwidth = bandwidth;
}
/**
* Get current value of the max balancer bandwidth in bytes per second.
*
   * @return bandwidth Balancer bandwidth in bytes per second for this datanode.
*/
public long getBalancerBandwidthValue() {
return this.bandwidth;
}
}
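/**
 * Illustrative sketch, not part of the original file: constructs the command
 * with a 10 MB/s limit. The value is in bytes per second, matching what
 * "dfsadmin -setBalancerBandwidth" passes down to each datanode.
 */
class BalancerBandwidthCommandExample {
  public static void main(String[] args) {
    long tenMegabytesPerSecond = 10L * 1024 * 1024;
    BalancerBandwidthCommand cmd =
        new BalancerBandwidthCommand(tenMegabytesPerSecond);
    System.out.println(cmd.getBalancerBandwidthValue());  // prints 10485760
  }
}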
| 2,264 | 32.80597 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.protocol;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.security.KerberosInfo;
/** An inter-datanode protocol for updating generation stamp
*/
@KerberosInfo(
serverPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY,
clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
@InterfaceAudience.Private
public interface InterDatanodeProtocol {
public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);
/**
* Until version 9, this class InterDatanodeProtocol served as both
* the interface to the DN AND the RPC protocol used to communicate with the
* DN.
*
   * This class is used by the DN to insulate itself from the protocol
   * serialization.
   *
   * If you are adding/changing the DN's interface then you need to
   * change both this class and ALSO the related protocol buffer
   * wire protocol definition in InterDatanodeProtocol.proto.
*
* For more details on protocol buffer wire protocol, please see
* .../org/apache/hadoop/hdfs/protocolPB/overview.html
*/
public static final long versionID = 6L;
/**
* Initialize a replica recovery.
*
* @return actual state of the replica on this data-node or
* null if data-node does not have the replica.
*/
ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException;
/**
* Update replica with the new generation stamp and length.
*/
String updateReplicaUnderRecovery(ExtendedBlock oldBlock, long recoveryId,
long newBlockId, long newLength)
throws IOException;
}
| 2,779 | 37.082192 | 83 |
java
|