Dataset schema:
  repo             string  (length 1 to 191)
  file             string  (length 23 to 351)
  code             string  (length 0 to 5.32M)
  file_length      int64   (0 to 5.32M)
  avg_line_length  float64 (0 to 2.9k)
  max_line_length  int64   (0 to 288k)
  extension_type   string  (1 distinct value)
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FencedException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import java.io.IOException; /** * Thrown when a previous user of a shared resource attempts to use it after * being fenced by another user. */ public class FencedException extends IOException { private static final long serialVersionUID = 1L; public FencedException(String errorMsg) { super(errorMsg); } }
1,196
35.272727
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import org.apache.hadoop.classification.InterfaceAudience; /** * Generic interface specifying the information that needs to be sent to the name-node * during the registration process. */ @InterfaceAudience.Private public interface NodeRegistration { /** * Get address of the server node. * @return ipAddr:portNumber */ public String getAddress(); /** * Get registration ID of the server node. */ public String getRegistrationID(); /** * Get layout version of the server node. */ public int getVersion(); @Override public String toString(); }
1,437
28.958333
79
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; /**************************************************** * A BlockCommand is an instruction to a datanode * regarding some blocks under its control. It tells * the DataNode to either invalidate a set of indicated * blocks, or to copy a set of indicated blocks to * another DataNode. * ****************************************************/ @InterfaceAudience.Private @InterfaceStability.Evolving public class BlockCommand extends DatanodeCommand { /** * This constant is used to indicate that the block deletion does not need * explicit ACK from the datanode. When a block is put into the list of blocks * to be deleted, it's size is set to this constant. We assume that no block * would actually have this size. Otherwise, we would miss ACKs for blocks * with such size. Positive number is used for compatibility reasons. 
*/ public static final long NO_ACK = Long.MAX_VALUE; final String poolId; final Block[] blocks; final DatanodeInfo[][] targets; final StorageType[][] targetStorageTypes; final String[][] targetStorageIDs; /** * Create BlockCommand for transferring blocks to another datanode * @param blocktargetlist blocks to be transferred */ public BlockCommand(int action, String poolId, List<BlockTargetPair> blocktargetlist) { super(action); this.poolId = poolId; blocks = new Block[blocktargetlist.size()]; targets = new DatanodeInfo[blocks.length][]; targetStorageTypes = new StorageType[blocks.length][]; targetStorageIDs = new String[blocks.length][]; for(int i = 0; i < blocks.length; i++) { BlockTargetPair p = blocktargetlist.get(i); blocks[i] = p.block; targets[i] = DatanodeStorageInfo.toDatanodeInfos(p.targets); targetStorageTypes[i] = DatanodeStorageInfo.toStorageTypes(p.targets); targetStorageIDs[i] = DatanodeStorageInfo.toStorageIDs(p.targets); } } private static final DatanodeInfo[][] EMPTY_TARGET_DATANODES = {}; private static final StorageType[][] EMPTY_TARGET_STORAGE_TYPES = {}; private static final String[][] EMPTY_TARGET_STORAGEIDS = {}; /** * Create BlockCommand for the given action * @param blocks blocks related to the action */ public BlockCommand(int action, String poolId, Block blocks[]) { this(action, poolId, blocks, EMPTY_TARGET_DATANODES, EMPTY_TARGET_STORAGE_TYPES, EMPTY_TARGET_STORAGEIDS); } /** * Create BlockCommand for the given action * @param blocks blocks related to the action */ public BlockCommand(int action, String poolId, Block[] blocks, DatanodeInfo[][] targets, StorageType[][] targetStorageTypes, String[][] targetStorageIDs) { super(action); this.poolId = poolId; this.blocks = blocks; this.targets = targets; this.targetStorageTypes = targetStorageTypes; this.targetStorageIDs = targetStorageIDs; } public String getBlockPoolId() { return poolId; } public Block[] getBlocks() { return blocks; } public DatanodeInfo[][] getTargets() { return targets; } public StorageType[][] getTargetStorageTypes() { return targetStorageTypes; } public String[][] getTargetStorageIDs() { return targetStorageIDs; } }
4,545
34.795276
88
java
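The BlockCommand constructors above pair an action code from DatanodeProtocol with the affected blocks. Below is a minimal sketch of how a name-node-side caller might build an invalidation command and how a datanode could dispatch on it, assuming the HDFS classes shown in this section are on the classpath; the pool ID and block values are placeholders for illustration only.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class BlockCommandSketch {
  public static void main(String[] args) {
    // Blocks the datanode should delete.
    Block[] toInvalidate = {
        new Block(1073741825L, 0L, 1001L),   // blockId, numBytes, generationStamp
        new Block(1073741826L, 0L, 1002L)
    };
    BlockCommand cmd = new BlockCommand(
        DatanodeProtocol.DNA_INVALIDATE, "BP-example-pool", toInvalidate);

    // Datanode side: dispatch on the protocol-specific action code.
    switch (cmd.getAction()) {
      case DatanodeProtocol.DNA_INVALIDATE:
        System.out.println("invalidate " + cmd.getBlocks().length
            + " block(s) in pool " + cmd.getBlockPoolId());
        break;
      case DatanodeProtocol.DNA_TRANSFER:
        // targets and storage types are only populated for transfer commands
        break;
      default:
        break;
    }
  }
}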
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ServerCommand.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Base class for a server command. * Issued by the name-node to notify other servers what should be done. * Commands are defined by actions defined in respective protocols. * * @see DatanodeProtocol * @see NamenodeProtocol */ @InterfaceAudience.Private @InterfaceStability.Evolving public abstract class ServerCommand { private final int action; /** * Create a command for the specified action. * Actions are protocol specific. * * @see DatanodeProtocol * @see NamenodeProtocol * @param action protocol specific action */ public ServerCommand(int action) { this.action = action; } /** * Get server command action. * @return action code. */ public int getAction() { return this.action; } public String toString() { final StringBuilder sb = new StringBuilder(); sb.append(getClass().getSimpleName()); sb.append("/"); sb.append(action); return sb.toString(); } }
1,928
29.140625
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/CheckpointCommand.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; /** * Checkpoint command. * <p> * Returned to the backup node by the name-node as a reply to the * {@link NamenodeProtocol#startCheckpoint(NamenodeRegistration)} * request.<br> * Contains: * <ul> * <li>{@link CheckpointSignature} identifying the particular checkpoint</li> * <li>indicator whether the backup image should be discarded before starting * the checkpoint</li> * <li>indicator whether the image should be transferred back to the name-node * upon completion of the checkpoint.</li> * </ul> */ @InterfaceAudience.Private @InterfaceStability.Evolving public class CheckpointCommand extends NamenodeCommand { private final CheckpointSignature cSig; private final boolean needToReturnImage; public CheckpointCommand() { this(null, false); } public CheckpointCommand(CheckpointSignature sig, boolean needToReturnImg) { super(NamenodeProtocol.ACT_CHECKPOINT); this.cSig = sig; this.needToReturnImage = needToReturnImg; } /** * Checkpoint signature is used to ensure * that nodes are talking about the same checkpoint. */ public CheckpointSignature getSignature() { return cSig; } /** * Indicates whether the new checkpoint image needs to be transferred * back to the name-node after the checkpoint is done. * * @return true if the checkpoint should be returned back. */ public boolean needToReturnImage() { return needToReturnImage; } }
2,503
32.837838
78
java
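Because CheckpointCommand is returned as a NamenodeCommand from startCheckpoint, the backup node has to inspect the action code before downcasting. The sketch below shows that dispatch under the assumption that a NamenodeCommand has already been obtained from the protocol; the handling logic is illustrative only.

import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

public class CheckpointDispatchSketch {
  /** Decide what to do with the name-node's reply to startCheckpoint(). */
  static void handle(NamenodeCommand cmd) {
    if (cmd.getAction() == NamenodeProtocol.ACT_CHECKPOINT
        && cmd instanceof CheckpointCommand) {
      CheckpointCommand cp = (CheckpointCommand) cmd;
      // The signature identifies the checkpoint; needToReturnImage() says
      // whether the finished image must be uploaded back to the name-node.
      System.out.println("checkpoint " + cp.getSignature()
          + ", return image: " + cp.needToReturnImage());
    } else if (cmd.getAction() == NamenodeProtocol.ACT_SHUTDOWN) {
      System.out.println("name-node asked this node to shut down");
    }
  }
}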
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NNHAStatusHeartbeat.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; @InterfaceAudience.Private @InterfaceStability.Evolving public class NNHAStatusHeartbeat { private final HAServiceState state; private long txid = HdfsServerConstants.INVALID_TXID; public NNHAStatusHeartbeat(HAServiceState state, long txid) { this.state = state; this.txid = txid; } public HAServiceState getState() { return state; } public long getTxId() { return txid; } }
1,518
32.755556
75
java
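NNHAStatusHeartbeat is what lets a datanode track which of its name-nodes currently claims to be active. A small illustrative check, written against the class as shown above; how the surrounding heartbeat code prioritizes competing claims (e.g. by comparing transaction IDs) is an assumption here, not shown in this section.

import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;

public class HaStatusSketch {
  /** Returns true if the reporting name-node claims to be the active one. */
  static boolean isActive(NNHAStatusHeartbeat haStatus) {
    // getTxId() can additionally be used to prefer the claim carrying the
    // higher transaction id when two name-nodes both report ACTIVE.
    return haStatus.getState() == HAServiceState.ACTIVE;
  }
}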
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import java.io.*; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.io.retry.Idempotent; import org.apache.hadoop.security.KerberosInfo; /********************************************************************** * Protocol that a DFS datanode uses to communicate with the NameNode. * It's used to upload current load information and block reports. * * The only way a NameNode can communicate with a DataNode is by * returning values from these functions. * **********************************************************************/ @KerberosInfo( serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY) @InterfaceAudience.Private public interface DatanodeProtocol { /** * This class is used by both the Namenode (client) and BackupNode (server) * to insulate from the protocol serialization. * * If you are adding/changing DN's interface then you need to * change both this class and ALSO related protocol buffer * wire protocol definition in DatanodeProtocol.proto. * * For more details on protocol buffer wire protocol, please see * .../org/apache/hadoop/hdfs/protocolPB/overview.html */ public static final long versionID = 28L; // error code final static int NOTIFY = 0; final static int DISK_ERROR = 1; // there are still valid volumes on DN final static int INVALID_BLOCK = 2; final static int FATAL_DISK_ERROR = 3; // no valid volumes left on DN /** * Determines actions that data node should perform * when receiving a datanode command. */ final static int DNA_UNKNOWN = 0; // unknown action final static int DNA_TRANSFER = 1; // transfer blocks to another datanode final static int DNA_INVALIDATE = 2; // invalidate blocks final static int DNA_SHUTDOWN = 3; // shutdown node final static int DNA_REGISTER = 4; // re-register final static int DNA_FINALIZE = 5; // finalize previous upgrade final static int DNA_RECOVERBLOCK = 6; // request a block recovery final static int DNA_ACCESSKEYUPDATE = 7; // update access key final static int DNA_BALANCERBANDWIDTHUPDATE = 8; // update balancer bandwidth final static int DNA_CACHE = 9; // cache blocks final static int DNA_UNCACHE = 10; // uncache blocks /** * Register Datanode. 
* * @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration) * @param registration datanode registration information * @return the given {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration} with * updated registration information */ @Idempotent public DatanodeRegistration registerDatanode(DatanodeRegistration registration ) throws IOException; /** * sendHeartbeat() tells the NameNode that the DataNode is still * alive and well. Includes some status info, too. * It also gives the NameNode a chance to return * an array of "DatanodeCommand" objects in HeartbeatResponse. * A DatanodeCommand tells the DataNode to invalidate local block(s), * or to copy them to other DataNodes, etc. * @param registration datanode registration information * @param reports utilization report per storage * @param xmitsInProgress number of transfers from this datanode to others * @param xceiverCount number of active transceiver threads * @param failedVolumes number of failed volumes * @param volumeFailureSummary info about volume failures * @param requestFullBlockReportLease whether to request a full block * report lease. * @throws IOException on error */ @Idempotent public HeartbeatResponse sendHeartbeat(DatanodeRegistration registration, StorageReport[] reports, long dnCacheCapacity, long dnCacheUsed, int xmitsInProgress, int xceiverCount, int failedVolumes, VolumeFailureSummary volumeFailureSummary, boolean requestFullBlockReportLease) throws IOException; /** * blockReport() tells the NameNode about all the locally-stored blocks. * The NameNode returns an array of Blocks that have become obsolete * and should be deleted. This function is meant to upload *all* * the locally-stored blocks. It's invoked upon startup and then * infrequently afterwards. * @param registration datanode registration * @param poolId the block pool ID for the blocks * @param reports report of blocks per storage * Each finalized block is represented as 3 longs. Each under- * construction replica is represented as 4 longs. * This is done instead of Block[] to reduce memory used by block reports. * @param reports report of blocks per storage * @param context Context information for this block report. * * @return - the next command for DN to process. * @throws IOException */ @Idempotent public DatanodeCommand blockReport(DatanodeRegistration registration, String poolId, StorageBlockReport[] reports, BlockReportContext context) throws IOException; /** * Communicates the complete list of locally cached blocks to the NameNode. * * This method is similar to * {@link #blockReport(DatanodeRegistration, String, StorageBlockReport[], BlockReportContext)}, * which is used to communicated blocks stored on disk. * * @param The datanode registration. * @param poolId The block pool ID for the blocks. * @param blockIds A list of block IDs. * @return The DatanodeCommand. * @throws IOException */ @Idempotent public DatanodeCommand cacheReport(DatanodeRegistration registration, String poolId, List<Long> blockIds) throws IOException; /** * blockReceivedAndDeleted() allows the DataNode to tell the NameNode about * recently-received and -deleted block data. * * For the case of received blocks, a hint for preferred replica to be * deleted when there is any excessive blocks is provided. * For example, whenever client code * writes a new Block here, or another DataNode copies a Block to * this DataNode, it will call blockReceived(). 
*/ @Idempotent public void blockReceivedAndDeleted(DatanodeRegistration registration, String poolId, StorageReceivedDeletedBlocks[] rcvdAndDeletedBlocks) throws IOException; /** * errorReport() tells the NameNode about something that has gone * awry. Useful for debugging. */ @Idempotent public void errorReport(DatanodeRegistration registration, int errorCode, String msg) throws IOException; @Idempotent public NamespaceInfo versionRequest() throws IOException; /** * same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])} * } */ @Idempotent public void reportBadBlocks(LocatedBlock[] blocks) throws IOException; /** * Commit block synchronization in lease recovery */ @Idempotent public void commitBlockSynchronization(ExtendedBlock block, long newgenerationstamp, long newlength, boolean closeFile, boolean deleteblock, DatanodeID[] newtargets, String[] newtargetstorages) throws IOException; }
8,744
41.451456
100
java
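The heartbeat is the only channel through which the NameNode can hand work to a DataNode, so a datanode's offer-service loop reduces to: send a heartbeat, then act on whatever commands come back. A simplified sketch of one iteration follows; it assumes HeartbeatResponse exposes the returned commands via getCommands(), and it passes an empty storage report and zeroed counters purely for illustration, whereas real heartbeats carry per-storage utilization.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

public class HeartbeatLoopSketch {
  static void heartbeatOnce(DatanodeProtocol namenode, DatanodeRegistration reg)
      throws IOException {
    HeartbeatResponse resp = namenode.sendHeartbeat(
        reg,
        new StorageReport[0], // per-storage utilization, empty for illustration
        0L, 0L,               // cache capacity / cache used
        0, 0, 0,              // xmitsInProgress, xceiverCount, failedVolumes
        null,                 // volumeFailureSummary
        false);               // requestFullBlockReportLease
    // Each returned command tells the datanode to invalidate, transfer,
    // re-register, recover a block, and so on.
    if (resp.getCommands() != null) {
      for (DatanodeCommand cmd : resp.getCommands()) {
        System.out.println("received action " + cmd.getAction());
      }
    }
  }
}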
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockIdCommand.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /**************************************************** * A BlockIdCommand is an instruction to a datanode * regarding some blocks under its control. ****************************************************/ @InterfaceAudience.Private @InterfaceStability.Evolving public class BlockIdCommand extends DatanodeCommand { final String poolId; final long blockIds[]; /** * Create BlockIdCommand for the given action */ public BlockIdCommand(int action, String poolId, long[] blockIds) { super(action); this.poolId = poolId; this.blockIds = blockIds; } public String getBlockPoolId() { return poolId; } public long[] getBlockIds() { return blockIds; } }
1,670
32.42
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; /** * Information sent by a subordinate name-node to the active name-node * during the registration process. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class NamenodeRegistration extends StorageInfo implements NodeRegistration { final String rpcAddress; // RPC address of the node final String httpAddress; // HTTP address of the node final NamenodeRole role; // node role public NamenodeRegistration(String address, String httpAddress, StorageInfo storageInfo, NamenodeRole role) { super(storageInfo); this.rpcAddress = address; this.httpAddress = httpAddress; this.role = role; } @Override // NodeRegistration public String getAddress() { return rpcAddress; } public String getHttpAddress() { return httpAddress; } @Override // NodeRegistration public String getRegistrationID() { return Storage.getRegistrationID(this); } @Override // NodeRegistration public int getVersion() { return super.getLayoutVersion(); } @Override // NodeRegistration public String toString() { return getClass().getSimpleName() + "(" + rpcAddress + ", role=" + getRole() + ")"; } /** * Get name-node role. */ public NamenodeRole getRole() { return role; } public boolean isRole(NamenodeRole that) { return role.equals(that); } }
2,638
29.333333
77
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * A FinalizeCommand is an instruction to a datanode to finalize the previous * upgrade for the given block pool. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class FinalizeCommand extends DatanodeCommand { String blockPoolId; private FinalizeCommand() { super(DatanodeProtocol.DNA_FINALIZE); } public FinalizeCommand(String bpid) { super(DatanodeProtocol.DNA_FINALIZE); blockPoolId = bpid; } public String getBlockPoolId() { return blockPoolId; } }
1,454
33.642857
80
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeCommand.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Base class for name-node command. * Issued by the name-node to notify other name-nodes what should be done. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class NamenodeCommand extends ServerCommand { public NamenodeCommand(int action) { super(action); } }
1,269
36.352941
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FenceResponse.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import org.apache.hadoop.classification.InterfaceAudience; /** * Response to a journal fence request. See {@link JournalProtocol#fence} */ @InterfaceAudience.Private public class FenceResponse { private final long previousEpoch; private final long lastTransactionId; private final boolean isInSync; public FenceResponse(long previousEpoch, long lastTransId, boolean inSync) { this.previousEpoch = previousEpoch; this.lastTransactionId = lastTransId; this.isInSync = inSync; } public boolean isInSync() { return isInSync; } public long getLastTransactionId() { return lastTransactionId; } public long getPreviousEpoch() { return previousEpoch; } }
1,558
30.816327
78
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/StorageReceivedDeletedBlocks.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; /** * Report of block received and deleted per Datanode * storage. */ public class StorageReceivedDeletedBlocks { final DatanodeStorage storage; private final ReceivedDeletedBlockInfo[] blocks; @Deprecated public String getStorageID() { return storage.getStorageID(); } public DatanodeStorage getStorage() { return storage; } public ReceivedDeletedBlockInfo[] getBlocks() { return blocks; } @Deprecated public StorageReceivedDeletedBlocks(final String storageID, final ReceivedDeletedBlockInfo[] blocks) { this.storage = new DatanodeStorage(storageID); this.blocks = blocks; } public StorageReceivedDeletedBlocks(final DatanodeStorage storage, final ReceivedDeletedBlockInfo[] blocks) { this.storage = storage; this.blocks = blocks; } }
1,670
29.381818
75
java
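These per-storage reports are the payload of blockReceivedAndDeleted() in DatanodeProtocol. The sketch below assembles one report for a single newly received replica; the ReceivedDeletedBlockInfo constructor and its BlockStatus values are assumed from the surrounding HDFS code base rather than shown in this section.

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;

public class IncrementalReportSketch {
  static StorageReceivedDeletedBlocks reportReceived(DatanodeStorage storage,
                                                     Block received) {
    ReceivedDeletedBlockInfo info = new ReceivedDeletedBlockInfo(
        received, BlockStatus.RECEIVED_BLOCK, null /* no deletion hint */);
    // One report per storage; the non-deprecated constructor takes the
    // DatanodeStorage rather than a bare storage ID.
    return new StorageReceivedDeletedBlocks(storage,
        new ReceivedDeletedBlockInfo[] { info });
  }
}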
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import java.util.Collection; import java.util.ArrayList; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import com.google.common.base.Joiner; /** * BlockRecoveryCommand is an instruction to a data-node to recover * the specified blocks. * * The data-node that receives this command treats itself as a primary * data-node in the recover process. * * Block recovery is identified by a recoveryId, which is also the new * generation stamp, which the block will have after the recovery succeeds. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class BlockRecoveryCommand extends DatanodeCommand { final Collection<RecoveringBlock> recoveringBlocks; /** * This is a block with locations from which it should be recovered * and the new generation stamp, which the block will have after * successful recovery. * * The new generation stamp of the block, also plays role of the recovery id. */ @InterfaceAudience.Private @InterfaceStability.Evolving public static class RecoveringBlock extends LocatedBlock { private final long newGenerationStamp; private final Block recoveryBlock; /** * Create RecoveringBlock. */ public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, long newGS) { super(b, locs); // startOffset is unknown this.newGenerationStamp = newGS; this.recoveryBlock = null; } /** * Create RecoveringBlock with copy-on-truncate option. */ public RecoveringBlock(ExtendedBlock b, DatanodeInfo[] locs, Block recoveryBlock) { super(b, locs); // startOffset is unknown this.newGenerationStamp = recoveryBlock.getGenerationStamp(); this.recoveryBlock = recoveryBlock; } /** * Return the new generation stamp of the block, * which also plays role of the recovery id. */ public long getNewGenerationStamp() { return newGenerationStamp; } /** * Return the new block. */ public Block getNewBlock() { return recoveryBlock; } } /** * Create empty BlockRecoveryCommand. */ public BlockRecoveryCommand() { this(0); } /** * Create BlockRecoveryCommand with * the specified capacity for recovering blocks. */ public BlockRecoveryCommand(int capacity) { this(new ArrayList<RecoveringBlock>(capacity)); } public BlockRecoveryCommand(Collection<RecoveringBlock> blocks) { super(DatanodeProtocol.DNA_RECOVERBLOCK); recoveringBlocks = blocks; } /** * Return the list of recovering blocks. */ public Collection<RecoveringBlock> getRecoveringBlocks() { return recoveringBlocks; } /** * Add recovering block to the command. 
*/ public void add(RecoveringBlock block) { recoveringBlocks.add(block); } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("BlockRecoveryCommand(\n "); Joiner.on("\n ").appendTo(sb, recoveringBlocks); sb.append("\n)"); return sb.toString(); } }
4,169
29.217391
79
java
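A hedged sketch of how the name-node side might assemble a recovery command for one under-recovery block, using only the constructors shown above; the block, replica locations, and new generation stamp are placeholders supplied by the caller.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;

public class RecoveryCommandSketch {
  static BlockRecoveryCommand buildCommand(ExtendedBlock block,
                                           DatanodeInfo[] replicaLocations,
                                           long newGenerationStamp) {
    BlockRecoveryCommand cmd = new BlockRecoveryCommand(1);
    // The new generation stamp doubles as the recovery id; the primary
    // datanode coordinates the other replica holders listed here.
    cmd.add(new RecoveringBlock(block, replicaLocations, newGenerationStamp));
    return cmd;
  }
}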
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import org.apache.hadoop.classification.InterfaceAudience; /** * Information that describes a journal */ @InterfaceAudience.Private public class JournalInfo { private final int layoutVersion; private final String clusterId; private final int namespaceId; public JournalInfo(int lv, String clusterId, int nsId) { this.layoutVersion = lv; this.clusterId = clusterId; this.namespaceId = nsId; } public int getLayoutVersion() { return layoutVersion; } public String getClusterId() { return clusterId; } public int getNamespaceId() { return namespaceId; } }
1,462
28.857143
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.security.KerberosInfo; /** * Protocol used to journal edits to a remote node. Currently, * this is used to publish edits from the NameNode to a BackupNode. */ @KerberosInfo( serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, clientPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY) @InterfaceAudience.Private public interface JournalProtocol { /** * * This class is used by both the Namenode (client) and BackupNode (server) * to insulate from the protocol serialization. * * If you are adding/changing DN's interface then you need to * change both this class and ALSO related protocol buffer * wire protocol definition in JournalProtocol.proto. * * For more details on protocol buffer wire protocol, please see * .../org/apache/hadoop/hdfs/protocolPB/overview.html */ public static final long versionID = 1L; /** * Journal edit records. * This message is sent by the active name-node to the backup node * via {@code EditLogBackupOutputStream} in order to synchronize meta-data * changes with the backup namespace image. * * @param journalInfo journal information * @param epoch marks beginning a new journal writer * @param firstTxnId the first transaction of this batch * @param numTxns number of transactions * @param records byte array containing serialized journal records * @throws FencedException if the resource has been fenced */ public void journal(JournalInfo journalInfo, long epoch, long firstTxnId, int numTxns, byte[] records) throws IOException; /** * Notify the BackupNode that the NameNode has rolled its edit logs * and is now writing a new log segment. * @param journalInfo journal information * @param epoch marks beginning a new journal writer * @param txid the first txid in the new log * @throws FencedException if the resource has been fenced */ public void startLogSegment(JournalInfo journalInfo, long epoch, long txid) throws IOException; /** * Request to fence any other journal writers. * Older writers with at previous epoch will be fenced and can no longer * perform journal operations. * * @param journalInfo journal information * @param epoch marks beginning a new journal writer * @param fencerInfo info about fencer for debugging purposes * @throws FencedException if the resource has been fenced */ public FenceResponse fence(JournalInfo journalInfo, long epoch, String fencerInfo) throws IOException; }
3,643
38.608696
78
java
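The fencing contract above implies a specific call order for a writer taking over a journal: fence with a higher epoch, check the response, then start a segment and journal edits under that same epoch. A sketch of that sequence follows, assuming a JournalProtocol proxy is already available; the epoch, txid, and fencer-info values are placeholders.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;

public class JournalTakeoverSketch {
  static void takeOver(JournalProtocol journal, JournalInfo info,
                       long newEpoch, long firstTxId, byte[] records,
                       int numTxns) throws IOException {
    // Fence any writer operating at a previous epoch.
    FenceResponse fenced = journal.fence(info, newEpoch, "new-writer");
    if (!fenced.isInSync()) {
      // The remote journal reports it is not in sync; a real writer would
      // re-sync it (its last txid is fenced.getLastTransactionId()) first.
      throw new IOException("journal not in sync, last txid="
          + fenced.getLastTransactionId());
    }
    // All further calls carry the same epoch; writers at older epochs now
    // receive FencedException.
    journal.startLogSegment(info, newEpoch, firstTxId);
    journal.journal(info, newEpoch, firstTxId, numTxns, records);
  }
}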
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/KeyUpdateCommand.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; @InterfaceAudience.Private @InterfaceStability.Evolving public class KeyUpdateCommand extends DatanodeCommand { private final ExportedBlockKeys keys; KeyUpdateCommand() { this(new ExportedBlockKeys()); } public KeyUpdateCommand(ExportedBlockKeys keys) { super(DatanodeProtocol.DNA_ACCESSKEYUPDATE); this.keys = keys; } public ExportedBlockKeys getExportedKeys() { return this.keys; } }
1,465
33.904762
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Base class for data-node command. * Issued by the name-node to notify data-nodes what should be done. */ @InterfaceAudience.Private @InterfaceStability.Evolving public abstract class DatanodeCommand extends ServerCommand { DatanodeCommand(int action) { super(action); } }
1,266
35.2
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.protocol; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; import org.apache.hadoop.io.retry.AtMostOnce; import org.apache.hadoop.io.retry.Idempotent; import org.apache.hadoop.security.KerberosInfo; /***************************************************************************** * Protocol that a secondary NameNode uses to communicate with the NameNode. * It's used to get part of the name node state *****************************************************************************/ @KerberosInfo( serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY) @InterfaceAudience.Private public interface NamenodeProtocol { /** * Until version 6L, this class served as both * the client interface to the NN AND the RPC protocol used to * communicate with the NN. * * This class is used by both the DFSClient and the * NN server side to insulate from the protocol serialization. * * If you are adding/changing NN's interface then you need to * change both this class and ALSO related protocol buffer * wire protocol definition in NamenodeProtocol.proto. * * For more details on protocol buffer wire protocol, please see * .../org/apache/hadoop/hdfs/protocolPB/overview.html * * 6: Switch to txid-based file naming for image and edits */ public static final long versionID = 6L; // Error codes passed by errorReport(). final static int NOTIFY = 0; final static int FATAL = 1; public final static int ACT_UNKNOWN = 0; // unknown action public final static int ACT_SHUTDOWN = 50; // shutdown node public final static int ACT_CHECKPOINT = 51; // do checkpoint /** * Get a list of blocks belonging to <code>datanode</code> * whose total size equals <code>size</code>. * * @see org.apache.hadoop.hdfs.server.balancer.Balancer * @param datanode a data node * @param size requested size * @return a list of blocks & their locations * @throws IOException if size is less than or equal to 0 or datanode does not exist */ @Idempotent public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size) throws IOException; /** * Get the current block keys * * @return ExportedBlockKeys containing current block keys * @throws IOException */ @Idempotent public ExportedBlockKeys getBlockKeys() throws IOException; /** * @return The most recent transaction ID that has been synced to * persistent storage, or applied from persistent storage in the * case of a non-active node. 
* @throws IOException */ @Idempotent public long getTransactionID() throws IOException; /** * Get the transaction ID of the most recent checkpoint. */ @Idempotent public long getMostRecentCheckpointTxId() throws IOException; /** * Closes the current edit log and opens a new one. The * call fails if the file system is in SafeMode. * @throws IOException * @return a unique token to identify this transaction. */ @Idempotent public CheckpointSignature rollEditLog() throws IOException; /** * Request name-node version and storage information. * * @return {@link NamespaceInfo} identifying versions and storage information * of the name-node * @throws IOException */ @Idempotent public NamespaceInfo versionRequest() throws IOException; /** * Report to the active name-node an error occurred on a subordinate node. * Depending on the error code the active node may decide to unregister the * reporting node. * * @param registration requesting node. * @param errorCode indicates the error * @param msg free text description of the error * @throws IOException */ @Idempotent public void errorReport(NamenodeRegistration registration, int errorCode, String msg) throws IOException; /** * Register a subordinate name-node like backup node. * * @return {@link NamenodeRegistration} of the node, * which this node has just registered with. */ @Idempotent public NamenodeRegistration registerSubordinateNamenode( NamenodeRegistration registration) throws IOException; /** * A request to the active name-node to start a checkpoint. * The name-node should decide whether to admit it or reject. * The name-node also decides what should be done with the backup node * image before and after the checkpoint. * * @see CheckpointCommand * @see NamenodeCommand * @see #ACT_SHUTDOWN * * @param registration the requesting node * @return {@link CheckpointCommand} if checkpoint is allowed. * @throws IOException */ @AtMostOnce public NamenodeCommand startCheckpoint(NamenodeRegistration registration) throws IOException; /** * A request to the active name-node to finalize * previously started checkpoint. * * @param registration the requesting node * @param sig {@code CheckpointSignature} which identifies the checkpoint. * @throws IOException */ @AtMostOnce public void endCheckpoint(NamenodeRegistration registration, CheckpointSignature sig) throws IOException; /** * Return a structure containing details about all edit logs * available to be fetched from the NameNode. * @param sinceTxId return only logs that contain transactions >= sinceTxId */ @Idempotent public RemoteEditLogManifest getEditLogManifest(long sinceTxId) throws IOException; /** * @return Whether the NameNode is in upgrade state (false) or not (true) */ @Idempotent public boolean isUpgradeFinalized() throws IOException; }
6,844
33.570707
80
java
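The checkpoint-related calls above compose into the familiar secondary/backup-node cycle: find out how far behind the last checkpoint is, roll the edit log, and fetch the newer edit segments. A compressed sketch of that decision, assuming a NamenodeProtocol proxy; the transaction-count threshold is made up for illustration.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;

public class CheckpointCycleSketch {
  static void maybeCheckpoint(NamenodeProtocol namenode) throws IOException {
    long lastCheckpointTxId = namenode.getMostRecentCheckpointTxId();
    long currentTxId = namenode.getTransactionID();
    if (currentTxId - lastCheckpointTxId < 10_000) {
      return; // not enough new transactions to justify a checkpoint
    }
    // Closing the current edit log gives a stable point to checkpoint against.
    CheckpointSignature sig = namenode.rollEditLog();
    RemoteEditLogManifest manifest =
        namenode.getEditLogManifest(lastCheckpointTxId + 1);
    System.out.println("checkpoint at " + sig + ", segments: " + manifest);
  }
}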
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.GenerationStamp; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import java.io.IOException; /** * BlockIdManager allocates the generation stamps and the block ID. The * {@see FSNamesystem} is responsible for persisting the allocations in the * {@see EditLog}. */ public class BlockIdManager { /** * The global generation stamp for legacy blocks with randomly * generated block IDs. */ private final GenerationStamp generationStampV1 = new GenerationStamp(); /** * The global generation stamp for this file system. */ private final GenerationStamp generationStampV2 = new GenerationStamp(); /** * The value of the generation stamp when the first switch to sequential * block IDs was made. Blocks with generation stamps below this value * have randomly allocated block IDs. Blocks with generation stamps above * this value had sequentially allocated block IDs. Read from the fsImage * (or initialized as an offset from the V1 (legacy) generation stamp on * upgrade). */ private long generationStampV1Limit; /** * The global block ID space for this file system. */ private final SequentialBlockIdGenerator blockIdGenerator; public BlockIdManager(BlockManager blockManager) { this.generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP; this.blockIdGenerator = new SequentialBlockIdGenerator(blockManager); } /** * Upgrades the generation stamp for the filesystem * by reserving a sufficient range for all existing blocks. * Should be invoked only during the first upgrade to * sequential block IDs. */ public long upgradeGenerationStampToV2() { Preconditions.checkState(generationStampV2.getCurrentValue() == GenerationStamp.LAST_RESERVED_STAMP); generationStampV2.skipTo(generationStampV1.getCurrentValue() + HdfsServerConstants.RESERVED_GENERATION_STAMPS_V1); generationStampV1Limit = generationStampV2.getCurrentValue(); return generationStampV2.getCurrentValue(); } /** * Sets the generation stamp that delineates random and sequentially * allocated block IDs. * * @param stamp set generation stamp limit to this value */ public void setGenerationStampV1Limit(long stamp) { Preconditions.checkState(generationStampV1Limit == HdfsConstants .GRANDFATHER_GENERATION_STAMP); generationStampV1Limit = stamp; } /** * Gets the value of the generation stamp that delineates sequential * and random block IDs. 
*/ public long getGenerationStampAtblockIdSwitch() { return generationStampV1Limit; } @VisibleForTesting SequentialBlockIdGenerator getBlockIdGenerator() { return blockIdGenerator; } /** * Sets the maximum allocated block ID for this filesystem. This is * the basis for allocating new block IDs. */ public void setLastAllocatedBlockId(long blockId) { blockIdGenerator.skipTo(blockId); } /** * Gets the maximum sequentially allocated block ID for this filesystem */ public long getLastAllocatedBlockId() { return blockIdGenerator.getCurrentValue(); } /** * Sets the current generation stamp for legacy blocks */ public void setGenerationStampV1(long stamp) { generationStampV1.setCurrentValue(stamp); } /** * Gets the current generation stamp for legacy blocks */ public long getGenerationStampV1() { return generationStampV1.getCurrentValue(); } /** * Gets the current generation stamp for this filesystem */ public void setGenerationStampV2(long stamp) { generationStampV2.setCurrentValue(stamp); } public long getGenerationStampV2() { return generationStampV2.getCurrentValue(); } /** * Increments, logs and then returns the stamp */ public long nextGenerationStamp(boolean legacyBlock) throws IOException { return legacyBlock ? getNextGenerationStampV1() : getNextGenerationStampV2(); } @VisibleForTesting long getNextGenerationStampV1() throws IOException { long genStampV1 = generationStampV1.nextValue(); if (genStampV1 >= generationStampV1Limit) { // We ran out of generation stamps for legacy blocks. In practice, it // is extremely unlikely as we reserved 1T v1 generation stamps. The // result is that we can no longer append to the legacy blocks that // were created before the upgrade to sequential block IDs. throw new OutOfV1GenerationStampsException(); } return genStampV1; } @VisibleForTesting long getNextGenerationStampV2() { return generationStampV2.nextValue(); } public long getGenerationStampV1Limit() { return generationStampV1Limit; } /** * Determine whether the block ID was randomly generated (legacy) or * sequentially generated. The generation stamp value is used to * make the distinction. * * @return true if the block ID was randomly generated, false otherwise. */ public boolean isLegacyBlock(Block block) { return block.getGenerationStamp() < getGenerationStampV1Limit(); } /** * Increments, logs and then returns the block ID */ public long nextBlockId() { return blockIdGenerator.nextValue(); } public boolean isGenStampInFuture(Block block) { if (isLegacyBlock(block)) { return block.getGenerationStamp() > getGenerationStampV1(); } else { return block.getGenerationStamp() > getGenerationStampV2(); } } public void clear() { generationStampV1.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP); generationStampV2.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP); getBlockIdGenerator().setCurrentValue(SequentialBlockIdGenerator .LAST_RESERVED_BLOCK_ID); generationStampV1Limit = HdfsConstants.GRANDFATHER_GENERATION_STAMP; } }
6,902
32.028708
77
java
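The key invariant in BlockIdManager is that a single generation-stamp value (generationStampV1Limit) separates legacy, randomly assigned block IDs from the newer sequential ones. The standalone snippet below mirrors the isLegacyBlock() predicate rather than instantiating the manager, which would need a live BlockManager; the sample values are arbitrary.

public class LegacyBlockCheckSketch {
  /** Mirrors BlockIdManager#isLegacyBlock: stamps below the switch-over
   *  limit belong to blocks whose IDs were randomly generated. */
  static boolean isLegacyBlock(long generationStamp, long generationStampV1Limit) {
    return generationStamp < generationStampV1Limit;
  }

  public static void main(String[] args) {
    long limit = 1_000_000L; // value recorded at the switch to sequential IDs
    System.out.println(isLegacyBlock(999_999L, limit));   // true  -> legacy block
    System.out.println(isLegacyBlock(1_000_000L, limit)); // false -> sequential ID
  }
}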
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.apache.hadoop.util.Time.monotonicNow; import java.io.PrintWriter; import java.sql.Time; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.util.Daemon; import org.slf4j.Logger; /*************************************************** * PendingReplicationBlocks does the bookkeeping of all * blocks that are getting replicated. * * It does the following: * 1) record blocks that are getting replicated at this instant. * 2) a coarse grain timer to track age of replication request * 3) a thread that periodically identifies replication-requests * that never made it. * ***************************************************/ class PendingReplicationBlocks { private static final Logger LOG = BlockManager.LOG; private final Map<BlockInfo, PendingBlockInfo> pendingReplications; private final ArrayList<BlockInfo> timedOutItems; Daemon timerThread = null; private volatile boolean fsRunning = true; // // It might take anywhere between 5 to 10 minutes before // a request is timed out. // private long timeout = 5 * 60 * 1000; private final static long DEFAULT_RECHECK_INTERVAL = 5 * 60 * 1000; PendingReplicationBlocks(long timeoutPeriod) { if ( timeoutPeriod > 0 ) { this.timeout = timeoutPeriod; } pendingReplications = new HashMap<>(); timedOutItems = new ArrayList<>(); } void start() { timerThread = new Daemon(new PendingReplicationMonitor()); timerThread.start(); } /** * Add a block to the list of pending Replications * @param block The corresponding block * @param targets The DataNodes where replicas of the block should be placed */ void increment(BlockInfo block, DatanodeDescriptor[] targets) { synchronized (pendingReplications) { PendingBlockInfo found = pendingReplications.get(block); if (found == null) { pendingReplications.put(block, new PendingBlockInfo(targets)); } else { found.incrementReplicas(targets); found.setTimeStamp(); } } } /** * One replication request for this block has finished. * Decrement the number of pending replication requests * for this block. * * @param dn The DataNode that finishes the replication */ void decrement(BlockInfo block, DatanodeDescriptor dn) { synchronized (pendingReplications) { PendingBlockInfo found = pendingReplications.get(block); if (found != null) { if(LOG.isDebugEnabled()) { LOG.debug("Removing pending replication for " + block); } found.decrementReplicas(dn); if (found.getNumReplicas() <= 0) { pendingReplications.remove(block); } } } } /** * Remove the record about the given block from pendingReplications. 
* @param block The given block whose pending replication requests need to be * removed */ void remove(BlockInfo block) { synchronized (pendingReplications) { pendingReplications.remove(block); } } public void clear() { synchronized (pendingReplications) { pendingReplications.clear(); timedOutItems.clear(); } } /** * The total number of blocks that are undergoing replication */ int size() { return pendingReplications.size(); } /** * How many copies of this block is pending replication? */ int getNumReplicas(BlockInfo block) { synchronized (pendingReplications) { PendingBlockInfo found = pendingReplications.get(block); if (found != null) { return found.getNumReplicas(); } } return 0; } /** * Returns a list of blocks that have timed out their * replication requests. Returns null if no blocks have * timed out. */ BlockInfo[] getTimedOutBlocks() { synchronized (timedOutItems) { if (timedOutItems.size() <= 0) { return null; } BlockInfo[] blockList = timedOutItems.toArray( new BlockInfo[timedOutItems.size()]); timedOutItems.clear(); return blockList; } } /** * An object that contains information about a block that * is being replicated. It records the timestamp when the * system started replicating the most recent copy of this * block. It also records the list of Datanodes where the * replication requests are in progress. */ static class PendingBlockInfo { private long timeStamp; private final List<DatanodeDescriptor> targets; PendingBlockInfo(DatanodeDescriptor[] targets) { this.timeStamp = monotonicNow(); this.targets = targets == null ? new ArrayList<DatanodeDescriptor>() : new ArrayList<>(Arrays.asList(targets)); } long getTimeStamp() { return timeStamp; } void setTimeStamp() { timeStamp = monotonicNow(); } void incrementReplicas(DatanodeDescriptor... newTargets) { if (newTargets != null) { Collections.addAll(targets, newTargets); } } void decrementReplicas(DatanodeDescriptor dn) { targets.remove(dn); } int getNumReplicas() { return targets.size(); } } /* * A periodic thread that scans for blocks that never finished * their replication request. */ class PendingReplicationMonitor implements Runnable { @Override public void run() { while (fsRunning) { long period = Math.min(DEFAULT_RECHECK_INTERVAL, timeout); try { pendingReplicationCheck(); Thread.sleep(period); } catch (InterruptedException ie) { if(LOG.isDebugEnabled()) { LOG.debug("PendingReplicationMonitor thread is interrupted.", ie); } } } } /** * Iterate through all items and detect timed-out items */ void pendingReplicationCheck() { synchronized (pendingReplications) { Iterator<Map.Entry<BlockInfo, PendingBlockInfo>> iter = pendingReplications.entrySet().iterator(); long now = monotonicNow(); if(LOG.isDebugEnabled()) { LOG.debug("PendingReplicationMonitor checking Q"); } while (iter.hasNext()) { Map.Entry<BlockInfo, PendingBlockInfo> entry = iter.next(); PendingBlockInfo pendingBlock = entry.getValue(); if (now > pendingBlock.getTimeStamp() + timeout) { BlockInfo block = entry.getKey(); synchronized (timedOutItems) { timedOutItems.add(block); } LOG.warn("PendingReplicationMonitor timed out " + block); iter.remove(); } } } } } /* * Shuts down the pending replication monitor thread. * Waits for the thread to exit. */ void stop() { fsRunning = false; if(timerThread == null) return; timerThread.interrupt(); try { timerThread.join(3000); } catch (InterruptedException ie) { } } /** * Iterate through all items and print them. 
*/ void metaSave(PrintWriter out) { synchronized (pendingReplications) { out.println("Metasave: Blocks being replicated: " + pendingReplications.size()); for (Map.Entry<BlockInfo, PendingBlockInfo> entry : pendingReplications.entrySet()) { PendingBlockInfo pendingBlock = entry.getValue(); Block block = entry.getKey(); out.println(block + " StartTime: " + new Time(pendingBlock.timeStamp) + " NumReplicaInProgress: " + pendingBlock.getNumReplicas()); } } } }
8,639
28.896194
79
java
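PendingReplicationBlocks above is essentially a timestamped map plus a monitor that re-queues requests which never complete. The sketch below reduces that bookkeeping to a self-contained class so the increment/decrement/timeout cycle is easy to follow; PendingWorkTracker and the String block keys are hypothetical simplifications, and the real monitor runs in a Daemon thread and counts per-target replicas rather than being polled by the caller.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

/** Illustrative sketch, not Hadoop code: timeout bookkeeping for in-flight work. */
public class PendingWorkTracker {
  private final long timeoutMillis;
  private final Map<String, Long> pending = new HashMap<>(); // block -> start time (ms)
  private final List<String> timedOut = new ArrayList<>();   // blocks to hand back

  public PendingWorkTracker(long timeoutMillis) {
    this.timeoutMillis = timeoutMillis;
  }

  /** Record that replication work for this block has been handed out. */
  public synchronized void increment(String block) {
    pending.put(block, System.currentTimeMillis());
  }

  /** The replication of this block finished; stop tracking it. */
  public synchronized void decrement(String block) {
    pending.remove(block);
  }

  /** Periodic scan: move requests older than the timeout onto the timed-out list. */
  public synchronized void checkTimeouts() {
    long now = System.currentTimeMillis();
    Iterator<Map.Entry<String, Long>> it = pending.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<String, Long> e = it.next();
      if (now > e.getValue() + timeoutMillis) {
        timedOut.add(e.getKey());
        it.remove();
      }
    }
  }

  /** Drain the blocks whose replication never completed so they can be re-queued. */
  public synchronized List<String> getTimedOutBlocks() {
    List<String> result = new ArrayList<>(timedOut);
    timedOut.clear();
    return result;
  }

  public static void main(String[] args) throws InterruptedException {
    PendingWorkTracker tracker = new PendingWorkTracker(50);
    tracker.increment("blk_1001");
    tracker.increment("blk_1002");
    tracker.decrement("blk_1002");   // this one completed in time
    Thread.sleep(100);
    tracker.checkTimeouts();
    System.out.println(tracker.getTimedOutBlocks()); // [blk_1001]
  }
}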
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Function; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.HashMultimap; import com.google.common.collect.Iterators; import com.google.common.collect.Multimap; import com.google.common.collect.UnmodifiableIterator; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.util.HostsFileReader; import javax.annotation.Nullable; import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.URI; import java.net.URISyntaxException; import java.util.Collection; import java.util.HashSet; import java.util.Iterator; import java.util.Map; /** * This class manages the include and exclude files for HDFS. * <p/> * These files control which DataNodes the NameNode expects to see in the * cluster. Loosely speaking, the include file, if it exists and is not * empty, is a list of everything we expect to see. The exclude file is * a list of everything we want to ignore if we do see it. * <p/> * Entries may or may not specify a port. If they don't, we consider * them to apply to every DataNode on that host. The code canonicalizes the * entries into IP addresses. * <p/> * <p/> * The code ignores all entries that the DNS fails to resolve their IP * addresses. This is okay because by default the NN rejects the registrations * of DNs when it fails to do a forward and reverse lookup. Note that DNS * resolutions are only done during the loading time to minimize the latency. */ class HostFileManager { private static final Log LOG = LogFactory.getLog(HostFileManager.class); private HostSet includes = new HostSet(); private HostSet excludes = new HostSet(); private static HostSet readFile(String type, String filename) throws IOException { HostSet res = new HostSet(); if (!filename.isEmpty()) { HashSet<String> entrySet = new HashSet<String>(); HostsFileReader.readFileToSet(type, filename, entrySet); for (String str : entrySet) { InetSocketAddress addr = parseEntry(type, filename, str); if (addr != null) { res.add(addr); } } } return res; } @VisibleForTesting static InetSocketAddress parseEntry(String type, String fn, String line) { try { URI uri = new URI("dummy", line, null, null, null); int port = uri.getPort() == -1 ? 0 : uri.getPort(); InetSocketAddress addr = new InetSocketAddress(uri.getHost(), port); if (addr.isUnresolved()) { LOG.warn(String.format("Failed to resolve address `%s` in `%s`. 
" + "Ignoring in the %s list.", line, fn, type)); return null; } return addr; } catch (URISyntaxException e) { LOG.warn(String.format("Failed to parse `%s` in `%s`. " + "Ignoring in " + "the %s list.", line, fn, type)); } return null; } static InetSocketAddress resolvedAddressFromDatanodeID(DatanodeID id) { return new InetSocketAddress(id.getIpAddr(), id.getXferPort()); } synchronized HostSet getIncludes() { return includes; } synchronized HostSet getExcludes() { return excludes; } // If the includes list is empty, act as if everything is in the // includes list. synchronized boolean isIncluded(DatanodeID dn) { return includes.isEmpty() || includes.match (resolvedAddressFromDatanodeID(dn)); } synchronized boolean isExcluded(DatanodeID dn) { return excludes.match(resolvedAddressFromDatanodeID(dn)); } synchronized boolean hasIncludes() { return !includes.isEmpty(); } void refresh(String includeFile, String excludeFile) throws IOException { HostSet newIncludes = readFile("included", includeFile); HostSet newExcludes = readFile("excluded", excludeFile); synchronized (this) { includes = newIncludes; excludes = newExcludes; } } /** * The HostSet allows efficient queries on matching wildcard addresses. * <p/> * For InetSocketAddress A and B with the same host address, * we define a partial order between A and B, A <= B iff A.getPort() == B * .getPort() || B.getPort() == 0. */ static class HostSet implements Iterable<InetSocketAddress> { // Host -> lists of ports private final Multimap<InetAddress, Integer> addrs = HashMultimap.create(); /** * The function that checks whether there exists an entry foo in the set * so that foo <= addr. */ boolean matchedBy(InetSocketAddress addr) { Collection<Integer> ports = addrs.get(addr.getAddress()); return addr.getPort() == 0 ? !ports.isEmpty() : ports.contains(addr .getPort()); } /** * The function that checks whether there exists an entry foo in the set * so that addr <= foo. */ boolean match(InetSocketAddress addr) { int port = addr.getPort(); Collection<Integer> ports = addrs.get(addr.getAddress()); boolean exactMatch = ports.contains(port); boolean genericMatch = ports.contains(0); return exactMatch || genericMatch; } boolean isEmpty() { return addrs.isEmpty(); } int size() { return addrs.size(); } void add(InetSocketAddress addr) { Preconditions.checkArgument(!addr.isUnresolved()); addrs.put(addr.getAddress(), addr.getPort()); } @Override public Iterator<InetSocketAddress> iterator() { return new UnmodifiableIterator<InetSocketAddress>() { private final Iterator<Map.Entry<InetAddress, Integer>> it = addrs.entries().iterator(); @Override public boolean hasNext() { return it.hasNext(); } @Override public InetSocketAddress next() { Map.Entry<InetAddress, Integer> e = it.next(); return new InetSocketAddress(e.getKey(), e.getValue()); } }; } @Override public String toString() { StringBuilder sb = new StringBuilder("HostSet("); Joiner.on(",").appendTo(sb, Iterators.transform(iterator(), new Function<InetSocketAddress, String>() { @Override public String apply(@Nullable InetSocketAddress addr) { assert addr != null; return addr.getAddress().getHostAddress() + ":" + addr.getPort(); } })); return sb.append(")").toString(); } } }
7,440
33.133028
80
java
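The interesting part of HostFileManager above is the HostSet matching rule: an entry with port 0 acts as a wildcard covering every DataNode on that host, while an entry with an explicit port covers only that one. The sketch below is a minimal, self-contained version of that rule; SimpleHostSet is a hypothetical name, and it skips the Guava multimap and the include/exclude file parsing.

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/** Illustrative sketch, not Hadoop code: wildcard-port host matching. */
public class SimpleHostSet {
  // host address -> ports listed for that host (port 0 means "every port")
  private final Map<InetAddress, Set<Integer>> addrs = new HashMap<>();

  /** Add a resolved entry from an include/exclude file. */
  public void add(InetSocketAddress addr) {
    if (addr.isUnresolved()) {
      throw new IllegalArgumentException("unresolved entry: " + addr);
    }
    addrs.computeIfAbsent(addr.getAddress(), k -> new HashSet<>())
        .add(addr.getPort());
  }

  /** True if some entry covers this address: an exact port match or a 0 wildcard. */
  public boolean match(InetSocketAddress addr) {
    Set<Integer> ports =
        addrs.getOrDefault(addr.getAddress(), Collections.emptySet());
    return ports.contains(addr.getPort()) || ports.contains(0);
  }

  public boolean isEmpty() {
    return addrs.isEmpty();
  }

  public static void main(String[] args) {
    SimpleHostSet excludes = new SimpleHostSet();
    excludes.add(new InetSocketAddress("127.0.0.1", 0));      // exclude the whole host
    excludes.add(new InetSocketAddress("127.0.0.2", 50010));  // exclude one port only

    System.out.println(excludes.match(new InetSocketAddress("127.0.0.1", 50010))); // true
    System.out.println(excludes.match(new InetSocketAddress("127.0.0.2", 50020))); // false
    System.out.println(excludes.match(new InetSocketAddress("127.0.0.2", 50010))); // true
  }
}

As in the class above, an empty include set is treated as "everything is included", so a lookup like match() only decides membership once the set is non-empty.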
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.blockmanagement;

/**
 * Default {@link BlockPlacementStatus} implementation: the placement policy is
 * satisfied when the block is stored on at least the required number of racks.
 */
public class BlockPlacementStatusDefault implements BlockPlacementStatus {

  /** Number of racks the placement policy requires for this block. */
  private int requiredRacks = 0;
  /** Number of racks the block's replicas currently occupy. */
  private int currentRacks = 0;

  public BlockPlacementStatusDefault(int currentRacks, int requiredRacks){
    this.requiredRacks = requiredRacks;
    this.currentRacks = currentRacks;
  }

  @Override
  public boolean isPlacementPolicySatisfied() {
    return requiredRacks <= currentRacks;
  }

  @Override
  public String getErrorDescription() {
    if (isPlacementPolicySatisfied()) {
      return null;
    }
    return "Block should be additionally replicated on " +
        (requiredRacks - currentRacks) + " more rack(s).";
  }
}
1,517
32.733333
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.PrintWriter; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Calendar; import java.util.GregorianCalendar; import java.util.HashMap; import java.util.List; import java.util.Map; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.util.LightWeightHashSet; import org.apache.hadoop.util.Time; import org.apache.hadoop.hdfs.DFSUtil; import com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; /** * Keeps a Collection for every named machine containing blocks * that have recently been invalidated and are thought to live * on the machine in question. */ @InterfaceAudience.Private class InvalidateBlocks { /** Mapping: DatanodeInfo -> Collection of Blocks */ private final Map<DatanodeInfo, LightWeightHashSet<Block>> node2blocks = new HashMap<DatanodeInfo, LightWeightHashSet<Block>>(); /** The total number of blocks in the map. */ private long numBlocks = 0L; private final int blockInvalidateLimit; /** * The period of pending time for block invalidation since the NameNode * startup */ private final long pendingPeriodInMs; /** the startup time */ private final long startupTime = Time.monotonicNow(); InvalidateBlocks(final int blockInvalidateLimit, long pendingPeriodInMs) { this.blockInvalidateLimit = blockInvalidateLimit; this.pendingPeriodInMs = pendingPeriodInMs; printBlockDeletionTime(BlockManager.LOG); } private void printBlockDeletionTime(final Logger log) { log.info(DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY + " is set to " + DFSUtil.durationToString(pendingPeriodInMs)); SimpleDateFormat sdf = new SimpleDateFormat("yyyy MMM dd HH:mm:ss"); Calendar calendar = new GregorianCalendar(); calendar.add(Calendar.SECOND, (int) (this.pendingPeriodInMs / 1000)); log.info("The block deletion will start around " + sdf.format(calendar.getTime())); } /** @return the number of blocks to be invalidated . */ synchronized long numBlocks() { return numBlocks; } /** * @return true if the given storage has the given block listed for * invalidation. Blocks are compared including their generation stamps: * if a block is pending invalidation but with a different generation stamp, * returns false. 
*/ synchronized boolean contains(final DatanodeInfo dn, final Block block) { final LightWeightHashSet<Block> s = node2blocks.get(dn); if (s == null) { return false; // no invalidate blocks for this storage ID } Block blockInSet = s.getElement(block); return blockInSet != null && block.getGenerationStamp() == blockInSet.getGenerationStamp(); } /** * Add a block to the block collection * which will be invalidated on the specified datanode. */ synchronized void add(final Block block, final DatanodeInfo datanode, final boolean log) { LightWeightHashSet<Block> set = node2blocks.get(datanode); if (set == null) { set = new LightWeightHashSet<Block>(); node2blocks.put(datanode, set); } if (set.add(block)) { numBlocks++; if (log) { NameNode.blockStateChangeLog.debug("BLOCK* {}: add {} to {}", getClass().getSimpleName(), block, datanode); } } } /** Remove a storage from the invalidatesSet */ synchronized void remove(final DatanodeInfo dn) { final LightWeightHashSet<Block> blocks = node2blocks.remove(dn); if (blocks != null) { numBlocks -= blocks.size(); } } /** Remove the block from the specified storage. */ synchronized void remove(final DatanodeInfo dn, final Block block) { final LightWeightHashSet<Block> v = node2blocks.get(dn); if (v != null && v.remove(block)) { numBlocks--; if (v.isEmpty()) { node2blocks.remove(dn); } } } /** Print the contents to out. */ synchronized void dump(final PrintWriter out) { final int size = node2blocks.values().size(); out.println("Metasave: Blocks " + numBlocks + " waiting deletion from " + size + " datanodes."); if (size == 0) { return; } for(Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) { final LightWeightHashSet<Block> blocks = entry.getValue(); if (blocks.size() > 0) { out.println(entry.getKey()); out.println(blocks); } } } /** @return a list of the storage IDs. */ synchronized List<DatanodeInfo> getDatanodes() { return new ArrayList<DatanodeInfo>(node2blocks.keySet()); } /** * @return the remianing pending time */ @VisibleForTesting long getInvalidationDelay() { return pendingPeriodInMs - (Time.monotonicNow() - startupTime); } synchronized List<Block> invalidateWork(final DatanodeDescriptor dn) { final long delay = getInvalidationDelay(); if (delay > 0) { if (BlockManager.LOG.isDebugEnabled()) { BlockManager.LOG .debug("Block deletion is delayed during NameNode startup. " + "The deletion will start after " + delay + " ms."); } return null; } final LightWeightHashSet<Block> set = node2blocks.get(dn); if (set == null) { return null; } // # blocks that can be sent in one message is limited final int limit = blockInvalidateLimit; final List<Block> toInvalidate = set.pollN(limit); // If we send everything in this message, remove this node entry if (set.isEmpty()) { remove(dn); } dn.addBlocksToBeInvalidated(toInvalidate); numBlocks -= toInvalidate.size(); return toInvalidate; } synchronized void clear() { node2blocks.clear(); numBlocks = 0; } }
6,848
32.247573
92
java
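InvalidateBlocks above combines three ideas: a per-DataNode queue of blocks to delete, a cap (blockInvalidateLimit) on how many deletions go out per heartbeat, and a grace period after NameNode startup during which no deletions are issued. The self-contained sketch below shows those three pieces together; SimpleInvalidateBlocks, the String node keys, and the long block IDs are hypothetical simplifications of the real types.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Illustrative sketch, not Hadoop code: per-node invalidation queue with limits. */
public class SimpleInvalidateBlocks {
  private final Map<String, Deque<Long>> node2blocks = new HashMap<>(); // node -> block IDs
  private final int blockInvalidateLimit;   // max blocks sent per heartbeat
  private final long pendingPeriodMillis;   // delay deletions right after startup
  private final long startupTime = System.currentTimeMillis();
  private long numBlocks = 0;

  public SimpleInvalidateBlocks(int blockInvalidateLimit, long pendingPeriodMillis) {
    this.blockInvalidateLimit = blockInvalidateLimit;
    this.pendingPeriodMillis = pendingPeriodMillis;
  }

  /** Queue a block for deletion on the given node. */
  public synchronized void add(String node, long blockId) {
    node2blocks.computeIfAbsent(node, k -> new ArrayDeque<>()).add(blockId);
    numBlocks++;
  }

  /** Blocks this node should delete now, or null if none (or deletions still delayed). */
  public synchronized List<Long> invalidateWork(String node) {
    long sinceStartup = System.currentTimeMillis() - startupTime;
    if (sinceStartup < pendingPeriodMillis) {
      return null;                      // deletions postponed during startup
    }
    Deque<Long> queue = node2blocks.get(node);
    if (queue == null) {
      return null;
    }
    List<Long> toInvalidate = new ArrayList<>();
    while (!queue.isEmpty() && toInvalidate.size() < blockInvalidateLimit) {
      toInvalidate.add(queue.poll());   // at most blockInvalidateLimit per message
    }
    if (queue.isEmpty()) {
      node2blocks.remove(node);         // sent everything; drop the node entry
    }
    numBlocks -= toInvalidate.size();
    return toInvalidate;
  }

  public synchronized long numBlocks() {
    return numBlocks;
  }
}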
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT; import java.util.Random; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.net.NetworkTopology; /** * Space balanced block placement policy. */ public class AvailableSpaceBlockPlacementPolicy extends BlockPlacementPolicyDefault { private static final Log LOG = LogFactory .getLog(AvailableSpaceBlockPlacementPolicy.class); private static final Random RAND = new Random(); private int balancedPreference = (int) (100 * DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT); @Override public void initialize(Configuration conf, FSClusterStats stats, NetworkTopology clusterMap, Host2NodesMap host2datanodeMap) { super.initialize(conf, stats, clusterMap, host2datanodeMap); float balancedPreferencePercent = conf.getFloat( DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY, DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT); LOG.info("Available space block placement policy initialized: " + DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY + " = " + balancedPreferencePercent); if (balancedPreferencePercent > 1.0) { LOG.warn("The value of " + DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY + " is greater than 1.0 but should be in the range 0.0 - 1.0"); } if (balancedPreferencePercent < 0.5) { LOG.warn("The value of " + DFS_NAMENODE_AVAILABLE_SPACE_BLOCK_PLACEMENT_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY + " is less than 0.5 so datanodes with more used percent will" + " receive more block allocations."); } balancedPreference = (int) (100 * balancedPreferencePercent); } @Override protected DatanodeDescriptor chooseDataNode(String scope) { DatanodeDescriptor a = (DatanodeDescriptor) clusterMap.chooseRandom(scope); DatanodeDescriptor b = (DatanodeDescriptor) clusterMap.chooseRandom(scope); int ret = compareDataNode(a, b); if (ret == 0) { return a; } else if (ret < 0) { return (RAND.nextInt(100) < balancedPreference) ? a : b; } else { return (RAND.nextInt(100) < balancedPreference) ? b : a; } } /** * Compare the two data nodes. 
*/ protected int compareDataNode(final DatanodeDescriptor a, final DatanodeDescriptor b) { if (a.equals(b) || Math.abs(a.getDfsUsedPercent() - b.getDfsUsedPercent()) < 5) { return 0; } return a.getDfsUsedPercent() < b.getDfsUsedPercent() ? -1 : 1; } }
4,007
40.75
146
java
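The policy above boils down to a biased coin flip: draw two candidate nodes and prefer the one with the lower DFS-used percentage with probability balancedPreference, treating nodes within five percentage points of each other as equally full. The sketch below reproduces that selection rule in isolation; AvailableSpaceChooser and the tiny Node class are hypothetical stand-ins for the real DatanodeDescriptor-based code.

import java.util.Random;

/** Illustrative sketch, not Hadoop code: space-biased choice between two candidates. */
public class AvailableSpaceChooser {
  private static final Random RAND = new Random();

  /** Probability, in percent, of preferring the node with the lower used percent. */
  private final int balancedPreference;

  public AvailableSpaceChooser(float balancedPreferenceFraction) {
    this.balancedPreference = (int) (100 * balancedPreferenceFraction);
  }

  static final class Node {
    final String name;
    final double dfsUsedPercent;
    Node(String name, double dfsUsedPercent) {
      this.name = name;
      this.dfsUsedPercent = dfsUsedPercent;
    }
  }

  /** Treat nodes within 5 percentage points of each other as equally full. */
  private int compare(Node a, Node b) {
    if (Math.abs(a.dfsUsedPercent - b.dfsUsedPercent) < 5) {
      return 0;
    }
    return a.dfsUsedPercent < b.dfsUsedPercent ? -1 : 1;
  }

  /** Pick between two random candidates, biased toward the less-used node. */
  public Node choose(Node a, Node b) {
    int ret = compare(a, b);
    if (ret == 0) {
      return a;
    }
    Node emptier = ret < 0 ? a : b;
    Node fuller  = ret < 0 ? b : a;
    return RAND.nextInt(100) < balancedPreference ? emptier : fuller;
  }

  public static void main(String[] args) {
    AvailableSpaceChooser chooser = new AvailableSpaceChooser(0.8f);
    Node a = new Node("dn-a", 20.0);
    Node b = new Node("dn-b", 70.0);
    int aWins = 0;
    for (int i = 0; i < 10_000; i++) {
      if (chooser.choose(a, b) == a) {
        aWins++;
      }
    }
    System.out.println("dn-a chosen ~" + (aWins / 100) + "% of the time"); // around 80%
  }
}

Running the main with a preference fraction of 0.8 should show the emptier node winning roughly 80% of the draws, which is the gentle skew the policy uses to favor less-used DataNodes without ignoring fuller ones entirely.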
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX; import static org.apache.hadoop.util.ExitUtil.terminate; import java.io.IOException; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicLong; import javax.management.ObjectName; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult; import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.namenode.CachedBlock; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.server.namenode.Namesystem; import 
org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.protocol.BlockCommand; import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State; import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand; import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.util.LightWeightLinkedSet; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.net.Node; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.LightWeightGSet; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Keeps information related to the blocks stored in the Hadoop cluster. */ @InterfaceAudience.Private public class BlockManager implements BlockStatsMXBean { public static final Logger LOG = LoggerFactory.getLogger(BlockManager.class); public static final Logger blockLog = NameNode.blockStateChangeLog; private static final String QUEUE_REASON_CORRUPT_STATE = "it has the wrong state or generation stamp"; private static final String QUEUE_REASON_FUTURE_GENSTAMP = "generation stamp is in the future"; private final Namesystem namesystem; private final DatanodeManager datanodeManager; private final HeartbeatManager heartbeatManager; private final BlockTokenSecretManager blockTokenSecretManager; private final PendingDataNodeMessages pendingDNMessages = new PendingDataNodeMessages(); private volatile long pendingReplicationBlocksCount = 0L; private volatile long corruptReplicaBlocksCount = 0L; private volatile long underReplicatedBlocksCount = 0L; private volatile long scheduledReplicationBlocksCount = 0L; private final AtomicLong excessBlocksCount = new AtomicLong(0L); private final AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L); private final long startupDelayBlockDeletionInMs; private final BlockReportLeaseManager blockReportLeaseManager; private ObjectName mxBeanName; /** Used by metrics */ public long getPendingReplicationBlocksCount() { return pendingReplicationBlocksCount; } /** Used by metrics */ public long getUnderReplicatedBlocksCount() { return underReplicatedBlocksCount; } /** Used by metrics */ public long getCorruptReplicaBlocksCount() { return corruptReplicaBlocksCount; } /** Used by metrics */ public long getScheduledReplicationBlocksCount() { return scheduledReplicationBlocksCount; } /** Used by metrics */ public long getPendingDeletionBlocksCount() { return invalidateBlocks.numBlocks(); } /** Used by metrics */ public long getStartupDelayBlockDeletionInMs() { return startupDelayBlockDeletionInMs; } /** Used by metrics */ public long getExcessBlocksCount() { return excessBlocksCount.get(); } /** Used by metrics */ public long getPostponedMisreplicatedBlocksCount() { return postponedMisreplicatedBlocksCount.get(); } /** Used by metrics */ public int 
getPendingDataNodeMessageCount() { return pendingDNMessages.count(); } /**replicationRecheckInterval is how often namenode checks for new replication work*/ private final long replicationRecheckInterval; /** * Mapping: Block -> { BlockCollection, datanodes, self ref } * Updated only in response to client-sent information. */ final BlocksMap blocksMap; /** Replication thread. */ final Daemon replicationThread = new Daemon(new ReplicationMonitor()); /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */ final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap(); /** Blocks to be invalidated. */ private final InvalidateBlocks invalidateBlocks; /** * After a failover, over-replicated blocks may not be handled * until all of the replicas have done a block report to the * new active. This is to make sure that this NameNode has been * notified of all block deletions that might have been pending * when the failover happened. */ private final Set<Block> postponedMisreplicatedBlocks = Sets.newHashSet(); /** * Maps a StorageID to the set of blocks that are "extra" for this * DataNode. We'll eventually remove these extras. */ public final Map<String, LightWeightLinkedSet<Block>> excessReplicateMap = new TreeMap<String, LightWeightLinkedSet<Block>>(); /** * Store set of Blocks that need to be replicated 1 or more times. * We also store pending replication-orders. */ public final UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks(); @VisibleForTesting final PendingReplicationBlocks pendingReplications; /** The maximum number of replicas allowed for a block */ public final short maxReplication; /** * The maximum number of outgoing replication streams a given node should have * at one time considering all but the highest priority replications needed. */ int maxReplicationStreams; /** * The maximum number of outgoing replication streams a given node should have * at one time. */ int replicationStreamsHardLimit; /** Minimum copies needed or else write is disallowed */ public final short minReplication; /** Default number of replicas */ public final int defaultReplication; /** value returned by MAX_CORRUPT_FILES_RETURNED */ final int maxCorruptFilesReturned; final float blocksInvalidateWorkPct; final int blocksReplWorkMultiplier; /** variable to enable check for enough racks */ final boolean shouldCheckForEnoughRacks; // whether or not to issue block encryption keys. final boolean encryptDataTransfer; // Max number of blocks to log info about during a block report. private final long maxNumBlocksToLog; /** * When running inside a Standby node, the node may receive block reports * from datanodes before receiving the corresponding namespace edits from * the active NameNode. Thus, it will postpone them for later processing, * instead of marking the blocks as corrupt. */ private boolean shouldPostponeBlocksFromFuture = false; /** * Process replication queues asynchronously to allow namenode safemode exit * and failover to be faster. HDFS-5496 */ private Daemon replicationQueuesInitializer = null; /** * Number of blocks to process asychronously for replication queues * initialization once aquired the namesystem lock. Remaining blocks will be * processed again after aquiring lock again. */ private int numBlocksPerIteration; /** * Progress of the Replication queues initialisation. 
*/ private double replicationQueuesInitProgress = 0.0; /** for block replicas placement */ private BlockPlacementPolicy blockplacement; private final BlockStoragePolicySuite storagePolicySuite; /** Check whether name system is running before terminating */ private boolean checkNSRunning = true; public BlockManager(final Namesystem namesystem, final Configuration conf) throws IOException { this.namesystem = namesystem; datanodeManager = new DatanodeManager(this, namesystem, conf); heartbeatManager = datanodeManager.getHeartbeatManager(); startupDelayBlockDeletionInMs = conf.getLong( DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY, DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L; invalidateBlocks = new InvalidateBlocks( datanodeManager.blockInvalidateLimit, startupDelayBlockDeletionInMs); // Compute the map capacity by allocating 2% of total memory blocksMap = new BlocksMap( LightWeightGSet.computeCapacity(2.0, "BlocksMap")); blockplacement = BlockPlacementPolicy.getInstance( conf, datanodeManager.getFSClusterStats(), datanodeManager.getNetworkTopology(), datanodeManager.getHost2DatanodeMap()); storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite(); pendingReplications = new PendingReplicationBlocks(conf.getInt( DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L); blockTokenSecretManager = createBlockTokenSecretManager(conf); this.maxCorruptFilesReturned = conf.getInt( DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY, DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED); this.defaultReplication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, DFSConfigKeys.DFS_REPLICATION_DEFAULT); final int maxR = conf.getInt(DFSConfigKeys.DFS_REPLICATION_MAX_KEY, DFSConfigKeys.DFS_REPLICATION_MAX_DEFAULT); final int minR = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT); if (minR <= 0) throw new IOException("Unexpected configuration parameters: " + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY + " = " + minR + " <= 0"); if (maxR > Short.MAX_VALUE) throw new IOException("Unexpected configuration parameters: " + DFSConfigKeys.DFS_REPLICATION_MAX_KEY + " = " + maxR + " > " + Short.MAX_VALUE); if (minR > maxR) throw new IOException("Unexpected configuration parameters: " + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY + " = " + minR + " > " + DFSConfigKeys.DFS_REPLICATION_MAX_KEY + " = " + maxR); this.minReplication = (short)minR; this.maxReplication = (short)maxR; this.maxReplicationStreams = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT); this.replicationStreamsHardLimit = conf.getInt( DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY, DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT); this.shouldCheckForEnoughRacks = conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null ? 
false : true; this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf); this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf); this.replicationRecheckInterval = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L; this.encryptDataTransfer = conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT); this.maxNumBlocksToLog = conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY, DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT); this.numBlocksPerIteration = conf.getInt( DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT, DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT); this.blockReportLeaseManager = new BlockReportLeaseManager(conf); LOG.info("defaultReplication = " + defaultReplication); LOG.info("maxReplication = " + maxReplication); LOG.info("minReplication = " + minReplication); LOG.info("maxReplicationStreams = " + maxReplicationStreams); LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks); LOG.info("replicationRecheckInterval = " + replicationRecheckInterval); LOG.info("encryptDataTransfer = " + encryptDataTransfer); LOG.info("maxNumBlocksToLog = " + maxNumBlocksToLog); } private static BlockTokenSecretManager createBlockTokenSecretManager( final Configuration conf) throws IOException { final boolean isEnabled = conf.getBoolean( DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT); LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + "=" + isEnabled); if (!isEnabled) { if (UserGroupInformation.isSecurityEnabled()) { String errMessage = "Security is enabled but block access tokens " + "(via " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY + ") " + "aren't enabled. This may cause issues " + "when clients attempt to connect to a DataNode. Aborting NameNode"; throw new IOException(errMessage); } return null; } final long updateMin = conf.getLong( DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY, DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_DEFAULT); final long lifetimeMin = conf.getLong( DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY, DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT); final String encryptionAlgorithm = conf.get( DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY); LOG.info(DFSConfigKeys.DFS_BLOCK_ACCESS_KEY_UPDATE_INTERVAL_KEY + "=" + updateMin + " min(s), " + DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY + "=" + lifetimeMin + " min(s), " + DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY + "=" + encryptionAlgorithm); String nsId = DFSUtil.getNamenodeNameServiceId(conf); boolean isHaEnabled = HAUtil.isHAEnabled(conf, nsId); if (isHaEnabled) { String thisNnId = HAUtil.getNameNodeId(conf, nsId); String otherNnId = HAUtil.getNameNodeIdOfOtherNode(conf, nsId); return new BlockTokenSecretManager(updateMin*60*1000L, lifetimeMin*60*1000L, thisNnId.compareTo(otherNnId) < 0 ? 
0 : 1, null, encryptionAlgorithm); } else { return new BlockTokenSecretManager(updateMin*60*1000L, lifetimeMin*60*1000L, 0, null, encryptionAlgorithm); } } public BlockStoragePolicy getStoragePolicy(final String policyName) { return storagePolicySuite.getPolicy(policyName); } public BlockStoragePolicy getStoragePolicy(final byte policyId) { return storagePolicySuite.getPolicy(policyId); } public BlockStoragePolicy[] getStoragePolicies() { return storagePolicySuite.getAllPolicies(); } public void setBlockPoolId(String blockPoolId) { if (isBlockTokenEnabled()) { blockTokenSecretManager.setBlockPoolId(blockPoolId); } } public BlockStoragePolicySuite getStoragePolicySuite() { return storagePolicySuite; } /** get the BlockTokenSecretManager */ @VisibleForTesting public BlockTokenSecretManager getBlockTokenSecretManager() { return blockTokenSecretManager; } /** Allow silent termination of replication monitor for testing */ @VisibleForTesting void enableRMTerminationForTesting() { checkNSRunning = false; } private boolean isBlockTokenEnabled() { return blockTokenSecretManager != null; } /** Should the access keys be updated? */ boolean shouldUpdateBlockKey(final long updateTime) throws IOException { return isBlockTokenEnabled()? blockTokenSecretManager.updateKeys(updateTime) : false; } public void activate(Configuration conf) { pendingReplications.start(); datanodeManager.activate(conf); this.replicationThread.start(); mxBeanName = MBeans.register("NameNode", "BlockStats", this); } public void close() { try { replicationThread.interrupt(); replicationThread.join(3000); } catch (InterruptedException ie) { } datanodeManager.close(); pendingReplications.stop(); blocksMap.close(); } /** @return the datanodeManager */ public DatanodeManager getDatanodeManager() { return datanodeManager; } @VisibleForTesting public BlockPlacementPolicy getBlockPlacementPolicy() { return blockplacement; } /** Set BlockPlacementPolicy */ public void setBlockPlacementPolicy(BlockPlacementPolicy newpolicy) { if (newpolicy == null) { throw new HadoopIllegalArgumentException("newpolicy == null"); } this.blockplacement = newpolicy; } /** Dump meta data to out. */ public void metaSave(PrintWriter out) { assert namesystem.hasWriteLock(); final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>(); datanodeManager.fetchDatanodes(live, dead, false); out.println("Live Datanodes: " + live.size()); out.println("Dead Datanodes: " + dead.size()); // // Dump contents of neededReplication // synchronized (neededReplications) { out.println("Metasave: Blocks waiting for replication: " + neededReplications.size()); for (Block block : neededReplications) { dumpBlockMeta(block, out); } } // Dump any postponed over-replicated blocks out.println("Mis-replicated blocks that have been postponed:"); for (Block block : postponedMisreplicatedBlocks) { dumpBlockMeta(block, out); } // Dump blocks from pendingReplication pendingReplications.metaSave(out); // Dump blocks that are waiting to be deleted invalidateBlocks.dump(out); // Dump all datanodes getDatanodeManager().datanodeDump(out); } /** * Dump the metadata for the given block in a human-readable * form. 
*/ private void dumpBlockMeta(Block block, PrintWriter out) { List<DatanodeDescriptor> containingNodes = new ArrayList<DatanodeDescriptor>(); List<DatanodeStorageInfo> containingLiveReplicasNodes = new ArrayList<DatanodeStorageInfo>(); NumberReplicas numReplicas = new NumberReplicas(); // source node returned is not used chooseSourceDatanode(block, containingNodes, containingLiveReplicasNodes, numReplicas, UnderReplicatedBlocks.LEVEL); // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which are // not included in the numReplicas.liveReplicas() count assert containingLiveReplicasNodes.size() >= numReplicas.liveReplicas(); int usableReplicas = numReplicas.liveReplicas() + numReplicas.decommissionedAndDecommissioning(); if (block instanceof BlockInfo) { BlockCollection bc = ((BlockInfo) block).getBlockCollection(); String fileName = (bc == null) ? "[orphaned]" : bc.getName(); out.print(fileName + ": "); } // l: == live:, d: == decommissioned c: == corrupt e: == excess out.print(block + ((usableReplicas > 0)? "" : " MISSING") + " (replicas:" + " l: " + numReplicas.liveReplicas() + " d: " + numReplicas.decommissionedAndDecommissioning() + " c: " + numReplicas.corruptReplicas() + " e: " + numReplicas.excessReplicas() + ") "); Collection<DatanodeDescriptor> corruptNodes = corruptReplicas.getNodes(block); for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); String state = ""; if (corruptNodes != null && corruptNodes.contains(node)) { state = "(corrupt)"; } else if (node.isDecommissioned() || node.isDecommissionInProgress()) { state = "(decommissioned)"; } if (storage.areBlockContentsStale()) { state += " (block deletions maybe out of date)"; } out.print(" " + node + state + " : "); } out.println(""); } /** @return maxReplicationStreams */ public int getMaxReplicationStreams() { return maxReplicationStreams; } /** * @return true if the block has minimum replicas */ public boolean checkMinReplication(BlockInfo block) { return (countNodes(block).liveReplicas() >= minReplication); } /** * Commit a block of a file * * @param block block to be committed * @param commitBlock - contains client reported block length and generation * @return true if the block is changed to committed state. * @throws IOException if the block does not have at least a minimal number * of replicas reported from data-nodes. */ private static boolean commitBlock( final BlockInfoContiguousUnderConstruction block, final Block commitBlock) throws IOException { if (block.getBlockUCState() == BlockUCState.COMMITTED) return false; assert block.getNumBytes() <= commitBlock.getNumBytes() : "commitBlock length is less than the stored one " + commitBlock.getNumBytes() + " vs. " + block.getNumBytes(); block.commitBlock(commitBlock); return true; } /** * Commit the last block of the file and mark it as complete if it has * meets the minimum replication requirement * * @param bc block collection * @param commitBlock - contains client reported block length and generation * @return true if the last block is changed to committed state. * @throws IOException if the block does not have at least a minimal number * of replicas reported from data-nodes. 
*/ public boolean commitOrCompleteLastBlock(BlockCollection bc, Block commitBlock) throws IOException { if(commitBlock == null) return false; // not committing, this is a block allocation retry BlockInfo lastBlock = bc.getLastBlock(); if(lastBlock == null) return false; // no blocks in file yet if(lastBlock.isComplete()) return false; // already completed (e.g. by syncBlock) final boolean b = commitBlock( (BlockInfoContiguousUnderConstruction) lastBlock, commitBlock); if(countNodes(lastBlock).liveReplicas() >= minReplication) completeBlock(bc, bc.numBlocks()-1, false); return b; } /** * Convert a specified block of the file to a complete block. * @param bc file * @param blkIndex block index in the file * @throws IOException if the block does not have at least a minimal number * of replicas reported from data-nodes. */ private BlockInfo completeBlock(final BlockCollection bc, final int blkIndex, boolean force) throws IOException { if(blkIndex < 0) return null; BlockInfo curBlock = bc.getBlocks()[blkIndex]; if(curBlock.isComplete()) return curBlock; BlockInfoContiguousUnderConstruction ucBlock = (BlockInfoContiguousUnderConstruction) curBlock; int numNodes = ucBlock.numNodes(); if (!force && numNodes < minReplication) throw new IOException("Cannot complete block: " + "block does not satisfy minimal replication requirement."); if(!force && ucBlock.getBlockUCState() != BlockUCState.COMMITTED) throw new IOException( "Cannot complete block: block has not been COMMITTED by the client"); BlockInfo completeBlock = ucBlock.convertToCompleteBlock(); // replace penultimate block in file bc.setBlock(blkIndex, completeBlock); // Since safe-mode only counts complete blocks, and we now have // one more complete block, we need to adjust the total up, and // also count it as safe, if we have at least the minimum replica // count. (We may not have the minimum replica count yet if this is // a "forced" completion when a file is getting closed by an // OP_CLOSE edit on the standby). namesystem.adjustSafeModeBlockTotals(0, 1); namesystem.incrementSafeBlockCount( Math.min(numNodes, minReplication)); // replace block in the blocksMap return blocksMap.replaceBlock(completeBlock); } private BlockInfo completeBlock(final BlockCollection bc, final BlockInfo block, boolean force) throws IOException { BlockInfo[] fileBlocks = bc.getBlocks(); for(int idx = 0; idx < fileBlocks.length; idx++) if(fileBlocks[idx] == block) { return completeBlock(bc, idx, force); } return block; } /** * Force the given block in the given file to be marked as complete, * regardless of whether enough replicas are present. This is necessary * when tailing edit logs as a Standby. */ public BlockInfo forceCompleteBlock(final BlockCollection bc, final BlockInfoContiguousUnderConstruction block) throws IOException { block.commitBlock(block); return completeBlock(bc, block, true); } /** * Convert the last block of the file to an under construction block.<p> * The block is converted only if the file has blocks and the last one * is a partial block (its size is less than the preferred block size). * The converted block is returned to the client. * The client uses the returned block locations to form the data pipeline * for this block.<br> * The methods returns null if there is no partial block at the end. * The client is supposed to allocate a new block with the next call. 
* * @param bc file * @param bytesToRemove num of bytes to remove from block * @return the last block locations if the block is partial or null otherwise */ public LocatedBlock convertLastBlockToUnderConstruction( BlockCollection bc, long bytesToRemove) throws IOException { BlockInfo oldBlock = bc.getLastBlock(); if(oldBlock == null || bc.getPreferredBlockSize() == oldBlock.getNumBytes() - bytesToRemove) return null; assert oldBlock == getStoredBlock(oldBlock) : "last block of the file is not in blocksMap"; DatanodeStorageInfo[] targets = getStorages(oldBlock); BlockInfoContiguousUnderConstruction ucBlock = bc.setLastBlock(oldBlock, targets); blocksMap.replaceBlock(ucBlock); // Remove block from replication queue. NumberReplicas replicas = countNodes(ucBlock); neededReplications.remove(ucBlock, replicas.liveReplicas(), replicas.decommissionedAndDecommissioning(), getReplication(ucBlock)); pendingReplications.remove(ucBlock); // remove this block from the list of pending blocks to be deleted. for (DatanodeStorageInfo storage : targets) { invalidateBlocks.remove(storage.getDatanodeDescriptor(), oldBlock); } // Adjust safe-mode totals, since under-construction blocks don't // count in safe-mode. namesystem.adjustSafeModeBlockTotals( // decrement safe if we had enough targets.length >= minReplication ? -1 : 0, // always decrement total blocks -1); final long fileLength = bc.computeContentSummary(getStoragePolicySuite()).getLength(); final long pos = fileLength - ucBlock.getNumBytes(); return createLocatedBlock(ucBlock, pos, BlockTokenIdentifier.AccessMode.WRITE); } /** * Get all valid locations of the block */ private List<DatanodeStorageInfo> getValidLocations(Block block) { final List<DatanodeStorageInfo> locations = new ArrayList<DatanodeStorageInfo>(blocksMap.numNodes(block)); for(DatanodeStorageInfo storage : blocksMap.getStorages(block)) { // filter invalidate replicas if(!invalidateBlocks.contains(storage.getDatanodeDescriptor(), block)) { locations.add(storage); } } return locations; } private List<LocatedBlock> createLocatedBlockList( final BlockInfo[] blocks, final long offset, final long length, final int nrBlocksToReturn, final AccessMode mode) throws IOException { int curBlk = 0; long curPos = 0, blkSize = 0; int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length; for (curBlk = 0; curBlk < nrBlocks; curBlk++) { blkSize = blocks[curBlk].getNumBytes(); assert blkSize > 0 : "Block of size 0"; if (curPos + blkSize > offset) { break; } curPos += blkSize; } if (nrBlocks > 0 && curBlk == nrBlocks) // offset >= end of file return Collections.<LocatedBlock>emptyList(); long endOff = offset + length; List<LocatedBlock> results = new ArrayList<LocatedBlock>(blocks.length); do { results.add(createLocatedBlock(blocks[curBlk], curPos, mode)); curPos += blocks[curBlk].getNumBytes(); curBlk++; } while (curPos < endOff && curBlk < blocks.length && results.size() < nrBlocksToReturn); return results; } private LocatedBlock createLocatedBlock(final BlockInfo[] blocks, final long endPos, final AccessMode mode) throws IOException { int curBlk = 0; long curPos = 0; int nrBlocks = (blocks[0].getNumBytes() == 0) ? 
0 : blocks.length; for (curBlk = 0; curBlk < nrBlocks; curBlk++) { long blkSize = blocks[curBlk].getNumBytes(); if (curPos + blkSize >= endPos) { break; } curPos += blkSize; } return createLocatedBlock(blocks[curBlk], curPos, mode); } private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos, final AccessMode mode) throws IOException { final LocatedBlock lb = createLocatedBlock(blk, pos); if (mode != null) { setBlockToken(lb, mode); } return lb; } /** @return a LocatedBlock for the given block */ private LocatedBlock createLocatedBlock(final BlockInfo blk, final long pos ) throws IOException { if (blk instanceof BlockInfoContiguousUnderConstruction) { if (blk.isComplete()) { throw new IOException( "blk instanceof BlockInfoUnderConstruction && blk.isComplete()" + ", blk=" + blk); } final BlockInfoContiguousUnderConstruction uc = (BlockInfoContiguousUnderConstruction) blk; final DatanodeStorageInfo[] storages = uc.getExpectedStorageLocations(); final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk); return newLocatedBlock(eb, storages, pos, false); } // get block locations final int numCorruptNodes = countNodes(blk).corruptReplicas(); final int numCorruptReplicas = corruptReplicas.numCorruptReplicas(blk); if (numCorruptNodes != numCorruptReplicas) { LOG.warn("Inconsistent number of corrupt replicas for " + blk + " blockMap has " + numCorruptNodes + " but corrupt replicas map has " + numCorruptReplicas); } final int numNodes = blocksMap.numNodes(blk); final boolean isCorrupt = numCorruptNodes == numNodes; final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptNodes; final DatanodeStorageInfo[] machines = new DatanodeStorageInfo[numMachines]; int j = 0; if (numMachines > 0) { for(DatanodeStorageInfo storage : blocksMap.getStorages(blk)) { final DatanodeDescriptor d = storage.getDatanodeDescriptor(); final boolean replicaCorrupt = corruptReplicas.isReplicaCorrupt(blk, d); if (isCorrupt || (!replicaCorrupt)) machines[j++] = storage; } } assert j == machines.length : "isCorrupt: " + isCorrupt + " numMachines: " + numMachines + " numNodes: " + numNodes + " numCorrupt: " + numCorruptNodes + " numCorruptRepls: " + numCorruptReplicas; final ExtendedBlock eb = new ExtendedBlock(namesystem.getBlockPoolId(), blk); return newLocatedBlock(eb, machines, pos, isCorrupt); } /** Create a LocatedBlocks. */ public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks, final long fileSizeExcludeBlocksUnderConstruction, final boolean isFileUnderConstruction, final long offset, final long length, final boolean needBlockToken, final boolean inSnapshot, FileEncryptionInfo feInfo) throws IOException { assert namesystem.hasReadLock(); if (blocks == null) { return null; } else if (blocks.length == 0) { return new LocatedBlocks(0, isFileUnderConstruction, Collections.<LocatedBlock>emptyList(), null, false, feInfo); } else { if (LOG.isDebugEnabled()) { LOG.debug("blocks = " + java.util.Arrays.asList(blocks)); } final AccessMode mode = needBlockToken? BlockTokenIdentifier.AccessMode.READ: null; final List<LocatedBlock> locatedblocks = createLocatedBlockList( blocks, offset, length, Integer.MAX_VALUE, mode); final LocatedBlock lastlb; final boolean isComplete; if (!inSnapshot) { final BlockInfo last = blocks[blocks.length - 1]; final long lastPos = last.isComplete()? 
fileSizeExcludeBlocksUnderConstruction - last.getNumBytes() : fileSizeExcludeBlocksUnderConstruction; lastlb = createLocatedBlock(last, lastPos, mode); isComplete = last.isComplete(); } else { lastlb = createLocatedBlock(blocks, fileSizeExcludeBlocksUnderConstruction, mode); isComplete = true; } return new LocatedBlocks( fileSizeExcludeBlocksUnderConstruction, isFileUnderConstruction, locatedblocks, lastlb, isComplete, feInfo); } } /** @return current access keys. */ public ExportedBlockKeys getBlockKeys() { return isBlockTokenEnabled()? blockTokenSecretManager.exportKeys() : ExportedBlockKeys.DUMMY_KEYS; } /** Generate a block token for the located block. */ public void setBlockToken(final LocatedBlock b, final AccessMode mode) throws IOException { if (isBlockTokenEnabled()) { // Use cached UGI if serving RPC calls. b.setBlockToken(blockTokenSecretManager.generateToken( NameNode.getRemoteUser().getShortUserName(), b.getBlock(), EnumSet.of(mode))); } } void addKeyUpdateCommand(final List<DatanodeCommand> cmds, final DatanodeDescriptor nodeinfo) { // check access key update if (isBlockTokenEnabled() && nodeinfo.needKeyUpdate) { cmds.add(new KeyUpdateCommand(blockTokenSecretManager.exportKeys())); nodeinfo.needKeyUpdate = false; } } public DataEncryptionKey generateDataEncryptionKey() { if (isBlockTokenEnabled() && encryptDataTransfer) { return blockTokenSecretManager.generateDataEncryptionKey(); } else { return null; } } /** * Clamp the specified replication between the minimum and the maximum * replication levels. */ public short adjustReplication(short replication) { return replication < minReplication? minReplication : replication > maxReplication? maxReplication: replication; } /** * Check whether the replication parameter is within the range * determined by system configuration. */ public void verifyReplication(String src, short replication, String clientName) throws IOException { if (replication >= minReplication && replication <= maxReplication) { //common case. avoid building 'text' return; } String text = "file " + src + ((clientName != null) ? " on client " + clientName : "") + ".\n" + "Requested replication " + replication; if (replication > maxReplication) throw new IOException(text + " exceeds maximum " + maxReplication); if (replication < minReplication) throw new IOException(text + " is less than the required minimum " + minReplication); } /** * Check if a block is replicated to at least the minimum replication. */ public boolean isSufficientlyReplicated(BlockInfo b) { // Compare against the lesser of the minReplication and number of live DNs. final int replication = Math.min(minReplication, getDatanodeManager().getNumLiveDataNodes()); return countNodes(b).liveReplicas() >= replication; } /** * return a list of blocks & their locations on <code>datanode</code> whose * total size is <code>size</code> * * @param datanode on which blocks are located * @param size total size of blocks */ public BlocksWithLocations getBlocks(DatanodeID datanode, long size ) throws IOException { namesystem.checkOperation(OperationCategory.READ); namesystem.readLock(); try { namesystem.checkOperation(OperationCategory.READ); return getBlocksWithLocations(datanode, size); } finally { namesystem.readUnlock(); } } /** Get all blocks with location information from a datanode. 
*/ private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode, final long size) throws UnregisteredNodeException { final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode); if (node == null) { blockLog.warn("BLOCK* getBlocks: Asking for blocks from an" + " unrecorded node {}", datanode); throw new HadoopIllegalArgumentException( "Datanode " + datanode + " not found."); } int numBlocks = node.numBlocks(); if(numBlocks == 0) { return new BlocksWithLocations(new BlockWithLocations[0]); } Iterator<BlockInfo> iter = node.getBlockIterator(); // starting from a random block int startBlock = ThreadLocalRandom.current().nextInt(numBlocks); // skip blocks for(int i=0; i<startBlock; i++) { iter.next(); } List<BlockWithLocations> results = new ArrayList<BlockWithLocations>(); long totalSize = 0; BlockInfo curBlock; while(totalSize<size && iter.hasNext()) { curBlock = iter.next(); if(!curBlock.isComplete()) continue; totalSize += addBlock(curBlock, results); } if(totalSize<size) { iter = node.getBlockIterator(); // start from the beginning for(int i=0; i<startBlock&&totalSize<size; i++) { curBlock = iter.next(); if(!curBlock.isComplete()) continue; totalSize += addBlock(curBlock, results); } } return new BlocksWithLocations( results.toArray(new BlockWithLocations[results.size()])); } /** Remove the blocks associated to the given datanode. */ void removeBlocksAssociatedTo(final DatanodeDescriptor node) { final Iterator<? extends Block> it = node.getBlockIterator(); while(it.hasNext()) { removeStoredBlock(it.next(), node); } // Remove all pending DN messages referencing this DN. pendingDNMessages.removeAllMessagesForDatanode(node); node.resetBlocks(); invalidateBlocks.remove(node); } /** Remove the blocks associated to the given DatanodeStorageInfo. */ void removeBlocksAssociatedTo(final DatanodeStorageInfo storageInfo) { assert namesystem.hasWriteLock(); final Iterator<? extends Block> it = storageInfo.getBlockIterator(); DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); while(it.hasNext()) { Block block = it.next(); removeStoredBlock(block, node); invalidateBlocks.remove(node, block); } namesystem.checkSafeMode(); } /** * Adds block to list of blocks which will be invalidated on specified * datanode and log the operation */ void addToInvalidates(final Block block, final DatanodeInfo datanode) { if (!namesystem.isPopulatingReplQueues()) { return; } invalidateBlocks.add(block, datanode, true); } /** * Adds block to list of blocks which will be invalidated on all its * datanodes. */ private void addToInvalidates(Block b) { if (!namesystem.isPopulatingReplQueues()) { return; } StringBuilder datanodes = new StringBuilder(); for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); invalidateBlocks.add(b, node, false); datanodes.append(node).append(" "); } if (datanodes.length() != 0) { blockLog.debug("BLOCK* addToInvalidates: {} {}", b, datanodes.toString()); } } /** * Remove all block invalidation tasks under this datanode UUID; * used when a datanode registers with a new UUID and the old one * is wiped. */ void removeFromInvalidates(final DatanodeInfo datanode) { if (!namesystem.isPopulatingReplQueues()) { return; } invalidateBlocks.remove(datanode); } /** * Mark the block belonging to datanode as corrupt * @param blk Block to be marked as corrupt * @param dn Datanode which holds the corrupt replica * @param storageID if known, null otherwise. 
* @param reason a textual reason why the block should be marked corrupt, * for logging purposes */ public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk, final DatanodeInfo dn, String storageID, String reason) throws IOException { assert namesystem.hasWriteLock(); final BlockInfo storedBlock = getStoredBlock(blk.getLocalBlock()); if (storedBlock == null) { // Check if the replica is in the blockMap, if not // ignore the request for now. This could happen when BlockScanner // thread of Datanode reports bad block before Block reports are sent // by the Datanode on startup blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found", blk); return; } DatanodeDescriptor node = getDatanodeManager().getDatanode(dn); if (node == null) { throw new IOException("Cannot mark " + blk + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid() + ") does not exist"); } markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED), storageID == null ? null : node.getStorageInfo(storageID), node); } /** * * @param b * @param storageInfo storage that contains the block, if known. null otherwise. * @throws IOException */ private void markBlockAsCorrupt(BlockToMarkCorrupt b, DatanodeStorageInfo storageInfo, DatanodeDescriptor node) throws IOException { if (b.corrupted.isDeleted()) { blockLog.debug("BLOCK markBlockAsCorrupt: {} cannot be marked as" + " corrupt as it does not belong to any file", b); addToInvalidates(b.corrupted, node); return; } short expectedReplicas = b.corrupted.getBlockCollection().getPreferredBlockReplication(); // Add replica to the data-node if it is not already there if (storageInfo != null) { storageInfo.addBlock(b.stored); } // Add this replica to corruptReplicas Map corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason, b.reasonCode); NumberReplicas numberOfReplicas = countNodes(b.stored); boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >= expectedReplicas; boolean minReplicationSatisfied = numberOfReplicas.liveReplicas() >= minReplication; boolean hasMoreCorruptReplicas = minReplicationSatisfied && (numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) > expectedReplicas; boolean corruptedDuringWrite = minReplicationSatisfied && (b.stored.getGenerationStamp() > b.corrupted.getGenerationStamp()); // case 1: have enough number of live replicas // case 2: corrupted replicas + live replicas > Replication factor // case 3: Block is marked corrupt due to failure while writing. In this // case genstamp will be different than that of valid block. // In all these cases we can delete the replica. // In case of 3, rbw block will be deleted and valid block can be replicated if (hasEnoughLiveReplicas || hasMoreCorruptReplicas || corruptedDuringWrite) { // the block is over-replicated so invalidate the replicas immediately invalidateBlock(b, node); } else if (namesystem.isPopulatingReplQueues()) { // add the block to neededReplication updateNeededReplications(b.stored, -1, 0); } } /** * Invalidates the given block on the given datanode. 
* @return true if the block was successfully invalidated and no longer * present in the BlocksMap */ private boolean invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn ) throws IOException { blockLog.debug("BLOCK* invalidateBlock: {} on {}", b, dn); DatanodeDescriptor node = getDatanodeManager().getDatanode(dn); if (node == null) { throw new IOException("Cannot invalidate " + b + " because datanode " + dn + " does not exist."); } // Check how many copies we have of the block NumberReplicas nr = countNodes(b.stored); if (nr.replicasOnStaleNodes() > 0) { blockLog.debug("BLOCK* invalidateBlocks: postponing " + "invalidation of {} on {} because {} replica(s) are located on " + "nodes with potentially out-of-date block reports", b, dn, nr.replicasOnStaleNodes()); postponeBlock(b.corrupted); return false; } else if (nr.liveReplicas() >= 1) { // If we have at least one copy on a live node, then we can delete it. addToInvalidates(b.corrupted, dn); removeStoredBlock(b.stored, node); blockLog.debug("BLOCK* invalidateBlocks: {} on {} listed for deletion.", b, dn); return true; } else { blockLog.debug("BLOCK* invalidateBlocks: {} on {} is the only copy and" + " was not deleted", b, dn); return false; } } public void setPostponeBlocksFromFuture(boolean postpone) { this.shouldPostponeBlocksFromFuture = postpone; } private void postponeBlock(Block blk) { if (postponedMisreplicatedBlocks.add(blk)) { postponedMisreplicatedBlocksCount.incrementAndGet(); } } void updateState() { pendingReplicationBlocksCount = pendingReplications.size(); underReplicatedBlocksCount = neededReplications.size(); corruptReplicaBlocksCount = corruptReplicas.size(); } /** Return the number of under-replicated but not missing blocks. */ public int getUnderReplicatedNotMissingBlocks() { return neededReplications.getUnderReplicatedBlockCount(); } /** * Schedule blocks for deletion at datanodes. * @param nodesToProcess number of datanodes to schedule deletion work for * @return total number of blocks scheduled for deletion */ int computeInvalidateWork(int nodesToProcess) { final List<DatanodeInfo> nodes = invalidateBlocks.getDatanodes(); Collections.shuffle(nodes); nodesToProcess = Math.min(nodes.size(), nodesToProcess); int blockCnt = 0; for (DatanodeInfo dnInfo : nodes) { int blocks = invalidateWorkForOneNode(dnInfo); if (blocks > 0) { blockCnt += blocks; if (--nodesToProcess == 0) { break; } } } return blockCnt; } /** * Scan blocks in {@link #neededReplications} and assign replication * work to the data-nodes they belong to. * * The number of blocks to process equals either twice the number of live * data-nodes or the number of under-replicated blocks, whichever is less. * * @return number of blocks scheduled for replication during this iteration.
*/ int computeReplicationWork(int blocksToProcess) { List<List<BlockInfo>> blocksToReplicate = null; namesystem.writeLock(); try { // Choose the blocks to be replicated blocksToReplicate = neededReplications .chooseUnderReplicatedBlocks(blocksToProcess); } finally { namesystem.writeUnlock(); } return computeReplicationWorkForBlocks(blocksToReplicate); } /** Replicate a set of blocks * * @param blocksToReplicate blocks to be replicated, for each priority * @return the number of blocks scheduled for replication */ @VisibleForTesting int computeReplicationWorkForBlocks(List<List<BlockInfo>> blocksToReplicate) { int requiredReplication, numEffectiveReplicas; List<DatanodeDescriptor> containingNodes; DatanodeDescriptor srcNode; BlockCollection bc = null; int additionalReplRequired; int scheduledWork = 0; List<ReplicationWork> work = new LinkedList<ReplicationWork>(); namesystem.writeLock(); try { synchronized (neededReplications) { for (int priority = 0; priority < blocksToReplicate.size(); priority++) { for (BlockInfo block : blocksToReplicate.get(priority)) { // block should belong to a file bc = blocksMap.getBlockCollection(block); // abandoned block or block reopened for append if(bc == null || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) { neededReplications.remove(block, priority); // remove from neededReplications continue; } requiredReplication = bc.getPreferredBlockReplication(); // get a source data-node containingNodes = new ArrayList<DatanodeDescriptor>(); List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<DatanodeStorageInfo>(); NumberReplicas numReplicas = new NumberReplicas(); srcNode = chooseSourceDatanode( block, containingNodes, liveReplicaNodes, numReplicas, priority); if(srcNode == null) { // block can not be replicated from any node LOG.debug("Block " + block + " cannot be repl from any node"); continue; } // liveReplicaNodes can include READ_ONLY_SHARED replicas which are // not included in the numReplicas.liveReplicas() count assert liveReplicaNodes.size() >= numReplicas.liveReplicas(); // do not schedule more if enough replicas is already pending numEffectiveReplicas = numReplicas.liveReplicas() + pendingReplications.getNumReplicas(block); if (numEffectiveReplicas >= requiredReplication) { if ( (pendingReplications.getNumReplicas(block) > 0) || (blockHasEnoughRacks(block)) ) { neededReplications.remove(block, priority); // remove from neededReplications blockLog.debug("BLOCK* Removing {} from neededReplications as" + " it has enough replicas", block); continue; } } if (numReplicas.liveReplicas() < requiredReplication) { additionalReplRequired = requiredReplication - numEffectiveReplicas; } else { additionalReplRequired = 1; // Needed on a new rack } work.add(new ReplicationWork(block, bc, srcNode, containingNodes, liveReplicaNodes, additionalReplRequired, priority)); } } } } finally { namesystem.writeUnlock(); } final Set<Node> excludedNodes = new HashSet<Node>(); for(ReplicationWork rw : work){ // Exclude all of the containing nodes from being targets. // This list includes decommissioning or corrupt nodes. excludedNodes.clear(); for (DatanodeDescriptor dn : rw.containingNodes) { excludedNodes.add(dn); } // choose replication targets: NOT HOLDING THE GLOBAL LOCK // It is costly to extract the filename for which chooseTargets is called, // so for now we pass in the block collection itself. 
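      // The targets chosen here are only tentative: the loop below re-acquires
      // the namesystem write lock and re-validates each block (still present in
      // the blocksMap, not reopened for append, replication still needed) before
      // charging the work to rw.srcNode and moving it into pendingReplications.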
rw.chooseTargets(blockplacement, storagePolicySuite, excludedNodes); } namesystem.writeLock(); try { for(ReplicationWork rw : work){ final DatanodeStorageInfo[] targets = rw.targets; if(targets == null || targets.length == 0){ rw.targets = null; continue; } synchronized (neededReplications) { BlockInfo block = rw.block; int priority = rw.priority; // Recheck since global lock was released // block should belong to a file bc = blocksMap.getBlockCollection(block); // abandoned block or block reopened for append if(bc == null || (bc.isUnderConstruction() && block.equals(bc.getLastBlock()))) { neededReplications.remove(block, priority); // remove from neededReplications rw.targets = null; continue; } requiredReplication = bc.getPreferredBlockReplication(); // do not schedule more if enough replicas is already pending NumberReplicas numReplicas = countNodes(block); numEffectiveReplicas = numReplicas.liveReplicas() + pendingReplications.getNumReplicas(block); if (numEffectiveReplicas >= requiredReplication) { if ( (pendingReplications.getNumReplicas(block) > 0) || (blockHasEnoughRacks(block)) ) { neededReplications.remove(block, priority); // remove from neededReplications rw.targets = null; blockLog.debug("BLOCK* Removing {} from neededReplications as" + " it has enough replicas", block); continue; } } if ( (numReplicas.liveReplicas() >= requiredReplication) && (!blockHasEnoughRacks(block)) ) { if (rw.srcNode.getNetworkLocation().equals( targets[0].getDatanodeDescriptor().getNetworkLocation())) { //No use continuing, unless a new rack in this case continue; } } // Add block to the to be replicated list rw.srcNode.addBlockToBeReplicated(block, targets); scheduledWork++; DatanodeStorageInfo.incrementBlocksScheduled(targets); // Move the block-replication into a "pending" state. // The reason we use 'pending' is so we can retry // replications that fail after an appropriate amount of time. pendingReplications.increment(block, DatanodeStorageInfo.toDatanodeDescriptors(targets)); blockLog.debug("BLOCK* block {} is moved from neededReplications to " + "pendingReplications", block); // remove from neededReplications if(numEffectiveReplicas + targets.length >= requiredReplication) { neededReplications.remove(block, priority); // remove from neededReplications } } } } finally { namesystem.writeUnlock(); } if (blockLog.isInfoEnabled()) { // log which blocks have been scheduled for replication for(ReplicationWork rw : work){ DatanodeStorageInfo[] targets = rw.targets; if (targets != null && targets.length != 0) { StringBuilder targetList = new StringBuilder("datanode(s)"); for (int k = 0; k < targets.length; k++) { targetList.append(' '); targetList.append(targets[k].getDatanodeDescriptor()); } blockLog.debug("BLOCK* ask {} to replicate {} to {}", rw.srcNode, rw.block, targetList); } } } if (blockLog.isDebugEnabled()) { blockLog.debug("BLOCK* neededReplications = {} pendingReplications = {}", neededReplications.size(), pendingReplications.size()); } return scheduledWork; } /** Choose target for WebHDFS redirection. */ public DatanodeStorageInfo[] chooseTarget4WebHDFS(String src, DatanodeDescriptor clientnode, Set<Node> excludes, long blocksize) { return blockplacement.chooseTarget(src, 1, clientnode, Collections.<DatanodeStorageInfo>emptyList(), false, excludes, blocksize, storagePolicySuite.getDefaultPolicy()); } /** Choose target for getting additional datanodes for an existing pipeline. 
*/ public DatanodeStorageInfo[] chooseTarget4AdditionalDatanode(String src, int numAdditionalNodes, Node clientnode, List<DatanodeStorageInfo> chosen, Set<Node> excludes, long blocksize, byte storagePolicyID) { final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID); return blockplacement.chooseTarget(src, numAdditionalNodes, clientnode, chosen, true, excludes, blocksize, storagePolicy); } /** * Choose target datanodes for creating a new block. * * @throws IOException * if the number of targets < minimum replication. * @see BlockPlacementPolicy#chooseTarget(String, int, Node, * Set, long, List, BlockStoragePolicy) */ public DatanodeStorageInfo[] chooseTarget4NewBlock(final String src, final int numOfReplicas, final Node client, final Set<Node> excludedNodes, final long blocksize, final List<String> favoredNodes, final byte storagePolicyID) throws IOException { List<DatanodeDescriptor> favoredDatanodeDescriptors = getDatanodeDescriptors(favoredNodes); final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID); final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src, numOfReplicas, client, excludedNodes, blocksize, favoredDatanodeDescriptors, storagePolicy); if (targets.length < minReplication) { throw new IOException("File " + src + " could only be replicated to " + targets.length + " nodes instead of minReplication (=" + minReplication + "). There are " + getDatanodeManager().getNetworkTopology().getNumOfLeaves() + " datanode(s) running and " + (excludedNodes == null? "no": excludedNodes.size()) + " node(s) are excluded in this operation."); } return targets; } /** * Get list of datanode descriptors for given list of nodes. Nodes are * hostaddress:port or just hostaddress. */ List<DatanodeDescriptor> getDatanodeDescriptors(List<String> nodes) { List<DatanodeDescriptor> datanodeDescriptors = null; if (nodes != null) { datanodeDescriptors = new ArrayList<DatanodeDescriptor>(nodes.size()); for (int i = 0; i < nodes.size(); i++) { DatanodeDescriptor node = datanodeManager.getDatanodeDescriptor(nodes.get(i)); if (node != null) { datanodeDescriptors.add(node); } } } return datanodeDescriptors; } /** * Parse the data-nodes the block belongs to and choose one, * which will be the replication source. * * We prefer nodes that are in DECOMMISSION_INPROGRESS state to other nodes * since the former do not have write traffic and hence are less busy. * We do not use already decommissioned nodes as a source. * Otherwise we choose a random node among those that did not reach their * replication limits. However, if the replication is of the highest priority * and all nodes have reached their replication limits, we will choose a * random node despite the replication limit. * * In addition form a list of all nodes containing the block * and calculate its replication numbers. * * @param block Block for which a replication source is needed * @param containingNodes List to be populated with nodes found to contain the * given block * @param nodesContainingLiveReplicas List to be populated with nodes found to * contain live replicas of the given block * @param numReplicas NumberReplicas instance to be initialized with the * counts of live, corrupt, excess, and * decommissioned replicas of the given * block. 
* @param priority integer representing replication priority of the given * block * @return the DatanodeDescriptor of the chosen node from which to replicate * the given block */ @VisibleForTesting DatanodeDescriptor chooseSourceDatanode(Block block, List<DatanodeDescriptor> containingNodes, List<DatanodeStorageInfo> nodesContainingLiveReplicas, NumberReplicas numReplicas, int priority) { containingNodes.clear(); nodesContainingLiveReplicas.clear(); DatanodeDescriptor srcNode = null; int live = 0; int decommissioned = 0; int decommissioning = 0; int corrupt = 0; int excess = 0; Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(block); for(DatanodeStorageInfo storage : blocksMap.getStorages(block)) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); LightWeightLinkedSet<Block> excessBlocks = excessReplicateMap.get(node.getDatanodeUuid()); int countableReplica = storage.getState() == State.NORMAL ? 1 : 0; if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) corrupt += countableReplica; else if (node.isDecommissionInProgress()) { decommissioning += countableReplica; } else if (node.isDecommissioned()) { decommissioned += countableReplica; } else if (excessBlocks != null && excessBlocks.contains(block)) { excess += countableReplica; } else { nodesContainingLiveReplicas.add(storage); live += countableReplica; } containingNodes.add(node); // Check if this replica is corrupt // If so, do not select the node as src node if ((nodesCorrupt != null) && nodesCorrupt.contains(node)) continue; if(priority != UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY && !node.isDecommissionInProgress() && node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams) { continue; // already reached replication limit } if (node.getNumberOfBlocksToBeReplicated() >= replicationStreamsHardLimit) { continue; } // the block must not be scheduled for removal on srcNode if(excessBlocks != null && excessBlocks.contains(block)) continue; // never use already decommissioned nodes if(node.isDecommissioned()) continue; // We got this far, current node is a reasonable choice if (srcNode == null) { srcNode = node; continue; } // switch to a different node randomly // this to prevent from deterministically selecting the same node even // if the node failed to replicate the block on previous iterations if(ThreadLocalRandom.current().nextBoolean()) srcNode = node; } if(numReplicas != null) numReplicas.initialize(live, decommissioned, decommissioning, corrupt, excess, 0); return srcNode; } /** * If there were any replication requests that timed out, reap them * and put them back into the neededReplication queue */ private void processPendingReplications() { BlockInfo[] timedOutItems = pendingReplications.getTimedOutBlocks(); if (timedOutItems != null) { namesystem.writeLock(); try { for (int i = 0; i < timedOutItems.length; i++) { /* * Use the blockinfo from the blocksmap to be certain we're working * with the most up-to-date block information (e.g. genstamp). */ BlockInfo bi = blocksMap.getStoredBlock(timedOutItems[i]); if (bi == null) { continue; } NumberReplicas num = countNodes(timedOutItems[i]); if (isNeededReplication(bi, getReplication(bi), num.liveReplicas())) { neededReplications.add(bi, num.liveReplicas(), num.decommissionedAndDecommissioning(), getReplication(bi)); } } } finally { namesystem.writeUnlock(); } /* If we know the target datanodes where the replication timedout, * we could invoke decBlocksScheduled() on it. Its ok for now. 
*/ } } public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) { assert namesystem.hasReadLock(); DatanodeDescriptor node = null; try { node = datanodeManager.getDatanode(nodeReg); } catch (UnregisteredNodeException e) { LOG.warn("Unregistered datanode {}", nodeReg); return 0; } if (node == null) { LOG.warn("Failed to find datanode {}", nodeReg); return 0; } // Request a new block report lease. The BlockReportLeaseManager has // its own internal locking. long leaseId = blockReportLeaseManager.requestLease(node); BlockManagerFaultInjector.getInstance(). requestBlockReportLease(node, leaseId); return leaseId; } /** * StatefulBlockInfo is used to build the "toUC" list, which is a list of * updates to the information about under-construction blocks. * Besides the block in question, it provides the ReplicaState * reported by the datanode in the block report. */ static class StatefulBlockInfo { final BlockInfoContiguousUnderConstruction storedBlock; final Block reportedBlock; final ReplicaState reportedState; StatefulBlockInfo(BlockInfoContiguousUnderConstruction storedBlock, Block reportedBlock, ReplicaState reportedState) { this.storedBlock = storedBlock; this.reportedBlock = reportedBlock; this.reportedState = reportedState; } } /** * BlockToMarkCorrupt is used to build the "toCorrupt" list, which is a * list of blocks that should be considered corrupt due to a block report. */ private static class BlockToMarkCorrupt { /** The corrupted block in a datanode. */ final BlockInfo corrupted; /** The corresponding block stored in the BlockManager. */ final BlockInfo stored; /** The reason to mark corrupt. */ final String reason; /** The reason code to be stored */ final Reason reasonCode; BlockToMarkCorrupt(BlockInfo corrupted, BlockInfo stored, String reason, Reason reasonCode) { Preconditions.checkNotNull(corrupted, "corrupted is null"); Preconditions.checkNotNull(stored, "stored is null"); this.corrupted = corrupted; this.stored = stored; this.reason = reason; this.reasonCode = reasonCode; } BlockToMarkCorrupt(BlockInfo stored, String reason, Reason reasonCode) { this(stored, stored, reason, reasonCode); } BlockToMarkCorrupt(BlockInfo stored, long gs, String reason, Reason reasonCode) { this(new BlockInfoContiguous((BlockInfoContiguous)stored), stored, reason, reasonCode); //the corrupted block in datanode has a different generation stamp corrupted.setGenerationStamp(gs); } @Override public String toString() { return corrupted + "(" + (corrupted == stored? "same as stored": "stored=" + stored) + ")"; } } /** * The given storage is reporting all its blocks. * Update the (storage-->block list) and (block-->storage list) maps. * * @return true if all known storages of the given DN have finished reporting. * @throws IOException */ public boolean processReport(final DatanodeID nodeID, final DatanodeStorage storage, final BlockListAsLongs newReport, BlockReportContext context, boolean lastStorageInRpc) throws IOException { namesystem.writeLock(); final long startTime = Time.monotonicNow(); //after acquiring write lock final long endTime; DatanodeDescriptor node; Collection<Block> invalidatedBlocks = null; try { node = datanodeManager.getDatanode(nodeID); if (node == null || !node.isAlive) { throw new IOException( "ProcessReport from dead or unregistered node: " + nodeID); } // To minimize startup time, we discard any second (or later) block reports // that we receive while still in startup phase. 
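      // Processing order below: resolve the reporting storage (creating it on
      // demand for backwards compatibility), discard non-initial reports while
      // still in startup safe mode, verify the block report lease when a
      // context is supplied, then take either the fast first-report path or
      // the normal diff-based path.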
DatanodeStorageInfo storageInfo = node.getStorageInfo(storage.getStorageID()); if (storageInfo == null) { // We handle this for backwards compatibility. storageInfo = node.updateStorage(storage); } if (namesystem.isInStartupSafeMode() && storageInfo.getBlockReportCount() > 0) { blockLog.info("BLOCK* processReport: " + "discarded non-initial block report from {}" + " because namenode still in startup phase", nodeID); return !node.hasStaleStorages(); } if (context != null) { if (!blockReportLeaseManager.checkLease(node, startTime, context.getLeaseId())) { return false; } } if (storageInfo.getBlockReportCount() == 0) { // The first block report can be processed a lot more efficiently than // ordinary block reports. This shortens restart times. LOG.info("Processing first storage report for " + storageInfo.getStorageID() + " from datanode " + nodeID.getDatanodeUuid()); processFirstBlockReport(storageInfo, newReport); } else { invalidatedBlocks = processReport(storageInfo, newReport); } storageInfo.receivedBlockReport(); if (context != null) { storageInfo.setLastBlockReportId(context.getReportId()); if (lastStorageInRpc) { int rpcsSeen = node.updateBlockReportContext(context); if (rpcsSeen >= context.getTotalRpcs()) { long leaseId = blockReportLeaseManager.removeLease(node); BlockManagerFaultInjector.getInstance(). removeBlockReportLease(node, leaseId); List<DatanodeStorageInfo> zombies = node.removeZombieStorages(); if (zombies.isEmpty()) { LOG.debug("processReport 0x{}: no zombie storages found.", Long.toHexString(context.getReportId())); } else { for (DatanodeStorageInfo zombie : zombies) { removeZombieReplicas(context, zombie); } } node.clearBlockReportContext(); } else { LOG.debug("processReport 0x{}: {} more RPCs remaining in this " + "report.", Long.toHexString(context.getReportId()), (context.getTotalRpcs() - rpcsSeen) ); } } } } finally { endTime = Time.monotonicNow(); namesystem.writeUnlock(); } if (invalidatedBlocks != null) { for (Block b : invalidatedBlocks) { blockLog.info("BLOCK* processReport: {} on node {} size {} does not " + "belong to any file", b, node, b.getNumBytes()); } } // Log the block report processing stats from Namenode perspective final NameNodeMetrics metrics = NameNode.getNameNodeMetrics(); if (metrics != null) { metrics.addBlockReport((int) (endTime - startTime)); } blockLog.info("BLOCK* processReport: from storage {} node {}, " + "blocks: {}, hasStaleStorage: {}, processing time: {} msecs", storage .getStorageID(), nodeID, newReport.getNumberOfBlocks(), node.hasStaleStorages(), (endTime - startTime)); return !node.hasStaleStorages(); } private void removeZombieReplicas(BlockReportContext context, DatanodeStorageInfo zombie) { LOG.warn("processReport 0x{}: removing zombie storage {}, which no " + "longer exists on the DataNode.", Long.toHexString(context.getReportId()), zombie.getStorageID()); assert(namesystem.hasWriteLock()); Iterator<BlockInfo> iter = zombie.getBlockIterator(); int prevBlocks = zombie.numBlocks(); while (iter.hasNext()) { BlockInfo block = iter.next(); // We assume that a block can be on only one storage in a DataNode. // That's why we pass in the DatanodeDescriptor rather than the // DatanodeStorageInfo. 
// TODO: remove this assumption in case we want to put a block on // more than one storage on a datanode (and because it's a difficult // assumption to really enforce) removeStoredBlock(block, zombie.getDatanodeDescriptor()); invalidateBlocks.remove(zombie.getDatanodeDescriptor(), block); } assert(zombie.numBlocks() == 0); LOG.warn("processReport 0x{}: removed {} replicas from storage {}, " + "which no longer exists on the DataNode.", Long.toHexString(context.getReportId()), prevBlocks, zombie.getStorageID()); } /** * Rescan the list of blocks which were previously postponed. */ void rescanPostponedMisreplicatedBlocks() { if (getPostponedMisreplicatedBlocksCount() == 0) { return; } long startTimeRescanPostponedMisReplicatedBlocks = Time.monotonicNow(); long startPostponedMisReplicatedBlocksCount = getPostponedMisreplicatedBlocksCount(); namesystem.writeLock(); try { // blocksPerRescan is the configured number of blocks per rescan. // Randomly select blocksPerRescan consecutive blocks from the HashSet // when the number of blocks remaining is larger than blocksPerRescan. // The reason we don't always pick the first blocksPerRescan blocks is to // handle the case if for some reason some datanodes remain in // content stale state for a long time and only impact the first // blocksPerRescan blocks. int i = 0; long startIndex = 0; long blocksPerRescan = datanodeManager.getBlocksPerPostponedMisreplicatedBlocksRescan(); long base = getPostponedMisreplicatedBlocksCount() - blocksPerRescan; if (base > 0) { startIndex = ThreadLocalRandom.current().nextLong() % (base+1); if (startIndex < 0) { startIndex += (base+1); } } Iterator<Block> it = postponedMisreplicatedBlocks.iterator(); for (int tmp = 0; tmp < startIndex; tmp++) { it.next(); } for (;it.hasNext(); i++) { Block b = it.next(); if (i >= blocksPerRescan) { break; } BlockInfo bi = blocksMap.getStoredBlock(b); if (bi == null) { if (LOG.isDebugEnabled()) { LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " + "Postponed mis-replicated block " + b + " no longer found " + "in block map."); } it.remove(); postponedMisreplicatedBlocksCount.decrementAndGet(); continue; } MisReplicationResult res = processMisReplicatedBlock(bi); if (LOG.isDebugEnabled()) { LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " + "Re-scanned block " + b + ", result is " + res); } if (res != MisReplicationResult.POSTPONE) { it.remove(); postponedMisreplicatedBlocksCount.decrementAndGet(); } } } finally { namesystem.writeUnlock(); long endPostponedMisReplicatedBlocksCount = getPostponedMisreplicatedBlocksCount(); LOG.info("Rescan of postponedMisreplicatedBlocks completed in " + (Time.monotonicNow() - startTimeRescanPostponedMisReplicatedBlocks) + " msecs. " + endPostponedMisReplicatedBlocksCount + " blocks are left. " + (startPostponedMisReplicatedBlocksCount - endPostponedMisReplicatedBlocksCount) + " blocks are removed."); } } private Collection<Block> processReport( final DatanodeStorageInfo storageInfo, final BlockListAsLongs report) throws IOException { // Normal case: // Modify the (block-->datanode) map, according to the difference // between the old and new block report. 
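    // reportDiff() below sorts each reported replica into one of five buckets:
    // toAdd (record on this storage), toRemove (recorded here but no longer
    // reported), toInvalidate (not in the blocksMap, so delete on the DN),
    // toCorrupt (genstamp/length mismatch), and toUC (replicas of blocks
    // still under construction).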
// Collection<BlockInfo> toAdd = new LinkedList<BlockInfo>(); Collection<Block> toRemove = new TreeSet<Block>(); Collection<Block> toInvalidate = new LinkedList<Block>(); Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>(); Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>(); reportDiff(storageInfo, report, toAdd, toRemove, toInvalidate, toCorrupt, toUC); DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); // Process the blocks on each queue for (StatefulBlockInfo b : toUC) { addStoredBlockUnderConstruction(b, storageInfo); } for (Block b : toRemove) { removeStoredBlock(b, node); } int numBlocksLogged = 0; for (BlockInfo b : toAdd) { addStoredBlock(b, storageInfo, null, numBlocksLogged < maxNumBlocksToLog); numBlocksLogged++; } if (numBlocksLogged > maxNumBlocksToLog) { blockLog.info("BLOCK* processReport: logged info for {} of {} " + "reported.", maxNumBlocksToLog, numBlocksLogged); } for (Block b : toInvalidate) { addToInvalidates(b, node); } for (BlockToMarkCorrupt b : toCorrupt) { markBlockAsCorrupt(b, storageInfo, node); } return toInvalidate; } /** * Mark block replicas as corrupt except those on the storages in * newStorages list. */ public void markBlockReplicasAsCorrupt(BlockInfo block, long oldGenerationStamp, long oldNumBytes, DatanodeStorageInfo[] newStorages) throws IOException { assert namesystem.hasWriteLock(); BlockToMarkCorrupt b = null; if (block.getGenerationStamp() != oldGenerationStamp) { b = new BlockToMarkCorrupt(block, oldGenerationStamp, "genstamp does not match " + oldGenerationStamp + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); } else if (block.getNumBytes() != oldNumBytes) { b = new BlockToMarkCorrupt(block, "length does not match " + oldNumBytes + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH); } else { return; } for (DatanodeStorageInfo storage : getStorages(block)) { boolean isCorrupt = true; if (newStorages != null) { for (DatanodeStorageInfo newStorage : newStorages) { if (newStorage!= null && storage.equals(newStorage)) { isCorrupt = false; break; } } } if (isCorrupt) { blockLog.debug("BLOCK* markBlockReplicasAsCorrupt: mark block replica" + " {} on {} as corrupt because the dn is not in the new committed " + "storage list.", b, storage.getDatanodeDescriptor()); markBlockAsCorrupt(b, storage, storage.getDatanodeDescriptor()); } } } /** * processFirstBlockReport is intended only for processing "initial" block * reports, the first block report received from a DN after it registers. * It just adds all the valid replicas to the datanode, without calculating * a toRemove list (since there won't be any). It also silently discards * any invalid blocks, thereby deferring their processing until * the next block report. 
* @param storageInfo - DatanodeStorageInfo that sent the report * @param report - the initial block report, to be processed * @throws IOException */ private void processFirstBlockReport( final DatanodeStorageInfo storageInfo, final BlockListAsLongs report) throws IOException { if (report == null) return; assert (namesystem.hasWriteLock()); assert (storageInfo.getBlockReportCount() == 0); for (BlockReportReplica iblk : report) { ReplicaState reportedState = iblk.getState(); if (LOG.isDebugEnabled()) { LOG.debug("Initial report of block " + iblk.getBlockName() + " on " + storageInfo.getDatanodeDescriptor() + " size " + iblk.getNumBytes() + " replicaState = " + reportedState); } if (shouldPostponeBlocksFromFuture && namesystem.isGenStampInFuture(iblk)) { queueReportedBlock(storageInfo, iblk, reportedState, QUEUE_REASON_FUTURE_GENSTAMP); continue; } BlockInfo storedBlock = blocksMap.getStoredBlock(iblk); // If block does not belong to any file, we are done. if (storedBlock == null) continue; // If block is corrupt, mark it and continue to next block. BlockUCState ucState = storedBlock.getBlockUCState(); BlockToMarkCorrupt c = checkReplicaCorrupt( iblk, reportedState, storedBlock, ucState, storageInfo.getDatanodeDescriptor()); if (c != null) { if (shouldPostponeBlocksFromFuture) { // In the Standby, we may receive a block report for a file that we // just have an out-of-date gen-stamp or state for, for example. queueReportedBlock(storageInfo, iblk, reportedState, QUEUE_REASON_CORRUPT_STATE); } else { markBlockAsCorrupt(c, storageInfo, storageInfo.getDatanodeDescriptor()); } continue; } // If block is under construction, add this replica to its list if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { ((BlockInfoContiguousUnderConstruction)storedBlock) .addReplicaIfNotPresent(storageInfo, iblk, reportedState); // OpenFileBlocks only inside snapshots also will be added to safemode // threshold. 
So we need to update such blocks to safemode // refer HDFS-5283 BlockInfoContiguousUnderConstruction blockUC = (BlockInfoContiguousUnderConstruction) storedBlock; if (namesystem.isInSnapshot(blockUC)) { int numOfReplicas = blockUC.getNumExpectedLocations(); namesystem.incrementSafeBlockCount(numOfReplicas); } //and fall through to next clause } //add replica if appropriate if (reportedState == ReplicaState.FINALIZED) { addStoredBlockImmediate(storedBlock, storageInfo); } } } private void reportDiff(DatanodeStorageInfo storageInfo, BlockListAsLongs newReport, Collection<BlockInfo> toAdd, // add to DatanodeDescriptor Collection<Block> toRemove, // remove from DatanodeDescriptor Collection<Block> toInvalidate, // should be removed from DN Collection<BlockToMarkCorrupt> toCorrupt, // add to corrupt replicas list Collection<StatefulBlockInfo> toUC) { // add to under-construction list // place a delimiter in the list which separates blocks // that have been reported from those that have not BlockInfo delimiter = new BlockInfoContiguous(new Block(), (short) 1); AddBlockResult result = storageInfo.addBlock(delimiter); assert result == AddBlockResult.ADDED : "Delimiting block cannot be present in the node"; int headIndex = 0; //currently the delimiter is in the head of the list int curIndex; if (newReport == null) { newReport = BlockListAsLongs.EMPTY; } // scan the report and process newly reported blocks for (BlockReportReplica iblk : newReport) { ReplicaState iState = iblk.getState(); BlockInfo storedBlock = processReportedBlock(storageInfo, iblk, iState, toAdd, toInvalidate, toCorrupt, toUC); // move block to the head of the list if (storedBlock != null && (curIndex = storedBlock.findStorageInfo(storageInfo)) >= 0) { headIndex = storageInfo.moveBlockToHead(storedBlock, curIndex, headIndex); } } // collect blocks that have not been reported // all of them are next to the delimiter Iterator<BlockInfo> it = storageInfo.new BlockIterator(delimiter.getNext(0)); while(it.hasNext()) toRemove.add(it.next()); storageInfo.removeBlock(delimiter); } /** * Process a block replica reported by the data-node. * No side effects except adding to the passed-in Collections. * * <ol> * <li>If the block is not known to the system (not in blocksMap) then the * data-node should be notified to invalidate this block.</li> * <li>If the reported replica is valid that is has the same generation stamp * and length as recorded on the name-node, then the replica location should * be added to the name-node.</li> * <li>If the reported replica is not valid, then it is marked as corrupt, * which triggers replication of the existing valid replicas. * Corrupt replicas are removed from the system when the block * is fully replicated.</li> * <li>If the reported replica is for a block currently marked "under * construction" in the NN, then it should be added to the * BlockInfoUnderConstruction's list of replicas.</li> * </ol> * * @param storageInfo DatanodeStorageInfo that sent the report. * @param block reported block replica * @param reportedState reported replica state * @param toAdd add to DatanodeDescriptor * @param toInvalidate missing blocks (not in the blocks map) * should be removed from the data-node * @param toCorrupt replicas with unexpected length or generation stamp; * add to corrupt replicas * @param toUC replicas of blocks currently under construction * @return the up-to-date stored block, if it should be kept. * Otherwise, null. 
*/ private BlockInfo processReportedBlock( final DatanodeStorageInfo storageInfo, final Block block, final ReplicaState reportedState, final Collection<BlockInfo> toAdd, final Collection<Block> toInvalidate, final Collection<BlockToMarkCorrupt> toCorrupt, final Collection<StatefulBlockInfo> toUC) { DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor(); if(LOG.isDebugEnabled()) { LOG.debug("Reported block " + block + " on " + dn + " size " + block.getNumBytes() + " replicaState = " + reportedState); } if (shouldPostponeBlocksFromFuture && namesystem.isGenStampInFuture(block)) { queueReportedBlock(storageInfo, block, reportedState, QUEUE_REASON_FUTURE_GENSTAMP); return null; } // find block by blockId BlockInfo storedBlock = blocksMap.getStoredBlock(block); if(storedBlock == null) { // If blocksMap does not contain reported block id, // the replica should be removed from the data-node. toInvalidate.add(new Block(block)); return null; } BlockUCState ucState = storedBlock.getBlockUCState(); // Block is on the NN if(LOG.isDebugEnabled()) { LOG.debug("In memory blockUCState = " + ucState); } // Ignore replicas already scheduled to be removed from the DN if(invalidateBlocks.contains(dn, block)) { /* * TODO: following assertion is incorrect, see HDFS-2668 assert * storedBlock.findDatanode(dn) < 0 : "Block " + block + * " in recentInvalidatesSet should not appear in DN " + dn; */ return storedBlock; } BlockToMarkCorrupt c = checkReplicaCorrupt( block, reportedState, storedBlock, ucState, dn); if (c != null) { if (shouldPostponeBlocksFromFuture) { // If the block is an out-of-date generation stamp or state, // but we're the standby, we shouldn't treat it as corrupt, // but instead just queue it for later processing. // TODO: Pretty confident this should be s/storedBlock/block below, // since we should be postponing the info of the reported block, not // the stored block. See HDFS-6289 for more context. queueReportedBlock(storageInfo, storedBlock, reportedState, QUEUE_REASON_CORRUPT_STATE); } else { toCorrupt.add(c); } return storedBlock; } if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) { toUC.add(new StatefulBlockInfo( (BlockInfoContiguousUnderConstruction) storedBlock, new Block(block), reportedState)); return storedBlock; } // Add replica if appropriate. If the replica was previously corrupt // but now okay, it might need to be updated. if (reportedState == ReplicaState.FINALIZED && (storedBlock.findStorageInfo(storageInfo) == -1 || corruptReplicas.isReplicaCorrupt(storedBlock, dn))) { toAdd.add(storedBlock); } return storedBlock; } /** * Queue the given reported block for later processing in the * standby node. @see PendingDataNodeMessages. * @param reason a textual reason to report in the debug logs */ private void queueReportedBlock(DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState, String reason) { assert shouldPostponeBlocksFromFuture; if (LOG.isDebugEnabled()) { LOG.debug("Queueing reported block " + block + " in state " + reportedState + " from datanode " + storageInfo.getDatanodeDescriptor() + " for later processing because " + reason + "."); } pendingDNMessages.enqueueReportedBlock(storageInfo, block, reportedState); } /** * Try to process any messages that were previously queued for the given * block. This is called from FSEditLogLoader whenever a block's state * in the namespace has changed or a new block has been created. 
*/ public void processQueuedMessagesForBlock(Block b) throws IOException { Queue<ReportedBlockInfo> queue = pendingDNMessages.takeBlockQueue(b); if (queue == null) { // Nothing to re-process return; } processQueuedMessages(queue); } private void processQueuedMessages(Iterable<ReportedBlockInfo> rbis) throws IOException { for (ReportedBlockInfo rbi : rbis) { if (LOG.isDebugEnabled()) { LOG.debug("Processing previously queued message " + rbi); } if (rbi.getReportedState() == null) { // This is a DELETE_BLOCK request DatanodeStorageInfo storageInfo = rbi.getStorageInfo(); removeStoredBlock(rbi.getBlock(), storageInfo.getDatanodeDescriptor()); } else { processAndHandleReportedBlock(rbi.getStorageInfo(), rbi.getBlock(), rbi.getReportedState(), null); } } } /** * Process any remaining queued datanode messages after entering * active state. At this point they will not be re-queued since * we are the definitive master node and thus should be up-to-date * with the namespace information. */ public void processAllPendingDNMessages() throws IOException { assert !shouldPostponeBlocksFromFuture : "processAllPendingDNMessages() should be called after disabling " + "block postponement."; int count = pendingDNMessages.count(); if (count > 0) { LOG.info("Processing " + count + " messages from DataNodes " + "that were previously queued during standby state"); } processQueuedMessages(pendingDNMessages.takeAll()); assert pendingDNMessages.count() == 0; } /** * The next two methods test the various cases under which we must conclude * that the replica is corrupt, or under construction. These are laid out * as switch statements, on the theory that it is easier to understand * the combinatorics of reportedState and ucState that way. It should be * at least as efficient as boolean expressions.
* * @return a BlockToMarkCorrupt object, or null if the replica is not corrupt */ private BlockToMarkCorrupt checkReplicaCorrupt( Block reported, ReplicaState reportedState, BlockInfo storedBlock, BlockUCState ucState, DatanodeDescriptor dn) { switch(reportedState) { case FINALIZED: switch(ucState) { case COMPLETE: case COMMITTED: if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) { final long reportedGS = reported.getGenerationStamp(); return new BlockToMarkCorrupt(storedBlock, reportedGS, "block is " + ucState + " and reported genstamp " + reportedGS + " does not match genstamp in block map " + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); } else if (storedBlock.getNumBytes() != reported.getNumBytes()) { return new BlockToMarkCorrupt(storedBlock, "block is " + ucState + " and reported length " + reported.getNumBytes() + " does not match " + "length in block map " + storedBlock.getNumBytes(), Reason.SIZE_MISMATCH); } else { return null; // not corrupt } case UNDER_CONSTRUCTION: if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) { final long reportedGS = reported.getGenerationStamp(); return new BlockToMarkCorrupt(storedBlock, reportedGS, "block is " + ucState + " and reported state " + reportedState + ", But reported genstamp " + reportedGS + " does not match genstamp in block map " + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); } return null; default: return null; } case RBW: case RWR: if (!storedBlock.isComplete()) { return null; // not corrupt } else if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) { final long reportedGS = reported.getGenerationStamp(); return new BlockToMarkCorrupt(storedBlock, reportedGS, "reported " + reportedState + " replica with genstamp " + reportedGS + " does not match COMPLETE block's genstamp in block map " + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH); } else { // COMPLETE block, same genstamp if (reportedState == ReplicaState.RBW) { // If it's a RBW report for a COMPLETE block, it may just be that // the block report got a little bit delayed after the pipeline // closed. So, ignore this report, assuming we will get a // FINALIZED replica later. 
See HDFS-2791 LOG.info("Received an RBW replica for " + storedBlock + " on " + dn + ": ignoring it, since it is " + "complete with the same genstamp"); return null; } else { return new BlockToMarkCorrupt(storedBlock, "reported replica has invalid state " + reportedState, Reason.INVALID_STATE); } } case RUR: // should not be reported case TEMPORARY: // should not be reported default: String msg = "Unexpected replica state " + reportedState + " for block: " + storedBlock + " on " + dn + " size " + storedBlock.getNumBytes(); // log here at WARN level since this is really a broken HDFS invariant LOG.warn(msg); return new BlockToMarkCorrupt(storedBlock, msg, Reason.INVALID_STATE); } } private boolean isBlockUnderConstruction(BlockInfo storedBlock, BlockUCState ucState, ReplicaState reportedState) { switch(reportedState) { case FINALIZED: switch(ucState) { case UNDER_CONSTRUCTION: case UNDER_RECOVERY: return true; default: return false; } case RBW: case RWR: return (!storedBlock.isComplete()); case RUR: // should not be reported case TEMPORARY: // should not be reported default: return false; } } void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock, DatanodeStorageInfo storageInfo) throws IOException { BlockInfoContiguousUnderConstruction block = ucBlock.storedBlock; block.addReplicaIfNotPresent( storageInfo, ucBlock.reportedBlock, ucBlock.reportedState); if (ucBlock.reportedState == ReplicaState.FINALIZED && (block.findStorageInfo(storageInfo) < 0)) { addStoredBlock(block, storageInfo, null, true); } } /** * Faster version of {@link #addStoredBlock}, * intended for use with initial block report at startup. If not in startup * safe mode, will call standard addStoredBlock(). Assumes this method is * called "immediately" so there is no need to refresh the storedBlock from * blocksMap. Doesn't handle underReplication/overReplication, or worry about * pendingReplications or corruptReplicas, because it's in startup safe mode. * Doesn't log every block, because there are typically millions of them. * * @throws IOException */ private void addStoredBlockImmediate(BlockInfo storedBlock, DatanodeStorageInfo storageInfo) throws IOException { assert (storedBlock != null && namesystem.hasWriteLock()); if (!namesystem.isInStartupSafeMode() || namesystem.isPopulatingReplQueues()) { addStoredBlock(storedBlock, storageInfo, null, false); return; } // just add it AddBlockResult result = storageInfo.addBlock(storedBlock); // Now check for completion of blocks and safe block count int numCurrentReplica = countLiveNodes(storedBlock); if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED && numCurrentReplica >= minReplication) { completeBlock(storedBlock.getBlockCollection(), storedBlock, false); } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) { // check whether safe replication is reached for the block // only complete blocks are counted towards that. // In the case that the block just became complete above, completeBlock() // handles the safe block count maintenance. namesystem.incrementSafeBlockCount(numCurrentReplica); } } /** * Modify (block-->datanode) map. Remove block from set of * needed replications if this takes care of the problem. * @return the block that is stored in blockMap. 
*/ private Block addStoredBlock(final BlockInfo block, DatanodeStorageInfo storageInfo, DatanodeDescriptor delNodeHint, boolean logEveryBlock) throws IOException { assert block != null && namesystem.hasWriteLock(); BlockInfo storedBlock; DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); if (block instanceof BlockInfoContiguousUnderConstruction) { //refresh our copy in case the block got completed in another thread storedBlock = blocksMap.getStoredBlock(block); } else { storedBlock = block; } if (storedBlock == null || storedBlock.isDeleted()) { // If this block does not belong to anyfile, then we are done. blockLog.debug("BLOCK* addStoredBlock: {} on {} size {} but it does not" + " belong to any file", block, node, block.getNumBytes()); // we could add this block to invalidate set of this datanode. // it will happen in next block report otherwise. return block; } BlockCollection bc = storedBlock.getBlockCollection(); assert bc != null : "Block must belong to a file"; // add block to the datanode AddBlockResult result = storageInfo.addBlock(storedBlock); int curReplicaDelta; if (result == AddBlockResult.ADDED) { curReplicaDelta = 1; if (logEveryBlock) { logAddStoredBlock(storedBlock, node); } } else if (result == AddBlockResult.REPLACED) { curReplicaDelta = 0; blockLog.warn("BLOCK* addStoredBlock: block {} moved to storageType " + "{} on node {}", storedBlock, storageInfo.getStorageType(), node); } else { // if the same block is added again and the replica was corrupt // previously because of a wrong gen stamp, remove it from the // corrupt block list. corruptReplicas.removeFromCorruptReplicasMap(block, node, Reason.GENSTAMP_MISMATCH); curReplicaDelta = 0; blockLog.warn("BLOCK* addStoredBlock: Redundant addStoredBlock request" + " received for {} on node {} size {}", storedBlock, node, storedBlock.getNumBytes()); } // Now check for completion of blocks and safe block count NumberReplicas num = countNodes(storedBlock); int numLiveReplicas = num.liveReplicas(); int numCurrentReplica = numLiveReplicas + pendingReplications.getNumReplicas(storedBlock); if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED && numLiveReplicas >= minReplication) { storedBlock = completeBlock(bc, storedBlock, false); } else if (storedBlock.isComplete() && result == AddBlockResult.ADDED) { // check whether safe replication is reached for the block // only complete blocks are counted towards that // Is no-op if not in safe mode. // In the case that the block just became complete above, completeBlock() // handles the safe block count maintenance. 
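      // numCurrentReplica here includes replicas that are still only pending
      // replication (see the computation above), so the safe-mode block total
      // is advanced based on live plus scheduled copies of this block.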
namesystem.incrementSafeBlockCount(numCurrentReplica); } // if file is under construction, then done for now if (bc.isUnderConstruction()) { return storedBlock; } // do not try to handle over/under-replicated blocks during first safe mode if (!namesystem.isPopulatingReplQueues()) { return storedBlock; } // handle underReplication/overReplication short fileReplication = bc.getPreferredBlockReplication(); if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) { neededReplications.remove(storedBlock, numCurrentReplica, num.decommissionedAndDecommissioning(), fileReplication); } else { updateNeededReplications(storedBlock, curReplicaDelta, 0); } if (numCurrentReplica > fileReplication) { processOverReplicatedBlock(storedBlock, fileReplication, node, delNodeHint); } // If the file replication has reached desired value // we can remove any corrupt replicas the block may have int corruptReplicasCount = corruptReplicas.numCorruptReplicas(storedBlock); int numCorruptNodes = num.corruptReplicas(); if (numCorruptNodes != corruptReplicasCount) { LOG.warn("Inconsistent number of corrupt replicas for " + storedBlock + "blockMap has " + numCorruptNodes + " but corrupt replicas map has " + corruptReplicasCount); } if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileReplication)) invalidateCorruptReplicas(storedBlock); return storedBlock; } private void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) { if (!blockLog.isDebugEnabled()) { return; } StringBuilder sb = new StringBuilder(500); sb.append("BLOCK* addStoredBlock: blockMap updated: ") .append(node) .append(" is added to "); storedBlock.appendStringTo(sb); sb.append(" size " ) .append(storedBlock.getNumBytes()); blockLog.debug(sb.toString()); } /** * Invalidate corrupt replicas. * <p> * This will remove the replicas from the block's location list, * add them to {@link #invalidateBlocks} so that they could be further * deleted from the respective data-nodes, * and remove the block from corruptReplicasMap. * <p> * This method should be called when the block has sufficient * number of live replicas. * * @param blk Block whose corrupt replicas need to be invalidated */ private void invalidateCorruptReplicas(BlockInfo blk) { Collection<DatanodeDescriptor> nodes = corruptReplicas.getNodes(blk); boolean removedFromBlocksMap = true; if (nodes == null) return; // make a copy of the array of nodes in order to avoid // ConcurrentModificationException, when the block is removed from the node DatanodeDescriptor[] nodesCopy = nodes.toArray(new DatanodeDescriptor[0]); for (DatanodeDescriptor node : nodesCopy) { try { if (!invalidateBlock(new BlockToMarkCorrupt(blk, null, Reason.ANY), node)) { removedFromBlocksMap = false; } } catch (IOException e) { blockLog.debug("invalidateCorruptReplicas error in deleting bad block" + " {} on {}", blk, node, e); removedFromBlocksMap = false; } } // Remove the block from corruptReplicasMap if (removedFromBlocksMap) { corruptReplicas.removeFromCorruptReplicasMap(blk); } } /** * For each block in the name-node verify whether it belongs to any file, * over or under replicated. Place it into the respective queue. 
*/ public void processMisReplicatedBlocks() { assert namesystem.hasWriteLock(); stopReplicationInitializer(); neededReplications.clear(); replicationQueuesInitializer = new Daemon() { @Override public void run() { try { processMisReplicatesAsync(); } catch (InterruptedException ie) { LOG.info("Interrupted while processing replication queues."); } catch (Exception e) { LOG.error("Error while processing replication queues async", e); } } }; replicationQueuesInitializer.setName("Replication Queue Initializer"); replicationQueuesInitializer.start(); } /* * Stop the ongoing initialisation of replication queues */ private void stopReplicationInitializer() { if (replicationQueuesInitializer != null) { replicationQueuesInitializer.interrupt(); try { replicationQueuesInitializer.join(); } catch (final InterruptedException e) { LOG.warn("Interrupted while waiting for replicationQueueInitializer. Returning.."); return; } finally { replicationQueuesInitializer = null; } } } /* * Since the BlocksMapGset does not throw the ConcurrentModificationException * and supports further iteration after modification to list, there is a * chance of missing the newly added block while iterating. Since every * addition to blocksMap will check for mis-replication, missing mis-replication * check for new blocks will not be a problem. */ private void processMisReplicatesAsync() throws InterruptedException { long nrInvalid = 0, nrOverReplicated = 0; long nrUnderReplicated = 0, nrPostponed = 0, nrUnderConstruction = 0; long startTimeMisReplicatedScan = Time.monotonicNow(); Iterator<BlockInfo> blocksItr = blocksMap.getBlocks().iterator(); long totalBlocks = blocksMap.size(); replicationQueuesInitProgress = 0; long totalProcessed = 0; long sleepDuration = Math.max(1, Math.min(numBlocksPerIteration/1000, 10000)); while (namesystem.isRunning() && !Thread.currentThread().isInterrupted()) { int processed = 0; namesystem.writeLockInterruptibly(); try { while (processed < numBlocksPerIteration && blocksItr.hasNext()) { BlockInfo block = blocksItr.next(); MisReplicationResult res = processMisReplicatedBlock(block); if (LOG.isTraceEnabled()) { LOG.trace("block " + block + ": " + res); } switch (res) { case UNDER_REPLICATED: nrUnderReplicated++; break; case OVER_REPLICATED: nrOverReplicated++; break; case INVALID: nrInvalid++; break; case POSTPONE: nrPostponed++; postponeBlock(block); break; case UNDER_CONSTRUCTION: nrUnderConstruction++; break; case OK: break; default: throw new AssertionError("Invalid enum value: " + res); } processed++; } totalProcessed += processed; // there is a possibility that if any of the blocks deleted/added during // initialisation, then progress might be different. replicationQueuesInitProgress = Math.min((double) totalProcessed / totalBlocks, 1.0); if (!blocksItr.hasNext()) { LOG.info("Total number of blocks = " + blocksMap.size()); LOG.info("Number of invalid blocks = " + nrInvalid); LOG.info("Number of under-replicated blocks = " + nrUnderReplicated); LOG.info("Number of over-replicated blocks = " + nrOverReplicated + ((nrPostponed > 0) ? (" (" + nrPostponed + " postponed)") : "")); LOG.info("Number of blocks being written = " + nrUnderConstruction); NameNode.stateChangeLog .info("STATE* Replication Queue initialization " + "scan for invalid, over- and under-replicated blocks " + "completed in " + (Time.monotonicNow() - startTimeMisReplicatedScan) + " msec"); break; } } finally { namesystem.writeUnlock(); // Make sure it is out of the write lock for sufficiently long time. 
Thread.sleep(sleepDuration); } } if (Thread.currentThread().isInterrupted()) { LOG.info("Interrupted while processing replication queues."); } } /** * Get the progress of the Replication queues initialisation * * @return Returns values between 0 and 1 for the progress. */ public double getReplicationQueuesInitProgress() { return replicationQueuesInitProgress; } /** * Process a single possibly misreplicated block. This adds it to the * appropriate queues if necessary, and returns a result code indicating * what happened with it. */ private MisReplicationResult processMisReplicatedBlock(BlockInfo block) { if (block.isDeleted()) { // block does not belong to any file addToInvalidates(block); return MisReplicationResult.INVALID; } if (!block.isComplete()) { // Incomplete blocks are never considered mis-replicated -- // they'll be reached when they are completed or recovered. return MisReplicationResult.UNDER_CONSTRUCTION; } // calculate current replication short expectedReplication = block.getBlockCollection().getPreferredBlockReplication(); NumberReplicas num = countNodes(block); int numCurrentReplica = num.liveReplicas(); // add to under-replicated queue if need to be if (isNeededReplication(block, expectedReplication, numCurrentReplica)) { if (neededReplications.add(block, numCurrentReplica, num .decommissionedAndDecommissioning(), expectedReplication)) { return MisReplicationResult.UNDER_REPLICATED; } } if (numCurrentReplica > expectedReplication) { if (num.replicasOnStaleNodes() > 0) { // If any of the replicas of this block are on nodes that are // considered "stale", then these replicas may in fact have // already been deleted. So, we cannot safely act on the // over-replication until a later point in time, when // the "stale" nodes have block reported. return MisReplicationResult.POSTPONE; } // over-replicated block processOverReplicatedBlock(block, expectedReplication, null, null); return MisReplicationResult.OVER_REPLICATED; } return MisReplicationResult.OK; } /** Set replication for the blocks. */ public void setReplication(final short oldRepl, final short newRepl, final String src, final BlockInfo... blocks) { if (newRepl == oldRepl) { return; } // update needReplication priority queues for(BlockInfo b : blocks) { updateNeededReplications(b, 0, newRepl-oldRepl); } if (oldRepl > newRepl) { // old replication > the new one; need to remove copies LOG.info("Decreasing replication from " + oldRepl + " to " + newRepl + " for " + src); for(BlockInfo b : blocks) { processOverReplicatedBlock(b, newRepl, null, null); } } else { // replication factor is increased LOG.info("Increasing replication from " + oldRepl + " to " + newRepl + " for " + src); } } /** * Find how many of the containing nodes are "extra", if any. * If there are any extras, call chooseExcessReplicates() to * mark them in the excessReplicateMap. 
*/ private void processOverReplicatedBlock(final Block block, final short replication, final DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) { assert namesystem.hasWriteLock(); if (addedNode == delNodeHint) { delNodeHint = null; } Collection<DatanodeStorageInfo> nonExcess = new ArrayList<DatanodeStorageInfo>(); Collection<DatanodeDescriptor> corruptNodes = corruptReplicas .getNodes(block); for(DatanodeStorageInfo storage : blocksMap.getStorages(block, State.NORMAL)) { final DatanodeDescriptor cur = storage.getDatanodeDescriptor(); if (storage.areBlockContentsStale()) { LOG.info("BLOCK* processOverReplicatedBlock: " + "Postponing processing of over-replicated " + block + " since storage + " + storage + "datanode " + cur + " does not yet have up-to-date " + "block information."); postponeBlock(block); return; } LightWeightLinkedSet<Block> excessBlocks = excessReplicateMap.get(cur .getDatanodeUuid()); if (excessBlocks == null || !excessBlocks.contains(block)) { if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) { // exclude corrupt replicas if (corruptNodes == null || !corruptNodes.contains(cur)) { nonExcess.add(storage); } } } } chooseExcessReplicates(nonExcess, block, replication, addedNode, delNodeHint, blockplacement); } /** * We want "replication" replicates for the block, but we now have too many. * In this method, copy enough nodes from 'srcNodes' into 'dstNodes' such that: * * srcNodes.size() - dstNodes.size() == replication * * We pick node that make sure that replicas are spread across racks and * also try hard to pick one with least free space. * The algorithm is first to pick a node with least free space from nodes * that are on a rack holding more than one replicas of the block. * So removing such a replica won't remove a rack. 
* If no such a node is available, * then pick a node with least free space */ private void chooseExcessReplicates(final Collection<DatanodeStorageInfo> nonExcess, Block b, short replication, DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint, BlockPlacementPolicy replicator) { assert namesystem.hasWriteLock(); // first form a rack to datanodes map and BlockCollection bc = getBlockCollection(b); final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(bc.getStoragePolicyID()); final List<StorageType> excessTypes = storagePolicy.chooseExcess( replication, DatanodeStorageInfo.toStorageTypes(nonExcess)); final Map<String, List<DatanodeStorageInfo>> rackMap = new HashMap<String, List<DatanodeStorageInfo>>(); final List<DatanodeStorageInfo> moreThanOne = new ArrayList<DatanodeStorageInfo>(); final List<DatanodeStorageInfo> exactlyOne = new ArrayList<DatanodeStorageInfo>(); // split nodes into two sets // moreThanOne contains nodes on rack with more than one replica // exactlyOne contains the remaining nodes replicator.splitNodesWithRack(nonExcess, rackMap, moreThanOne, exactlyOne); // pick one node to delete that favors the delete hint // otherwise pick one with least space from priSet if it is not empty // otherwise one node with least space from remains boolean firstOne = true; final DatanodeStorageInfo delNodeHintStorage = DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, delNodeHint); final DatanodeStorageInfo addedNodeStorage = DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, addedNode); while (nonExcess.size() - replication > 0) { final DatanodeStorageInfo cur; if (useDelHint(firstOne, delNodeHintStorage, addedNodeStorage, moreThanOne, excessTypes)) { cur = delNodeHintStorage; } else { // regular excessive replica removal cur = replicator.chooseReplicaToDelete(bc, b, replication, moreThanOne, exactlyOne, excessTypes); } firstOne = false; // adjust rackmap, moreThanOne, and exactlyOne replicator.adjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, cur); nonExcess.remove(cur); addToExcessReplicate(cur.getDatanodeDescriptor(), b); // // The 'excessblocks' tracks blocks until we get confirmation // that the datanode has deleted them; the only way we remove them // is when we get a "removeBlock" message. // // The 'invalidate' list is used to inform the datanode the block // should be deleted. Items are removed from the invalidate list // upon giving instructions to the namenode. 
// addToInvalidates(b, cur.getDatanodeDescriptor()); blockLog.debug("BLOCK* chooseExcessReplicates: " +"({}, {}) is added to invalidated blocks set", cur, b); } } /** Check if we can use delHint */ static boolean useDelHint(boolean isFirst, DatanodeStorageInfo delHint, DatanodeStorageInfo added, List<DatanodeStorageInfo> moreThan1Racks, List<StorageType> excessTypes) { if (!isFirst) { return false; // only consider delHint for the first case } else if (delHint == null) { return false; // no delHint } else if (!excessTypes.contains(delHint.getStorageType())) { return false; // delHint storage type is not an excess type } else { // check if removing delHint reduces the number of racks if (moreThan1Racks.contains(delHint)) { return true; // delHint and some other nodes are under the same rack } else if (added != null && !moreThan1Racks.contains(added)) { return true; // the added node adds a new rack } return false; // removing delHint reduces the number of racks; } } private void addToExcessReplicate(DatanodeInfo dn, Block block) { assert namesystem.hasWriteLock(); LightWeightLinkedSet<Block> excessBlocks = excessReplicateMap.get(dn.getDatanodeUuid()); if (excessBlocks == null) { excessBlocks = new LightWeightLinkedSet<Block>(); excessReplicateMap.put(dn.getDatanodeUuid(), excessBlocks); } if (excessBlocks.add(block)) { excessBlocksCount.incrementAndGet(); blockLog.debug("BLOCK* addToExcessReplicate: ({}, {}) is added to" + " excessReplicateMap", dn, block); } } private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block, DatanodeDescriptor node) { if (shouldPostponeBlocksFromFuture && namesystem.isGenStampInFuture(block)) { queueReportedBlock(storageInfo, block, null, QUEUE_REASON_FUTURE_GENSTAMP); return; } removeStoredBlock(block, node); } /** * Modify (block-->datanode) map. Possibly generate replication tasks, if the * removed block is still valid. */ public void removeStoredBlock(Block block, DatanodeDescriptor node) { blockLog.debug("BLOCK* removeStoredBlock: {} from {}", block, node); assert (namesystem.hasWriteLock()); { BlockInfo storedBlock = getStoredBlock(block); if (storedBlock == null || !blocksMap.removeNode(storedBlock, node)) { blockLog.debug("BLOCK* removeStoredBlock: {} has already been" + " removed from node {}", block, node); return; } CachedBlock cblock = namesystem.getCacheManager().getCachedBlocks() .get(new CachedBlock(block.getBlockId(), (short) 0, false)); if (cblock != null) { boolean removed = false; removed |= node.getPendingCached().remove(cblock); removed |= node.getCached().remove(cblock); removed |= node.getPendingUncached().remove(cblock); if (removed) { blockLog.debug("BLOCK* removeStoredBlock: {} removed from caching " + "related lists on node {}", block, node); } } // // It's possible that the block was removed because of a datanode // failure. If the block is still valid, check if replication is // necessary. In that case, put block on a possibly-will- // be-replicated list. // BlockCollection bc = blocksMap.getBlockCollection(block); if (bc != null) { namesystem.decrementSafeBlockCount(storedBlock); updateNeededReplications(storedBlock, -1, 0); } // // We've removed a block from a node, so it's definitely no longer // in "excess" there. 
// LightWeightLinkedSet<Block> excessBlocks = excessReplicateMap.get(node .getDatanodeUuid()); if (excessBlocks != null) { if (excessBlocks.remove(block)) { excessBlocksCount.decrementAndGet(); blockLog.debug("BLOCK* removeStoredBlock: {} is removed from " + "excessBlocks", block); if (excessBlocks.size() == 0) { excessReplicateMap.remove(node.getDatanodeUuid()); } } } // Remove the replica from corruptReplicas corruptReplicas.removeFromCorruptReplicasMap(block, node); } } /** * Get all valid locations of the block & add the block to results * return the length of the added block; 0 if the block is not added */ private long addBlock(Block block, List<BlockWithLocations> results) { final List<DatanodeStorageInfo> locations = getValidLocations(block); if(locations.size() == 0) { return 0; } else { final String[] datanodeUuids = new String[locations.size()]; final String[] storageIDs = new String[datanodeUuids.length]; final StorageType[] storageTypes = new StorageType[datanodeUuids.length]; for(int i = 0; i < locations.size(); i++) { final DatanodeStorageInfo s = locations.get(i); datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid(); storageIDs[i] = s.getStorageID(); storageTypes[i] = s.getStorageType(); } results.add(new BlockWithLocations(block, datanodeUuids, storageIDs, storageTypes)); return block.getNumBytes(); } } /** * The given node is reporting that it received a certain block. */ @VisibleForTesting void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint) throws IOException { DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); // Decrement number of blocks scheduled to this datanode. // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with // RECEIVED_BLOCK), we currently also decrease the approximate number. node.decrementBlocksScheduled(storageInfo.getStorageType()); // get the deletion hint node DatanodeDescriptor delHintNode = null; if (delHint != null && delHint.length() != 0) { delHintNode = datanodeManager.getDatanode(delHint); if (delHintNode == null) { blockLog.warn("BLOCK* blockReceived: {} is expected to be removed " + "from an unrecorded node {}", block, delHint); } } // // Modify the blocks->datanode map and node's map. 
// BlockInfo storedBlock = getStoredBlock(block); if (storedBlock != null) { pendingReplications.decrement(getStoredBlock(block), node); } processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED, delHintNode); } private void processAndHandleReportedBlock( DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState, DatanodeDescriptor delHintNode) throws IOException { // blockReceived reports a finalized block Collection<BlockInfo> toAdd = new LinkedList<BlockInfo>(); Collection<Block> toInvalidate = new LinkedList<Block>(); Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>(); Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>(); final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor(); processReportedBlock(storageInfo, block, reportedState, toAdd, toInvalidate, toCorrupt, toUC); // the block is only in one of the to-do lists // if it is in none then data-node already has it assert toUC.size() + toAdd.size() + toInvalidate.size() + toCorrupt.size() <= 1 : "The block should be only in one of the lists."; for (StatefulBlockInfo b : toUC) { addStoredBlockUnderConstruction(b, storageInfo); } long numBlocksLogged = 0; for (BlockInfo b : toAdd) { addStoredBlock(b, storageInfo, delHintNode, numBlocksLogged < maxNumBlocksToLog); numBlocksLogged++; } if (numBlocksLogged > maxNumBlocksToLog) { blockLog.debug("BLOCK* addBlock: logged info for {} of {} reported.", maxNumBlocksToLog, numBlocksLogged); } for (Block b : toInvalidate) { blockLog.debug("BLOCK* addBlock: block {} on node {} size {} does not " + "belong to any file", b, node, b.getNumBytes()); addToInvalidates(b, node); } for (BlockToMarkCorrupt b : toCorrupt) { markBlockAsCorrupt(b, storageInfo, node); } } /** * The given node is reporting incremental information about some blocks. * This includes blocks that are starting to be received, completed being * received, or deleted. * * This method must be called with FSNamesystem lock held. */ public void processIncrementalBlockReport(final DatanodeID nodeID, final StorageReceivedDeletedBlocks srdb) throws IOException { assert namesystem.hasWriteLock(); int received = 0; int deleted = 0; int receiving = 0; final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID); if (node == null || !node.isAlive) { blockLog.warn("BLOCK* processIncrementalBlockReport" + " is received from dead or unregistered node {}", nodeID); throw new IOException( "Got incremental block report from unregistered or dead node"); } DatanodeStorageInfo storageInfo = node.getStorageInfo(srdb.getStorage().getStorageID()); if (storageInfo == null) { // The DataNode is reporting an unknown storage. Usually the NN learns // about new storages from heartbeats but during NN restart we may // receive a block report or incremental report before the heartbeat. // We must handle this for protocol compatibility. This issue was // uncovered by HDFS-6094. 
storageInfo = node.updateStorage(srdb.getStorage()); } for (ReceivedDeletedBlockInfo rdbi : srdb.getBlocks()) { switch (rdbi.getStatus()) { case DELETED_BLOCK: removeStoredBlock(storageInfo, rdbi.getBlock(), node); deleted++; break; case RECEIVED_BLOCK: addBlock(storageInfo, rdbi.getBlock(), rdbi.getDelHints()); received++; break; case RECEIVING_BLOCK: receiving++; processAndHandleReportedBlock(storageInfo, rdbi.getBlock(), ReplicaState.RBW, null); break; default: String msg = "Unknown block status code reported by " + nodeID + ": " + rdbi; blockLog.warn(msg); assert false : msg; // if assertions are enabled, throw. break; } blockLog.debug("BLOCK* block {}: {} is received from {}", rdbi.getStatus(), rdbi.getBlock(), nodeID); } blockLog.debug("*BLOCK* NameNode.processIncrementalBlockReport: from " + "{} receiving: {}, received: {}, deleted: {}", nodeID, receiving, received, deleted); } /** * Return the number of nodes hosting a given block, grouped * by the state of those replicas. */ public NumberReplicas countNodes(BlockInfo b) { int decommissioned = 0; int decommissioning = 0; int live = 0; int corrupt = 0; int excess = 0; int stale = 0; Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b); for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); if ((nodesCorrupt != null) && (nodesCorrupt.contains(node))) { corrupt++; } else if (node.isDecommissionInProgress()) { decommissioning++; } else if (node.isDecommissioned()) { decommissioned++; } else { LightWeightLinkedSet<Block> blocksExcess = excessReplicateMap.get(node .getDatanodeUuid()); if (blocksExcess != null && blocksExcess.contains(b)) { excess++; } else { live++; } } if (storage.areBlockContentsStale()) { stale++; } } return new NumberReplicas(live, decommissioned, decommissioning, corrupt, excess, stale); } /** * Simpler, faster form of {@link #countNodes} that only returns the number * of live nodes. If in startup safemode (or its 30-sec extension period), * then it gains speed by ignoring issues of excess replicas or nodes * that are decommissioned or in process of becoming decommissioned. * If not in startup, then it calls {@link #countNodes} instead. * * @param b - the block being tested * @return count of live nodes for this block */ int countLiveNodes(BlockInfo b) { if (!namesystem.isInStartupSafeMode()) { return countNodes(b).liveReplicas(); } // else proceed with fast case int live = 0; Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b); for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); if ((nodesCorrupt == null) || (!nodesCorrupt.contains(node))) live++; } return live; } /** * On stopping decommission, check if the node has excess replicas. * If there are any excess replicas, call processOverReplicatedBlock(). * Process over replicated blocks only when active NN is out of safe mode. 
*/ void processOverReplicatedBlocksOnReCommission( final DatanodeDescriptor srcNode) { if (!namesystem.isPopulatingReplQueues()) { return; } final Iterator<BlockInfo> it = srcNode.getBlockIterator(); int numOverReplicated = 0; while(it.hasNext()) { final BlockInfo block = it.next(); BlockCollection bc = blocksMap.getBlockCollection(block); short expectedReplication = bc.getPreferredBlockReplication(); NumberReplicas num = countNodes(block); int numCurrentReplica = num.liveReplicas(); if (numCurrentReplica > expectedReplication) { // over-replicated block processOverReplicatedBlock(block, expectedReplication, null, null); numOverReplicated++; } } LOG.info("Invalidated " + numOverReplicated + " over-replicated blocks on " + srcNode + " during recommissioning"); } /** * Returns whether a node can be safely decommissioned based on its * liveness. Dead nodes cannot always be safely decommissioned. */ boolean isNodeHealthyForDecommission(DatanodeDescriptor node) { if (!node.checkBlockReportReceived()) { LOG.info("Node {} hasn't sent its first block report.", node); return false; } if (node.isAlive) { return true; } updateState(); if (pendingReplicationBlocksCount == 0 && underReplicatedBlocksCount == 0) { LOG.info("Node {} is dead and there are no under-replicated" + " blocks or blocks pending replication. Safe to decommission.", node); return true; } LOG.warn("Node {} is dead " + "while decommission is in progress. Cannot be safely " + "decommissioned since there is risk of reduced " + "data durability or data loss. Either restart the failed node or" + " force decommissioning by removing, calling refreshNodes, " + "then re-adding to the excludes files.", node); return false; } public int getActiveBlockCount() { return blocksMap.size(); } public DatanodeStorageInfo[] getStorages(BlockInfo block) { final DatanodeStorageInfo[] storages = new DatanodeStorageInfo[block.numNodes()]; int i = 0; for(DatanodeStorageInfo s : blocksMap.getStorages(block)) { storages[i++] = s; } return storages; } public int getTotalBlocks() { return blocksMap.size(); } public void removeBlock(BlockInfo block) { assert namesystem.hasWriteLock(); // No need to ACK blocks that are being removed entirely // from the namespace, since the removal of the associated // file already removes them from the block map below. 
block.setNumBytes(BlockCommand.NO_ACK); addToInvalidates(block); removeBlockFromMap(block); // Remove the block from pendingReplications and neededReplications pendingReplications.remove(block); neededReplications.remove(block, UnderReplicatedBlocks.LEVEL); if (postponedMisreplicatedBlocks.remove(block)) { postponedMisreplicatedBlocksCount.decrementAndGet(); } } public BlockInfo getStoredBlock(Block block) { return blocksMap.getStoredBlock(block); } /** updates a block in under replication queue */ private void updateNeededReplications(final BlockInfo block, final int curReplicasDelta, int expectedReplicasDelta) { namesystem.writeLock(); try { if (!namesystem.isPopulatingReplQueues()) { return; } NumberReplicas repl = countNodes(block); int curExpectedReplicas = getReplication(block); if (isNeededReplication(block, curExpectedReplicas, repl.liveReplicas())) { neededReplications.update(block, repl.liveReplicas(), repl .decommissionedAndDecommissioning(), curExpectedReplicas, curReplicasDelta, expectedReplicasDelta); } else { int oldReplicas = repl.liveReplicas()-curReplicasDelta; int oldExpectedReplicas = curExpectedReplicas-expectedReplicasDelta; neededReplications.remove(block, oldReplicas, repl.decommissionedAndDecommissioning(), oldExpectedReplicas); } } finally { namesystem.writeUnlock(); } } /** * Check replication of the blocks in the collection. * If any block is needed replication, insert it into the replication queue. * Otherwise, if the block is more than the expected replication factor, * process it as an over replicated block. */ public void checkReplication(BlockCollection bc) { final short expected = bc.getPreferredBlockReplication(); for (BlockInfo block : bc.getBlocks()) { final NumberReplicas n = countNodes(block); if (isNeededReplication(block, expected, n.liveReplicas())) { neededReplications.add(block, n.liveReplicas(), n.decommissionedAndDecommissioning(), expected); } else if (n.liveReplicas() > expected) { processOverReplicatedBlock(block, expected, null, null); } } } /** * Check that the indicated blocks are present and * replicated. */ public boolean checkBlocksProperlyReplicated( String src, BlockInfo[] blocks) { for (BlockInfo b: blocks) { if (!b.isComplete()) { final BlockInfoContiguousUnderConstruction uc = (BlockInfoContiguousUnderConstruction)b; final int numNodes = b.numNodes(); LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = " + uc.getBlockUCState() + ", replication# = " + numNodes + (numNodes < minReplication ? " < ": " >= ") + " minimum = " + minReplication + ") in file " + src); return false; } } return true; } /** * @return 0 if the block is not found; * otherwise, return the replication factor of the block. */ private int getReplication(Block block) { final BlockCollection bc = blocksMap.getBlockCollection(block); return bc == null? 0: bc.getPreferredBlockReplication(); } /** * Get blocks to invalidate for <i>nodeId</i> * in {@link #invalidateBlocks}. * * @return number of blocks scheduled for removal during this iteration. 
*/ private int invalidateWorkForOneNode(DatanodeInfo dn) { final List<Block> toInvalidate; namesystem.writeLock(); try { // blocks should not be replicated or removed if safe mode is on if (namesystem.isInSafeMode()) { LOG.debug("In safemode, not computing replication work"); return 0; } try { DatanodeDescriptor dnDescriptor = datanodeManager.getDatanode(dn); if (dnDescriptor == null) { LOG.warn("DataNode " + dn + " cannot be found with UUID " + dn.getDatanodeUuid() + ", removing block invalidation work."); invalidateBlocks.remove(dn); return 0; } toInvalidate = invalidateBlocks.invalidateWork(dnDescriptor); if (toInvalidate == null) { return 0; } } catch(UnregisteredNodeException une) { return 0; } } finally { namesystem.writeUnlock(); } blockLog.debug("BLOCK* {}: ask {} to delete {}", getClass().getSimpleName(), dn, toInvalidate); return toInvalidate.size(); } boolean blockHasEnoughRacks(Block b) { if (!this.shouldCheckForEnoughRacks) { return true; } boolean enoughRacks = false;; Collection<DatanodeDescriptor> corruptNodes = corruptReplicas.getNodes(b); int numExpectedReplicas = getReplication(b); String rackName = null; for(DatanodeStorageInfo storage : blocksMap.getStorages(b)) { final DatanodeDescriptor cur = storage.getDatanodeDescriptor(); if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) { if ((corruptNodes == null ) || !corruptNodes.contains(cur)) { if (numExpectedReplicas == 1 || (numExpectedReplicas > 1 && !datanodeManager.hasClusterEverBeenMultiRack())) { enoughRacks = true; break; } String rackNameNew = cur.getNetworkLocation(); if (rackName == null) { rackName = rackNameNew; } else if (!rackName.equals(rackNameNew)) { enoughRacks = true; break; } } } } return enoughRacks; } /** * A block needs replication if the number of replicas is less than expected * or if it does not have enough racks. */ boolean isNeededReplication(Block b, int expected, int current) { return current < expected || !blockHasEnoughRacks(b); } public long getMissingBlocksCount() { // not locking return this.neededReplications.getCorruptBlockSize(); } public long getMissingReplOneBlocksCount() { // not locking return this.neededReplications.getCorruptReplOneBlockSize(); } public BlockInfo addBlockCollection(BlockInfo block, BlockCollection bc) { return blocksMap.addBlockCollection(block, bc); } public BlockCollection getBlockCollection(Block b) { return blocksMap.getBlockCollection(b); } /** @return an iterator of the datanodes. */ public Iterable<DatanodeStorageInfo> getStorages(final Block block) { return blocksMap.getStorages(block); } public int numCorruptReplicas(Block block) { return corruptReplicas.numCorruptReplicas(block); } public void removeBlockFromMap(Block block) { removeFromExcessReplicateMap(block); blocksMap.removeBlock(block); // If block is removed from blocksMap remove it from corruptReplicasMap corruptReplicas.removeFromCorruptReplicasMap(block); } /** * If a block is removed from blocksMap, remove it from excessReplicateMap. 
*/ private void removeFromExcessReplicateMap(Block block) { for (DatanodeStorageInfo info : blocksMap.getStorages(block)) { String uuid = info.getDatanodeDescriptor().getDatanodeUuid(); LightWeightLinkedSet<Block> excessReplicas = excessReplicateMap.get(uuid); if (excessReplicas != null) { if (excessReplicas.remove(block)) { excessBlocksCount.decrementAndGet(); if (excessReplicas.isEmpty()) { excessReplicateMap.remove(uuid); } } } } } public int getCapacity() { return blocksMap.getCapacity(); } /** * Return an iterator over the set of blocks for which there are no replicas. */ public Iterator<BlockInfo> getCorruptReplicaBlockIterator() { return neededReplications.iterator( UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS); } /** * Get the replicas which are corrupt for a given block. */ public Collection<DatanodeDescriptor> getCorruptReplicas(Block block) { return corruptReplicas.getNodes(block); } /** * Get reason for certain corrupted replicas for a given block and a given dn. */ public String getCorruptReason(Block block, DatanodeDescriptor node) { return corruptReplicas.getCorruptReason(block, node); } /** @return the size of UnderReplicatedBlocks */ public int numOfUnderReplicatedBlocks() { return neededReplications.size(); } /** * Periodically calls computeReplicationWork(). */ private class ReplicationMonitor implements Runnable { @Override public void run() { while (namesystem.isRunning()) { try { // Process replication work only when active NN is out of safe mode. if (namesystem.isPopulatingReplQueues()) { computeDatanodeWork(); processPendingReplications(); rescanPostponedMisreplicatedBlocks(); } Thread.sleep(replicationRecheckInterval); } catch (Throwable t) { if (!namesystem.isRunning()) { LOG.info("Stopping ReplicationMonitor."); if (!(t instanceof InterruptedException)) { LOG.info("ReplicationMonitor received an exception" + " while shutting down.", t); } break; } else if (!checkNSRunning && t instanceof InterruptedException) { LOG.info("Stopping ReplicationMonitor for testing."); break; } LOG.error("ReplicationMonitor thread received Runtime exception. ", t); terminate(1, t); } } } } /** * Compute block replication and block invalidation work that can be scheduled * on data-nodes. The datanode will be informed of this work at the next * heartbeat. * * @return number of blocks scheduled for replication or removal. */ int computeDatanodeWork() { // Blocks should not be replicated or removed if in safe mode. // It's OK to check safe mode here w/o holding lock, in the worst // case extra replications will be scheduled, and these will get // fixed up later. if (namesystem.isInSafeMode()) { return 0; } final int numlive = heartbeatManager.getLiveDatanodeCount(); final int blocksToProcess = numlive * this.blocksReplWorkMultiplier; final int nodesToProcess = (int) Math.ceil(numlive * this.blocksInvalidateWorkPct); int workFound = this.computeReplicationWork(blocksToProcess); // Update counters namesystem.writeLock(); try { this.updateState(); this.scheduledReplicationBlocksCount = workFound; } finally { namesystem.writeUnlock(); } workFound += this.computeInvalidateWork(nodesToProcess); return workFound; } /** * Clear all queues that hold decisions previously made by * this NameNode. 
*/ public void clearQueues() { neededReplications.clear(); pendingReplications.clear(); excessReplicateMap.clear(); invalidateBlocks.clear(); datanodeManager.clearPendingQueues(); postponedMisreplicatedBlocks.clear(); postponedMisreplicatedBlocksCount.set(0); }; public static LocatedBlock newLocatedBlock( ExtendedBlock b, DatanodeStorageInfo[] storages, long startOffset, boolean corrupt) { // startOffset is unknown return new LocatedBlock( b, DatanodeStorageInfo.toDatanodeInfos(storages), DatanodeStorageInfo.toStorageIDs(storages), DatanodeStorageInfo.toStorageTypes(storages), startOffset, corrupt, null); } private static class ReplicationWork { private final BlockInfo block; private final BlockCollection bc; private final DatanodeDescriptor srcNode; private final List<DatanodeDescriptor> containingNodes; private final List<DatanodeStorageInfo> liveReplicaStorages; private final int additionalReplRequired; private DatanodeStorageInfo targets[]; private final int priority; public ReplicationWork(BlockInfo block, BlockCollection bc, DatanodeDescriptor srcNode, List<DatanodeDescriptor> containingNodes, List<DatanodeStorageInfo> liveReplicaStorages, int additionalReplRequired, int priority) { this.block = block; this.bc = bc; this.srcNode = srcNode; this.srcNode.incrementPendingReplicationWithoutTargets(); this.containingNodes = containingNodes; this.liveReplicaStorages = liveReplicaStorages; this.additionalReplRequired = additionalReplRequired; this.priority = priority; this.targets = null; } private void chooseTargets(BlockPlacementPolicy blockplacement, BlockStoragePolicySuite storagePolicySuite, Set<Node> excludedNodes) { try { targets = blockplacement.chooseTarget(bc.getName(), additionalReplRequired, srcNode, liveReplicaStorages, false, excludedNodes, block.getNumBytes(), storagePolicySuite.getPolicy(bc.getStoragePolicyID())); } finally { srcNode.decrementPendingReplicationWithoutTargets(); } } } /** * A simple result enum for the result of * {@link BlockManager#processMisReplicatedBlock(BlockInfo)}. */ enum MisReplicationResult { /** The block should be invalidated since it belongs to a deleted file. */ INVALID, /** The block is currently under-replicated. */ UNDER_REPLICATED, /** The block is currently over-replicated. */ OVER_REPLICATED, /** A decision can't currently be made about this block. */ POSTPONE, /** The block is under construction, so should be ignored */ UNDER_CONSTRUCTION, /** The block is properly replicated */ OK } public void shutdown() { stopReplicationInitializer(); blocksMap.close(); MBeans.unregister(mxBeanName); mxBeanName = null; } public void clear() { clearQueues(); blocksMap.clear(); } public BlockReportLeaseManager getBlockReportLeaseManager() { return blockReportLeaseManager; } @Override // BlockStatsMXBean public Map<StorageType, StorageTypeStats> getStorageTypeStats() { return datanodeManager.getDatanodeStatistics().getStorageTypeStats(); } }
151,066
37.774897
138
java
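The delete-hint rule in BlockManager.useDelHint above (honour the hint only on the first removal round, only for an excess storage type, and only when dropping it does not lose a rack) is easy to misread in place. The stand-alone sketch below mirrors that decision under simplifying assumptions: the Replica class and plain String storage types are hypothetical stand-ins for DatanodeStorageInfo/StorageType, not Hadoop classes, so this is an illustration of the rule rather than the production code path.

// Self-contained sketch of the delete-hint decision; Replica and String
// storage types are simplified stand-ins used only for this illustration.
import java.util.List;
import java.util.Set;

public class DelHintSketch {

  static final class Replica {
    final String node;        // datanode holding the replica
    final String storageType; // e.g. "DISK", "ARCHIVE"
    Replica(String node, String storageType) {
      this.node = node;
      this.storageType = storageType;
    }
  }

  // Accept the hint only on the first removal round, only if its storage type
  // is one of the excess types chosen by the storage policy, and only if
  // removing it keeps the same number of racks (the hint shares a rack with
  // another replica, or the newly added replica opened a new rack).
  static boolean useDelHint(boolean isFirst, Replica delHint, Replica added,
      Set<Replica> onRacksWithMultipleReplicas, List<String> excessTypes) {
    if (!isFirst || delHint == null) {
      return false;
    }
    if (!excessTypes.contains(delHint.storageType)) {
      return false;
    }
    if (onRacksWithMultipleReplicas.contains(delHint)) {
      return true;  // another replica stays on the hint's rack
    }
    return added != null && !onRacksWithMultipleReplicas.contains(added);
  }

  public static void main(String[] args) {
    Replica hint = new Replica("dn1", "DISK");
    Replica added = new Replica("dn4", "DISK");
    // The hint's rack keeps another replica, so honouring the hint is safe.
    System.out.println(
        useDelHint(true, hint, added, Set.of(hint), List.of("DISK")));  // true
    // The hint is alone on its rack and the added replica did not open a new
    // rack, so removing the hint would lose a rack.
    System.out.println(
        useDelHint(true, hint, added, Set.of(added), List.of("DISK"))); // false
  }
}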
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/OutOfV1GenerationStampsException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * This exception is thrown when the name node runs out of V1 generation * stamps. * */ @InterfaceAudience.Private @InterfaceStability.Evolving public class OutOfV1GenerationStampsException extends IOException { private static final long serialVersionUID = 1L; public OutOfV1GenerationStampsException() { super("Out of V1 (legacy) generation stamps\n"); } }
1,388
34.615385
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.TreeMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.ipc.Server; import com.google.common.annotations.VisibleForTesting; /** * Stores information about all corrupt blocks in the File System. * A Block is considered corrupt only if all of its replicas are * corrupt. While reporting replicas of a Block, we hide any corrupt * copies. These copies are removed once Block is found to have * expected number of good replicas. * Mapping: Block -> TreeSet<DatanodeDescriptor> */ @InterfaceAudience.Private public class CorruptReplicasMap{ /** The corruption reason code */ public static enum Reason { NONE, // not specified. ANY, // wildcard reason GENSTAMP_MISMATCH, // mismatch in generation stamps SIZE_MISMATCH, // mismatch in sizes INVALID_STATE, // invalid state CORRUPTION_REPORTED // client or datanode reported the corruption } private final Map<Block, Map<DatanodeDescriptor, Reason>> corruptReplicasMap = new HashMap<Block, Map<DatanodeDescriptor, Reason>>(); /** * Mark the block belonging to datanode as corrupt. * * @param blk Block to be added to CorruptReplicasMap * @param dn DatanodeDescriptor which holds the corrupt replica * @param reason a textual reason (for logging purposes) * @param reasonCode the enum representation of the reason */ void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn, String reason, Reason reasonCode) { Map <DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk); if (nodes == null) { nodes = new HashMap<DatanodeDescriptor, Reason>(); corruptReplicasMap.put(blk, nodes); } String reasonText; if (reason != null) { reasonText = " because " + reason; } else { reasonText = ""; } if (!nodes.keySet().contains(dn)) { NameNode.blockStateChangeLog.debug( "BLOCK NameSystem.addToCorruptReplicasMap: {} added as corrupt on " + "{} by {} {}", blk.getBlockName(), dn, Server.getRemoteIp(), reasonText); } else { NameNode.blockStateChangeLog.debug( "BLOCK NameSystem.addToCorruptReplicasMap: duplicate requested for" + " {} to add as corrupt on {} by {} {}", blk.getBlockName(), dn, Server.getRemoteIp(), reasonText); } // Add the node or update the reason. 
nodes.put(dn, reasonCode); } /** * Remove Block from CorruptBlocksMap * * @param blk Block to be removed */ void removeFromCorruptReplicasMap(Block blk) { if (corruptReplicasMap != null) { corruptReplicasMap.remove(blk); } } /** * Remove the block at the given datanode from CorruptBlockMap * @param blk block to be removed * @param datanode datanode where the block is located * @return true if the removal is successful; false if the replica is not in the map */ boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode) { return removeFromCorruptReplicasMap(blk, datanode, Reason.ANY); } boolean removeFromCorruptReplicasMap(Block blk, DatanodeDescriptor datanode, Reason reason) { Map <DatanodeDescriptor, Reason> datanodes = corruptReplicasMap.get(blk); if (datanodes==null) return false; // if reasons can be compared but don't match, return false. Reason storedReason = datanodes.get(datanode); if (reason != Reason.ANY && storedReason != null && reason != storedReason) { return false; } if (datanodes.remove(datanode) != null) { // remove the replicas if (datanodes.isEmpty()) { // remove the block if there is no more corrupted replicas corruptReplicasMap.remove(blk); } return true; } return false; } /** * Get Nodes which have corrupt replicas of Block * * @param blk Block for which nodes are requested * @return collection of nodes. Null if does not exists */ Collection<DatanodeDescriptor> getNodes(Block blk) { Map <DatanodeDescriptor, Reason> nodes = corruptReplicasMap.get(blk); if (nodes == null) return null; return nodes.keySet(); } /** * Check if replica belonging to Datanode is corrupt * * @param blk Block to check * @param node DatanodeDescriptor which holds the replica * @return true if replica is corrupt, false if does not exists in this map */ boolean isReplicaCorrupt(Block blk, DatanodeDescriptor node) { Collection<DatanodeDescriptor> nodes = getNodes(blk); return ((nodes != null) && (nodes.contains(node))); } int numCorruptReplicas(Block blk) { Collection<DatanodeDescriptor> nodes = getNodes(blk); return (nodes == null) ? 0 : nodes.size(); } int size() { return corruptReplicasMap.size(); } /** * Return a range of corrupt replica block ids. Up to numExpectedBlocks * blocks starting at the next block after startingBlockId are returned * (fewer if numExpectedBlocks blocks are unavailable). If startingBlockId * is null, up to numExpectedBlocks blocks are returned from the beginning. * If startingBlockId cannot be found, null is returned. * * @param numExpectedBlocks Number of block ids to return. * 0 <= numExpectedBlocks <= 100 * @param startingBlockId Block id from which to start. If null, start at * beginning. * @return Up to numExpectedBlocks blocks from startingBlockId if it exists * */ @VisibleForTesting long[] getCorruptReplicaBlockIdsForTesting(int numExpectedBlocks, Long startingBlockId) { if (numExpectedBlocks < 0 || numExpectedBlocks > 100) { return null; } Iterator<Block> blockIt = new TreeMap<>(corruptReplicasMap).keySet().iterator(); // if the starting block id was specified, iterate over keys until // we find the matching block. If we find a matching block, break // to leave the iterator on the next block after the specified block. 
if (startingBlockId != null) { boolean isBlockFound = false; while (blockIt.hasNext()) { Block b = blockIt.next(); if (b.getBlockId() == startingBlockId) { isBlockFound = true; break; } } if (!isBlockFound) { return null; } } ArrayList<Long> corruptReplicaBlockIds = new ArrayList<Long>(); // append up to numExpectedBlocks blockIds to our list for(int i=0; i<numExpectedBlocks && blockIt.hasNext(); i++) { corruptReplicaBlockIds.add(blockIt.next().getBlockId()); } long[] ret = new long[corruptReplicaBlockIds.size()]; for(int i=0; i<ret.length; i++) { ret[i] = corruptReplicaBlockIds.get(i); } return ret; } /** * return the reason about corrupted replica for a given block * on a given dn * @param block block that has corrupted replica * @param node datanode that contains this corrupted replica * @return reason */ String getCorruptReason(Block block, DatanodeDescriptor node) { Reason reason = null; if(corruptReplicasMap.containsKey(block)) { if (corruptReplicasMap.get(block).containsKey(node)) { reason = corruptReplicasMap.get(block).get(node); } } if (reason != null) { return reason.toString(); } else { return null; } } }
8,568
32.603922
80
java
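The removal semantics documented in CorruptReplicasMap above (a replica is only dropped when the caller's reason matches the stored one, Reason.ANY acts as a wildcard, and the block entry disappears once its last corrupt replica is removed) can be exercised with a minimal stand-alone map. The String keys below are hypothetical stand-ins for Block and DatanodeDescriptor; this is a sketch of the bookkeeping, not the Hadoop class.

// Minimal sketch of reason-aware removal; block ids and datanode ids are
// plain Strings purely for illustration.
import java.util.HashMap;
import java.util.Map;

public class CorruptMapSketch {

  enum Reason { ANY, GENSTAMP_MISMATCH, SIZE_MISMATCH }

  private final Map<String, Map<String, Reason>> corrupt = new HashMap<>();

  void add(String block, String datanode, Reason reason) {
    corrupt.computeIfAbsent(block, b -> new HashMap<>()).put(datanode, reason);
  }

  // Remove the replica only if the stored reason matches, or ANY was given;
  // drop the block entry entirely once no corrupt replicas remain.
  boolean remove(String block, String datanode, Reason reason) {
    Map<String, Reason> replicas = corrupt.get(block);
    if (replicas == null) {
      return false;
    }
    Reason stored = replicas.get(datanode);
    if (reason != Reason.ANY && stored != null && reason != stored) {
      return false;               // reasons are comparable but do not match
    }
    if (replicas.remove(datanode) == null) {
      return false;               // replica was not recorded as corrupt
    }
    if (replicas.isEmpty()) {
      corrupt.remove(block);      // last corrupt replica is gone
    }
    return true;
  }

  public static void main(String[] args) {
    CorruptMapSketch m = new CorruptMapSketch();
    m.add("blk_1", "dn1", Reason.GENSTAMP_MISMATCH);
    System.out.println(m.remove("blk_1", "dn1", Reason.SIZE_MISMATCH)); // false
    System.out.println(m.remove("blk_1", "dn1", Reason.ANY));           // true
  }
}

The wildcard reason keeps callers that do not care why a replica was marked corrupt (for example, bulk cleanup) from having to enumerate every possible reason code.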
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnresolvedTopologyException.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.IOException; /** * This exception is thrown if resolving topology path * for a node fails. */ public class UnresolvedTopologyException extends IOException { /** for java.io.Serializable */ private static final long serialVersionUID = 1L; public UnresolvedTopologyException(String text) { super(text); } }
1,205
33.457143
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/FSClusterStats.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.classification.InterfaceAudience; /** * This interface is used for retrieving the load related statistics of * the cluster. */ @InterfaceAudience.Private public interface FSClusterStats { /** * an indication of the total load of the cluster. * * @return a count of the total number of block transfers and block * writes that are currently occuring on the cluster. */ public int getTotalLoad(); /** * Indicate whether or not the cluster is now avoiding * to use stale DataNodes for writing. * * @return True if the cluster is currently avoiding using stale DataNodes * for writing targets, and false otherwise. */ public boolean isAvoidingStaleDataNodesForWrite(); /** * Indicates number of datanodes that are in service. * @return Number of datanodes that are both alive and not decommissioned. */ public int getNumDatanodesInService(); /** * an indication of the average load of non-decommission(ing|ed) nodes * eligible for block placement * * @return average of the in service number of block transfers and block * writes that are currently occurring on the cluster. */ public double getInServiceXceiverAverage(); }
2,115
33.688525
76
java
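To make the relationship between the FSClusterStats methods concrete, here is a toy stand-alone holder with the same shape. The fields and the simple totalLoad-over-in-service-nodes averaging rule are assumptions chosen for illustration; how the namesystem actually gathers and combines these numbers is outside this interface.

// Toy holder mirroring the FSClusterStats shape; values and the averaging
// rule are illustrative assumptions only.
public class SimpleClusterStats {

  private final int totalLoad;            // total transfers/writes across live nodes
  private final int inServiceNodes;       // live, non-decommission(ing|ed) nodes
  private final boolean avoidStaleForWrite;

  public SimpleClusterStats(int totalLoad, int inServiceNodes,
      boolean avoidStaleForWrite) {
    this.totalLoad = totalLoad;
    this.inServiceNodes = inServiceNodes;
    this.avoidStaleForWrite = avoidStaleForWrite;
  }

  public int getTotalLoad() { return totalLoad; }

  public boolean isAvoidingStaleDataNodesForWrite() { return avoidStaleForWrite; }

  public int getNumDatanodesInService() { return inServiceNodes; }

  // Average transfers/writes per in-service node; 0 when none are in service.
  public double getInServiceXceiverAverage() {
    return inServiceNodes == 0 ? 0.0 : (double) totalLoad / inServiceNodes;
  }

  public static void main(String[] args) {
    SimpleClusterStats stats = new SimpleClusterStats(120, 40, true);
    System.out.println(stats.getInServiceXceiverAverage()); // 3.0
  }
}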
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.ContentSummary; /** * This interface is used by the block manager to expose a * few characteristics of a collection of Block/BlockUnderConstruction. */ @InterfaceAudience.Private public interface BlockCollection { /** * Get the last block of the collection. */ public BlockInfo getLastBlock(); /** * Get content summary. */ public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps); /** * @return the number of blocks */ public int numBlocks(); /** * Get the blocks. */ public BlockInfo[] getBlocks(); /** * Get preferred block size for the collection * @return preferred block size in bytes */ public long getPreferredBlockSize(); /** * Get block replication for the collection * @return block replication value */ public short getPreferredBlockReplication(); /** * @return the storage policy ID. */ public byte getStoragePolicyID(); /** * Get the name of the collection. */ public String getName(); /** * Set the block at the given index. */ public void setBlock(int index, BlockInfo blk); /** * Convert the last block of the collection to an under-construction block * and set the locations. */ public BlockInfoContiguousUnderConstruction setLastBlock(BlockInfo lastBlock, DatanodeStorageInfo[] targets) throws IOException; /** * @return whether the block collection is under construction. */ public boolean isUnderConstruction(); }
2,476
26.522222
79
java
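A collection's getPreferredBlockReplication is what the BlockManager code earlier in this dump compares against live replica counts (isNeededReplication / checkReplication). The sketch below mirrors that comparison under simplifying assumptions: plain maps of block id to live-replica count stand in for the real BlockInfo/NumberReplicas machinery, and the rack check is reduced to a boolean.

// Simplified sketch of the needed-replication decision driven by a
// collection's preferred replication; all types are plain stand-ins.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class CheckReplicationSketch {

  // A block needs replication work if it has too few live replicas
  // or is not spread over enough racks.
  static boolean isNeededReplication(int liveReplicas, int expected,
      boolean hasEnoughRacks) {
    return liveReplicas < expected || !hasEnoughRacks;
  }

  // Return the block ids of a "collection" that should be queued for
  // replication, given its preferred replication factor.
  static List<String> blocksNeedingReplication(Map<String, Integer> liveCount,
      int preferredReplication, Map<String, Boolean> enoughRacks) {
    List<String> needed = new ArrayList<>();
    for (Map.Entry<String, Integer> e : liveCount.entrySet()) {
      boolean racksOk = enoughRacks.getOrDefault(e.getKey(), true);
      if (isNeededReplication(e.getValue(), preferredReplication, racksOk)) {
        needed.add(e.getKey());
      }
    }
    return needed;
  }

  public static void main(String[] args) {
    Map<String, Integer> live = Map.of("blk_1", 3, "blk_2", 1);
    System.out.println(blocksNeedingReplication(live, 3, Map.of())); // [blk_2]
  }
}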
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.hadoop.hdfs.util.LightWeightLinkedSet; import org.apache.hadoop.hdfs.server.namenode.NameNode; /** * Keep prioritized queues of under replicated blocks. * Blocks have replication priority, with priority {@link #QUEUE_HIGHEST_PRIORITY} * indicating the highest priority. * </p> * Having a prioritised queue allows the {@link BlockManager} to select * which blocks to replicate first -it tries to give priority to data * that is most at risk or considered most valuable. * * <p/> * The policy for choosing which priority to give added blocks * is implemented in {@link #getPriority(int, int, int)}. * </p> * <p>The queue order is as follows:</p> * <ol> * <li>{@link #QUEUE_HIGHEST_PRIORITY}: the blocks that must be replicated * first. That is blocks with only one copy, or blocks with zero live * copies but a copy in a node being decommissioned. These blocks * are at risk of loss if the disk or server on which they * remain fails.</li> * <li>{@link #QUEUE_VERY_UNDER_REPLICATED}: blocks that are very * under-replicated compared to their expected values. Currently * that means the ratio of the ratio of actual:expected means that * there is <i>less than</i> 1:3.</li>. These blocks may not be at risk, * but they are clearly considered "important". * <li>{@link #QUEUE_UNDER_REPLICATED}: blocks that are also under * replicated, and the ratio of actual:expected is good enough that * they do not need to go into the {@link #QUEUE_VERY_UNDER_REPLICATED} * queue.</li> * <li>{@link #QUEUE_REPLICAS_BADLY_DISTRIBUTED}: there are as least as * many copies of a block as required, but the blocks are not adequately * distributed. Loss of a rack/switch could take all copies off-line.</li> * <li>{@link #QUEUE_WITH_CORRUPT_BLOCKS} This is for blocks that are corrupt * and for which there are no-non-corrupt copies (currently) available. 
* The policy here is to keep those corrupt blocks replicated, but give * blocks that are not corrupt higher priority.</li> * </ol> */ class UnderReplicatedBlocks implements Iterable<BlockInfo> { /** The total number of queues : {@value} */ static final int LEVEL = 5; /** The queue with the highest priority: {@value} */ static final int QUEUE_HIGHEST_PRIORITY = 0; /** The queue for blocks that are way below their expected value : {@value} */ static final int QUEUE_VERY_UNDER_REPLICATED = 1; /** The queue for "normally" under-replicated blocks: {@value} */ static final int QUEUE_UNDER_REPLICATED = 2; /** The queue for blocks that have the right number of replicas, * but which the block manager felt were badly distributed: {@value} */ static final int QUEUE_REPLICAS_BADLY_DISTRIBUTED = 3; /** The queue for corrupt blocks: {@value} */ static final int QUEUE_WITH_CORRUPT_BLOCKS = 4; /** the queues themselves */ private final List<LightWeightLinkedSet<BlockInfo>> priorityQueues = new ArrayList<>(LEVEL); /** The number of corrupt blocks with replication factor 1 */ private int corruptReplOneBlocks = 0; /** Create an object. */ UnderReplicatedBlocks() { for (int i = 0; i < LEVEL; i++) { priorityQueues.add(new LightWeightLinkedSet<BlockInfo>()); } } /** * Empty the queues. */ synchronized void clear() { for (int i = 0; i < LEVEL; i++) { priorityQueues.get(i).clear(); } corruptReplOneBlocks = 0; } /** Return the total number of under replication blocks */ synchronized int size() { int size = 0; for (int i = 0; i < LEVEL; i++) { size += priorityQueues.get(i).size(); } return size; } /** Return the number of under replication blocks excluding corrupt blocks */ synchronized int getUnderReplicatedBlockCount() { int size = 0; for (int i = 0; i < LEVEL; i++) { if (i != QUEUE_WITH_CORRUPT_BLOCKS) { size += priorityQueues.get(i).size(); } } return size; } /** Return the number of corrupt blocks */ synchronized int getCorruptBlockSize() { return priorityQueues.get(QUEUE_WITH_CORRUPT_BLOCKS).size(); } /** Return the number of corrupt blocks with replication factor 1 */ synchronized int getCorruptReplOneBlockSize() { return corruptReplOneBlocks; } /** Check if a block is in the neededReplication queue */ synchronized boolean contains(BlockInfo block) { for(LightWeightLinkedSet<BlockInfo> set : priorityQueues) { if (set.contains(block)) { return true; } } return false; } /** Return the priority of a block * @param curReplicas current number of replicas of the block * @param expectedReplicas expected number of replicas of the block * @return the priority for the blocks, between 0 and ({@link #LEVEL}-1) */ private int getPriority(int curReplicas, int decommissionedReplicas, int expectedReplicas) { assert curReplicas >= 0 : "Negative replicas!"; if (curReplicas >= expectedReplicas) { // Block has enough copies, but not enough racks return QUEUE_REPLICAS_BADLY_DISTRIBUTED; } else if (curReplicas == 0) { // If there are zero non-decommissioned replicas but there are // some decommissioned replicas, then assign them highest priority if (decommissionedReplicas > 0) { return QUEUE_HIGHEST_PRIORITY; } //all we have are corrupt blocks return QUEUE_WITH_CORRUPT_BLOCKS; } else if (curReplicas == 1) { //only on replica -risk of loss // highest priority return QUEUE_HIGHEST_PRIORITY; } else if ((curReplicas * 3) < expectedReplicas) { //there is less than a third as many blocks as requested; //this is considered very under-replicated return QUEUE_VERY_UNDER_REPLICATED; } else { //add to the normal queue for under replicated 
blocks return QUEUE_UNDER_REPLICATED; } } /** add a block to a under replication queue according to its priority * @param block a under replication block * @param curReplicas current number of replicas of the block * @param decomissionedReplicas the number of decommissioned replicas * @param expectedReplicas expected number of replicas of the block * @return true if the block was added to a queue. */ synchronized boolean add(BlockInfo block, int curReplicas, int decomissionedReplicas, int expectedReplicas) { assert curReplicas >= 0 : "Negative replicas!"; int priLevel = getPriority(curReplicas, decomissionedReplicas, expectedReplicas); if(priorityQueues.get(priLevel).add(block)) { if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS && expectedReplicas == 1) { corruptReplOneBlocks++; } NameNode.blockStateChangeLog.debug( "BLOCK* NameSystem.UnderReplicationBlock.add: {}" + " has only {} replicas and need {} replicas so is added to" + " neededReplications at priority level {}", block, curReplicas, expectedReplicas, priLevel); return true; } return false; } /** remove a block from a under replication queue */ synchronized boolean remove(BlockInfo block, int oldReplicas, int decommissionedReplicas, int oldExpectedReplicas) { int priLevel = getPriority(oldReplicas, decommissionedReplicas, oldExpectedReplicas); boolean removedBlock = remove(block, priLevel); if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS && oldExpectedReplicas == 1 && removedBlock) { corruptReplOneBlocks--; assert corruptReplOneBlocks >= 0 : "Number of corrupt blocks with replication factor 1 " + "should be non-negative"; } return removedBlock; } /** * Remove a block from the under replication queues. * * The priLevel parameter is a hint of which queue to query * first: if negative or &gt;= {@link #LEVEL} this shortcutting * is not attmpted. * * If the block is not found in the nominated queue, an attempt is made to * remove it from all queues. * * <i>Warning:</i> This is not a synchronized method. * @param block block to remove * @param priLevel expected privilege level * @return true if the block was found and removed from one of the priority queues */ boolean remove(BlockInfo block, int priLevel) { if(priLevel >= 0 && priLevel < LEVEL && priorityQueues.get(priLevel).remove(block)) { NameNode.blockStateChangeLog.debug( "BLOCK* NameSystem.UnderReplicationBlock.remove: Removing block {}" + " from priority queue {}", block, priLevel); return true; } else { // Try to remove the block from all queues if the block was // not found in the queue for the given priority level. for (int i = 0; i < LEVEL; i++) { if (priorityQueues.get(i).remove(block)) { NameNode.blockStateChangeLog.debug( "BLOCK* NameSystem.UnderReplicationBlock.remove: Removing block" + " {} from priority queue {}", block, priLevel); return true; } } } return false; } /** * Recalculate and potentially update the priority level of a block. * * If the block priority has changed from before an attempt is made to * remove it from the block queue. Regardless of whether or not the block * is in the block queue of (recalculate) priority, an attempt is made * to add it to that queue. This ensures that the block will be * in its expected priority queue (and only that queue) by the end of the * method call. 
* @param block a under replicated block * @param curReplicas current number of replicas of the block * @param decommissionedReplicas the number of decommissioned replicas * @param curExpectedReplicas expected number of replicas of the block * @param curReplicasDelta the change in the replicate count from before * @param expectedReplicasDelta the change in the expected replica count from before */ synchronized void update(BlockInfo block, int curReplicas, int decommissionedReplicas, int curExpectedReplicas, int curReplicasDelta, int expectedReplicasDelta) { int oldReplicas = curReplicas-curReplicasDelta; int oldExpectedReplicas = curExpectedReplicas-expectedReplicasDelta; int curPri = getPriority(curReplicas, decommissionedReplicas, curExpectedReplicas); int oldPri = getPriority(oldReplicas, decommissionedReplicas, oldExpectedReplicas); if(NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("UnderReplicationBlocks.update " + block + " curReplicas " + curReplicas + " curExpectedReplicas " + curExpectedReplicas + " oldReplicas " + oldReplicas + " oldExpectedReplicas " + oldExpectedReplicas + " curPri " + curPri + " oldPri " + oldPri); } if(oldPri != curPri) { remove(block, oldPri); } if(priorityQueues.get(curPri).add(block)) { NameNode.blockStateChangeLog.debug( "BLOCK* NameSystem.UnderReplicationBlock.update: {} has only {} " + "replicas and needs {} replicas so is added to " + "neededReplications at priority level {}", block, curReplicas, curExpectedReplicas, curPri); } if (oldPri != curPri || expectedReplicasDelta != 0) { // corruptReplOneBlocks could possibly change if (curPri == QUEUE_WITH_CORRUPT_BLOCKS && curExpectedReplicas == 1) { // add a new corrupt block with replication factor 1 corruptReplOneBlocks++; } else if (oldPri == QUEUE_WITH_CORRUPT_BLOCKS && curExpectedReplicas - expectedReplicasDelta == 1) { // remove an existing corrupt block with replication factor 1 corruptReplOneBlocks--; } } } /** * Get a list of block lists to be replicated. The index of block lists * represents its replication priority. Iterates each block list in priority * order beginning with the highest priority list. Iterators use a bookmark to * resume where the previous iteration stopped. Returns when the block count * is met or iteration reaches the end of the lowest priority list, in which * case bookmarks for each block list are reset to the heads of their * respective lists. * * @param blocksToProcess - number of blocks to fetch from underReplicated * blocks. * @return Return a list of block lists to be replicated. The block list index * represents its replication priority. */ public synchronized List<List<BlockInfo>> chooseUnderReplicatedBlocks( int blocksToProcess) { // initialize data structure for the return value List<List<BlockInfo>> blocksToReplicate = new ArrayList<>(LEVEL); for (int i = 0; i < LEVEL; i++) { blocksToReplicate.add(new ArrayList<BlockInfo>()); } if (size() == 0) { // There are no blocks to collect. return blocksToReplicate; } int blockCount = 0; for (int priority = 0; priority < LEVEL; priority++) { // Go through all blocks that need replications with current priority. BlockIterator neededReplicationsIterator = iterator(priority); // Set the iterator to the first unprocessed block at this priority level. neededReplicationsIterator.setToBookmark(); blocksToProcess = Math.min(blocksToProcess, size()); if (blockCount == blocksToProcess) { break; // break if already expected blocks are obtained } // Loop through all remaining blocks in the list. 
while (blockCount < blocksToProcess && neededReplicationsIterator.hasNext()) { BlockInfo block = neededReplicationsIterator.next(); blocksToReplicate.get(priority).add(block); blockCount++; } if (!neededReplicationsIterator.hasNext() && neededReplicationsIterator.getPriority() == LEVEL - 1) { // Reset all priorities' bookmarks to the beginning because there were // no recently added blocks in any list. for (int i = 0; i < LEVEL; i++) { this.priorityQueues.get(i).resetBookmark(); } break; } } return blocksToReplicate; } /** returns an iterator of all blocks in a given priority queue */ synchronized BlockIterator iterator(int level) { return new BlockIterator(level); } /** return an iterator of all the under replication blocks */ @Override public synchronized BlockIterator iterator() { return new BlockIterator(); } /** * An iterator over blocks. */ class BlockIterator implements Iterator<BlockInfo> { private int level; private boolean isIteratorForLevel = false; private final List<Iterator<BlockInfo>> iterators = new ArrayList<>(); /** * Construct an iterator over all queues. */ private BlockIterator() { level=0; for(int i=0; i<LEVEL; i++) { iterators.add(priorityQueues.get(i).iterator()); } } /** * Constrict an iterator for a single queue level * @param l the priority level to iterate over */ private BlockIterator(int l) { level = l; isIteratorForLevel = true; iterators.add(priorityQueues.get(level).iterator()); } private void update() { if (isIteratorForLevel) { return; } while(level< LEVEL-1 && !iterators.get(level).hasNext()) { level++; } } @Override public BlockInfo next() { if (isIteratorForLevel) { return iterators.get(0).next(); } update(); return iterators.get(level).next(); } @Override public boolean hasNext() { if (isIteratorForLevel) { return iterators.get(0).hasNext(); } update(); return iterators.get(level).hasNext(); } @Override public void remove() { if (isIteratorForLevel) { iterators.get(0).remove(); } else { iterators.get(level).remove(); } } int getPriority() { return level; } /** * Sets iterator(s) to bookmarked elements. */ private synchronized void setToBookmark() { if (this.isIteratorForLevel) { this.iterators.set(0, priorityQueues.get(this.level) .getBookmark()); } else { for (int i = 0; i < LEVEL; i++) { this.iterators.set(i, priorityQueues.get(i).getBookmark()); } } } } }
17,847
36.26096
86
java
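The core of UnderReplicatedBlocks above is the getPriority() ladder that routes a block into one of the five queues. The standalone sketch below (hypothetical class name, plain Java, no Hadoop dependencies) mirrors that decision ladder so the queue assignments can be tried directly; it is an illustration, not the Hadoop implementation.

// Standalone sketch that mirrors the priority ladder described above.
public class ReplicationPriorityDemo {
  static final int QUEUE_HIGHEST_PRIORITY = 0;
  static final int QUEUE_VERY_UNDER_REPLICATED = 1;
  static final int QUEUE_UNDER_REPLICATED = 2;
  static final int QUEUE_REPLICAS_BADLY_DISTRIBUTED = 3;
  static final int QUEUE_WITH_CORRUPT_BLOCKS = 4;

  static int getPriority(int curReplicas, int decommissionedReplicas, int expectedReplicas) {
    if (curReplicas >= expectedReplicas) {
      // Enough copies, but presumably not enough racks.
      return QUEUE_REPLICAS_BADLY_DISTRIBUTED;
    } else if (curReplicas == 0) {
      // Only decommissioned copies left -> highest priority; otherwise only corrupt copies.
      return decommissionedReplicas > 0 ? QUEUE_HIGHEST_PRIORITY : QUEUE_WITH_CORRUPT_BLOCKS;
    } else if (curReplicas == 1) {
      return QUEUE_HIGHEST_PRIORITY;        // a single live replica is at risk of loss
    } else if (curReplicas * 3 < expectedReplicas) {
      return QUEUE_VERY_UNDER_REPLICATED;   // less than a third of the requested replicas
    } else {
      return QUEUE_UNDER_REPLICATED;
    }
  }

  public static void main(String[] args) {
    System.out.println(getPriority(1, 0, 3));   // 0: one live copy -> highest priority
    System.out.println(getPriority(3, 0, 10));  // 1: well below a third of expected
    System.out.println(getPriority(2, 0, 3));   // 2: normally under-replicated
    System.out.println(getPriority(3, 0, 3));   // 3: enough copies, badly distributed
    System.out.println(getPriority(0, 0, 3));   // 4: only corrupt copies remain
  }
}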
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.namenode.NameNode; /** * Represents a block that is currently being constructed.<br> * This is usually the last block of a file opened for write or append. */ public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous { /** Block state. See {@link BlockUCState} */ private BlockUCState blockUCState; /** * Block replicas as assigned when the block was allocated. * This defines the pipeline order. */ private List<ReplicaUnderConstruction> replicas; /** * Index of the primary data node doing the recovery. Useful for log * messages. */ private int primaryNodeIndex = -1; /** * The new generation stamp, which this block will have * after the recovery succeeds. Also used as a recovery id to identify * the right recovery if any of the abandoned recoveries re-appear. */ private long blockRecoveryId = 0; /** * The block source to use in the event of copy-on-write truncate. */ private Block truncateBlock; /** * ReplicaUnderConstruction contains information about replicas while * they are under construction. * The GS, the length and the state of the replica is as reported by * the data-node. * It is not guaranteed, but expected, that data-nodes actually have * corresponding replicas. */ static class ReplicaUnderConstruction extends Block { private final DatanodeStorageInfo expectedLocation; private ReplicaState state; private boolean chosenAsPrimary; ReplicaUnderConstruction(Block block, DatanodeStorageInfo target, ReplicaState state) { super(block); this.expectedLocation = target; this.state = state; this.chosenAsPrimary = false; } /** * Expected block replica location as assigned when the block was allocated. * This defines the pipeline order. * It is not guaranteed, but expected, that the data-node actually has * the replica. */ private DatanodeStorageInfo getExpectedStorageLocation() { return expectedLocation; } /** * Get replica state as reported by the data-node. */ ReplicaState getState() { return state; } /** * Whether the replica was chosen for recovery. */ boolean getChosenAsPrimary() { return chosenAsPrimary; } /** * Set replica state. */ void setState(ReplicaState s) { state = s; } /** * Set whether this replica was chosen for recovery. */ void setChosenAsPrimary(boolean chosenAsPrimary) { this.chosenAsPrimary = chosenAsPrimary; } /** * Is data-node the replica belongs to alive. 
*/ boolean isAlive() { return expectedLocation.getDatanodeDescriptor().isAlive; } @Override // Block public int hashCode() { return super.hashCode(); } @Override // Block public boolean equals(Object obj) { // Sufficient to rely on super's implementation return (this == obj) || super.equals(obj); } @Override public String toString() { final StringBuilder b = new StringBuilder(50); appendStringTo(b); return b.toString(); } @Override public void appendStringTo(StringBuilder sb) { sb.append("ReplicaUC[") .append(expectedLocation) .append("|") .append(state) .append("]"); } } /** * Create block and set its state to * {@link BlockUCState#UNDER_CONSTRUCTION}. */ public BlockInfoContiguousUnderConstruction(Block blk, short replication) { this(blk, replication, BlockUCState.UNDER_CONSTRUCTION, null); } /** * Create a block that is currently being constructed. */ public BlockInfoContiguousUnderConstruction(Block blk, short replication, BlockUCState state, DatanodeStorageInfo[] targets) { super(blk, replication); assert getBlockUCState() != BlockUCState.COMPLETE : "BlockInfoUnderConstruction cannot be in COMPLETE state"; this.blockUCState = state; setExpectedLocations(targets); } /** * Convert an under construction block to a complete block. * * @return BlockInfo - a complete block. * @throws IOException if the state of the block * (the generation stamp and the length) has not been committed by * the client or it does not have at least a minimal number of replicas * reported from data-nodes. */ BlockInfo convertToCompleteBlock() throws IOException { assert getBlockUCState() != BlockUCState.COMPLETE : "Trying to convert a COMPLETE block"; return new BlockInfoContiguous(this); } /** Set expected locations */ public void setExpectedLocations(DatanodeStorageInfo[] targets) { int numLocations = targets == null ? 0 : targets.length; this.replicas = new ArrayList<ReplicaUnderConstruction>(numLocations); for(int i = 0; i < numLocations; i++) replicas.add( new ReplicaUnderConstruction(this, targets[i], ReplicaState.RBW)); } /** * Create array of expected replica locations * (as has been assigned by chooseTargets()). */ public DatanodeStorageInfo[] getExpectedStorageLocations() { int numLocations = replicas == null ? 0 : replicas.size(); DatanodeStorageInfo[] storages = new DatanodeStorageInfo[numLocations]; for(int i = 0; i < numLocations; i++) storages[i] = replicas.get(i).getExpectedStorageLocation(); return storages; } /** Get the number of expected locations */ public int getNumExpectedLocations() { return replicas == null ? 0 : replicas.size(); } /** * Return the state of the block under construction. * @see BlockUCState */ @Override // BlockInfo public BlockUCState getBlockUCState() { return blockUCState; } void setBlockUCState(BlockUCState s) { blockUCState = s; } /** Get block recovery ID */ public long getBlockRecoveryId() { return blockRecoveryId; } /** Get recover block */ public Block getTruncateBlock() { return truncateBlock; } public void setTruncateBlock(Block recoveryBlock) { this.truncateBlock = recoveryBlock; } /** * Process the recorded replicas. When about to commit or finish the * pipeline recovery sort out bad replicas. * @param genStamp The final generation stamp for the block. */ public void setGenerationStampAndVerifyReplicas(long genStamp) { // Set the generation stamp for the block. setGenerationStamp(genStamp); if (replicas == null) return; // Remove the replicas with wrong gen stamp. // The replica list is unchanged. 
for (ReplicaUnderConstruction r : replicas) { if (genStamp != r.getGenerationStamp()) { r.getExpectedStorageLocation().removeBlock(this); NameNode.blockStateChangeLog.debug("BLOCK* Removing stale replica " + "from location: {}", r.getExpectedStorageLocation()); } } } /** * Commit block's length and generation stamp as reported by the client. * Set block state to {@link BlockUCState#COMMITTED}. * @param block - contains client reported block length and generation * @throws IOException if block ids are inconsistent. */ void commitBlock(Block block) throws IOException { if(getBlockId() != block.getBlockId()) throw new IOException("Trying to commit inconsistent block: id = " + block.getBlockId() + ", expected id = " + getBlockId()); blockUCState = BlockUCState.COMMITTED; this.set(getBlockId(), block.getNumBytes(), block.getGenerationStamp()); // Sort out invalid replicas. setGenerationStampAndVerifyReplicas(block.getGenerationStamp()); } /** * Initialize lease recovery for this block. * Find the first alive data-node starting from the previous primary and * make it primary. */ public void initializeBlockRecovery(long recoveryId) { setBlockUCState(BlockUCState.UNDER_RECOVERY); blockRecoveryId = recoveryId; if (replicas.size() == 0) { NameNode.blockStateChangeLog.warn("BLOCK*" + " BlockInfoUnderConstruction.initLeaseRecovery:" + " No blocks found, lease removed."); } boolean allLiveReplicasTriedAsPrimary = true; for (int i = 0; i < replicas.size(); i++) { // Check if all replicas have been tried or not. if (replicas.get(i).isAlive()) { allLiveReplicasTriedAsPrimary = (allLiveReplicasTriedAsPrimary && replicas.get(i).getChosenAsPrimary()); } } if (allLiveReplicasTriedAsPrimary) { // Just set all the replicas to be chosen whether they are alive or not. for (int i = 0; i < replicas.size(); i++) { replicas.get(i).setChosenAsPrimary(false); } } long mostRecentLastUpdate = 0; ReplicaUnderConstruction primary = null; primaryNodeIndex = -1; for(int i = 0; i < replicas.size(); i++) { // Skip alive replicas which have been chosen for recovery. if (!(replicas.get(i).isAlive() && !replicas.get(i).getChosenAsPrimary())) { continue; } final ReplicaUnderConstruction ruc = replicas.get(i); final long lastUpdate = ruc.getExpectedStorageLocation() .getDatanodeDescriptor().getLastUpdateMonotonic(); if (lastUpdate > mostRecentLastUpdate) { primaryNodeIndex = i; primary = ruc; mostRecentLastUpdate = lastUpdate; } } if (primary != null) { primary.getExpectedStorageLocation().getDatanodeDescriptor().addBlockToBeRecovered(this); primary.setChosenAsPrimary(true); NameNode.blockStateChangeLog.debug( "BLOCK* {} recovery started, primary={}", this, primary); } } void addReplicaIfNotPresent(DatanodeStorageInfo storage, Block block, ReplicaState rState) { Iterator<ReplicaUnderConstruction> it = replicas.iterator(); while (it.hasNext()) { ReplicaUnderConstruction r = it.next(); DatanodeStorageInfo expectedLocation = r.getExpectedStorageLocation(); if(expectedLocation == storage) { // Record the gen stamp from the report r.setGenerationStamp(block.getGenerationStamp()); return; } else if (expectedLocation != null && expectedLocation.getDatanodeDescriptor() == storage.getDatanodeDescriptor()) { // The Datanode reported that the block is on a different storage // than the one chosen by BlockPlacementPolicy. This can occur as // we allow Datanodes to choose the target storage. Update our // state by removing the stale entry and adding a new one. 
it.remove(); break; } } replicas.add(new ReplicaUnderConstruction(block, storage, rState)); } @Override // BlockInfo // BlockInfoUnderConstruction participates in maps the same way as BlockInfo public int hashCode() { return super.hashCode(); } @Override // BlockInfo public boolean equals(Object obj) { // Sufficient to rely on super's implementation return (this == obj) || super.equals(obj); } @Override public String toString() { final StringBuilder b = new StringBuilder(100); appendStringTo(b); return b.toString(); } @Override public void appendStringTo(StringBuilder sb) { super.appendStringTo(sb); appendUCParts(sb); } private void appendUCParts(StringBuilder sb) { sb.append("{UCState=").append(blockUCState) .append(", truncateBlock=" + truncateBlock) .append(", primaryNodeIndex=").append(primaryNodeIndex) .append(", replicas=["); if (replicas != null) { Iterator<ReplicaUnderConstruction> iter = replicas.iterator(); if (iter.hasNext()) { iter.next().appendStringTo(sb); while (iter.hasNext()) { sb.append(", "); iter.next().appendStringTo(sb); } } } sb.append("]}"); } }
13,165
31.589109
95
java
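initializeBlockRecovery() above chooses the recovery primary by scanning the expected replicas and picking the live, not-yet-tried one whose DataNode heartbeated most recently. The sketch below restates just that selection rule over a hypothetical Replica value class, with no Hadoop dependencies and omitting the reset that happens when every live replica has already been tried.

import java.util.Arrays;
import java.util.List;

// Standalone sketch (hypothetical types) of the primary-replica selection rule used by
// initializeBlockRecovery(): among live replicas not yet tried as primary, take the one
// whose DataNode reported most recently.
public class PrimaryReplicaSelectionDemo {
  static class Replica {
    final String node;
    final boolean alive;
    final boolean triedAsPrimary;
    final long lastUpdateMonotonic;
    Replica(String node, boolean alive, boolean triedAsPrimary, long lastUpdateMonotonic) {
      this.node = node;
      this.alive = alive;
      this.triedAsPrimary = triedAsPrimary;
      this.lastUpdateMonotonic = lastUpdateMonotonic;
    }
  }

  static Replica choosePrimary(List<Replica> replicas) {
    Replica primary = null;
    long mostRecentLastUpdate = 0;
    for (Replica r : replicas) {
      if (!r.alive || r.triedAsPrimary) {
        continue;                        // skip dead nodes and already-tried primaries
      }
      if (r.lastUpdateMonotonic > mostRecentLastUpdate) {
        mostRecentLastUpdate = r.lastUpdateMonotonic;
        primary = r;
      }
    }
    return primary;                      // null if no live, untried replica remains
  }

  public static void main(String[] args) {
    List<Replica> replicas = Arrays.asList(
        new Replica("dn1", true, false, 100),
        new Replica("dn2", true, false, 250),    // freshest heartbeat, so it is chosen
        new Replica("dn3", false, false, 300));  // dead node, ignored
    System.out.println(choosePrimary(replicas).node);   // prints dn2
  }
}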
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerFaultInjector.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.io.IOException; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; /** * Used to inject certain faults for testing. */ public class BlockManagerFaultInjector { @VisibleForTesting public static BlockManagerFaultInjector instance = new BlockManagerFaultInjector(); @VisibleForTesting public static BlockManagerFaultInjector getInstance() { return instance; } @VisibleForTesting public void incomingBlockReportRpc(DatanodeID nodeID, BlockReportContext context) throws IOException { } @VisibleForTesting public void requestBlockReportLease(DatanodeDescriptor node, long leaseId) { } @VisibleForTesting public void removeBlockReportLease(DatanodeDescriptor node, long leaseId) { } }
1,732
31.698113
78
java
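BlockManagerFaultInjector above is a standard static-instance fault-injection hook: production code calls the no-op methods, and tests replace the shared instance. The sketch below shows one way a test might install a counting subclass; the subclass name and counting behaviour are hypothetical, while the public static instance field and the method signature are taken from the class above.

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerFaultInjector;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

// Hypothetical test hook: swap the static injector instance for a subclass that counts
// lease requests. It could equally throw here to inject a fault.
public class CountingFaultInjector extends BlockManagerFaultInjector {
  public final AtomicInteger leaseRequests = new AtomicInteger();

  @Override
  public void requestBlockReportLease(DatanodeDescriptor node, long leaseId) {
    leaseRequests.incrementAndGet();   // record that the code under test reached this point
  }

  public static CountingFaultInjector install() {
    CountingFaultInjector injector = new CountingFaultInjector();
    // The code under test is assumed to read the injector through the static field/getInstance().
    BlockManagerFaultInjector.instance = injector;
    return injector;
  }
}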
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.apache.hadoop.util.ExitUtil.terminate; import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Date; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Random; import java.util.TreeMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirective; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.CacheManager; import org.apache.hadoop.hdfs.server.namenode.CachePool; import org.apache.hadoop.hdfs.server.namenode.CachedBlock; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.INode; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.INodeFile; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Preconditions; ; /** * Scans the namesystem, scheduling blocks to be cached as appropriate. * * The CacheReplicationMonitor does a full scan when the NameNode first * starts up, and at configurable intervals afterwards. */ @InterfaceAudience.LimitedPrivate({"HDFS"}) public class CacheReplicationMonitor extends Thread implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(CacheReplicationMonitor.class); private final FSNamesystem namesystem; private final BlockManager blockManager; private final CacheManager cacheManager; private final GSet<CachedBlock, CachedBlock> cachedBlocks; /** * Pseudorandom number source */ private static final Random random = new Random(); /** * The interval at which we scan the namesystem for caching changes. */ private final long intervalMs; /** * The CacheReplicationMonitor (CRM) lock. Used to synchronize starting and * waiting for rescan operations. */ private final ReentrantLock lock; /** * Notifies the scan thread that an immediate rescan is needed. */ private final Condition doRescan; /** * Notifies waiting threads that a rescan has finished. 
*/ private final Condition scanFinished; /** * The number of rescans completed. Used to wait for scans to finish. * Protected by the CacheReplicationMonitor lock. */ private long completedScanCount = 0; /** * The scan we're currently performing, or -1 if no scan is in progress. * Protected by the CacheReplicationMonitor lock. */ private long curScanCount = -1; /** * The number of rescans we need to complete. Protected by the CRM lock. */ private long neededScanCount = 0; /** * True if this monitor should terminate. Protected by the CRM lock. */ private boolean shutdown = false; /** * Mark status of the current scan. */ private boolean mark = false; /** * Cache directives found in the previous scan. */ private int scannedDirectives; /** * Blocks found in the previous scan. */ private long scannedBlocks; public CacheReplicationMonitor(FSNamesystem namesystem, CacheManager cacheManager, long intervalMs, ReentrantLock lock) { this.namesystem = namesystem; this.blockManager = namesystem.getBlockManager(); this.cacheManager = cacheManager; this.cachedBlocks = cacheManager.getCachedBlocks(); this.intervalMs = intervalMs; this.lock = lock; this.doRescan = this.lock.newCondition(); this.scanFinished = this.lock.newCondition(); } @Override public void run() { long startTimeMs = 0; Thread.currentThread().setName("CacheReplicationMonitor(" + System.identityHashCode(this) + ")"); LOG.info("Starting CacheReplicationMonitor with interval " + intervalMs + " milliseconds"); try { long curTimeMs = Time.monotonicNow(); while (true) { lock.lock(); try { while (true) { if (shutdown) { LOG.debug("Shutting down CacheReplicationMonitor"); return; } if (completedScanCount < neededScanCount) { LOG.debug("Rescanning because of pending operations"); break; } long delta = (startTimeMs + intervalMs) - curTimeMs; if (delta <= 0) { LOG.debug("Rescanning after {} milliseconds", (curTimeMs - startTimeMs)); break; } doRescan.await(delta, TimeUnit.MILLISECONDS); curTimeMs = Time.monotonicNow(); } } finally { lock.unlock(); } startTimeMs = curTimeMs; mark = !mark; rescan(); curTimeMs = Time.monotonicNow(); // Update synchronization-related variables. lock.lock(); try { completedScanCount = curScanCount; curScanCount = -1; scanFinished.signalAll(); } finally { lock.unlock(); } LOG.debug("Scanned {} directive(s) and {} block(s) in {} millisecond(s).", scannedDirectives, scannedBlocks, (curTimeMs - startTimeMs)); } } catch (InterruptedException e) { LOG.info("Shutting down CacheReplicationMonitor."); return; } catch (Throwable t) { LOG.error("Thread exiting", t); terminate(1, t); } } /** * Waits for a rescan to complete. This doesn't guarantee consistency with * pending operations, only relative recency, since it will not force a new * rescan if a rescan is already underway. * <p> * Note that this call will release the FSN lock, so operations before and * after are not atomic. 
*/ public void waitForRescanIfNeeded() { Preconditions.checkArgument(!namesystem.hasWriteLock(), "Must not hold the FSN write lock when waiting for a rescan."); Preconditions.checkArgument(lock.isHeldByCurrentThread(), "Must hold the CRM lock when waiting for a rescan."); if (neededScanCount <= completedScanCount) { return; } // If no scan is already ongoing, mark the CRM as dirty and kick if (curScanCount < 0) { doRescan.signal(); } // Wait until the scan finishes and the count advances while ((!shutdown) && (completedScanCount < neededScanCount)) { try { scanFinished.await(); } catch (InterruptedException e) { LOG.warn("Interrupted while waiting for CacheReplicationMonitor" + " rescan", e); break; } } } /** * Indicates to the CacheReplicationMonitor that there have been CacheManager * changes that require a rescan. */ public void setNeedsRescan() { Preconditions.checkArgument(lock.isHeldByCurrentThread(), "Must hold the CRM lock when setting the needsRescan bit."); if (curScanCount >= 0) { // If there is a scan in progress, we need to wait for the scan after // that. neededScanCount = curScanCount + 1; } else { // If there is no scan in progress, we need to wait for the next scan. neededScanCount = completedScanCount + 1; } } /** * Shut down the monitor thread. */ @Override public void close() throws IOException { Preconditions.checkArgument(namesystem.hasWriteLock()); lock.lock(); try { if (shutdown) return; // Since we hold both the FSN write lock and the CRM lock here, // we know that the CRM thread cannot be currently modifying // the cache manager state while we're closing it. // Since the CRM thread checks the value of 'shutdown' after waiting // for a lock, we know that the thread will not modify the cache // manager state after this point. shutdown = true; doRescan.signalAll(); scanFinished.signalAll(); } finally { lock.unlock(); } } private void rescan() throws InterruptedException { scannedDirectives = 0; scannedBlocks = 0; try { namesystem.writeLock(); try { lock.lock(); if (shutdown) { throw new InterruptedException("CacheReplicationMonitor was " + "shut down."); } curScanCount = completedScanCount + 1; } finally { lock.unlock(); } resetStatistics(); rescanCacheDirectives(); rescanCachedBlockMap(); blockManager.getDatanodeManager().resetLastCachingDirectiveSentTime(); } finally { namesystem.writeUnlock(); } } private void resetStatistics() { for (CachePool pool: cacheManager.getCachePools()) { pool.resetStatistics(); } for (CacheDirective directive: cacheManager.getCacheDirectives()) { directive.resetStatistics(); } } /** * Scan all CacheDirectives. Use the information to figure out * what cache replication factor each block should have. 
*/ private void rescanCacheDirectives() { FSDirectory fsDir = namesystem.getFSDirectory(); final long now = new Date().getTime(); for (CacheDirective directive : cacheManager.getCacheDirectives()) { scannedDirectives++; // Skip processing this entry if it has expired if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) { LOG.debug("Directive {}: the directive expired at {} (now = {})", directive.getId(), directive.getExpiryTime(), now); continue; } String path = directive.getPath(); INode node; try { node = fsDir.getINode(path); } catch (UnresolvedLinkException e) { // We don't cache through symlinks LOG.debug("Directive {}: got UnresolvedLinkException while resolving " + "path {}", directive.getId(), path ); continue; } if (node == null) { LOG.debug("Directive {}: No inode found at {}", directive.getId(), path); } else if (node.isDirectory()) { INodeDirectory dir = node.asDirectory(); ReadOnlyList<INode> children = dir .getChildrenList(Snapshot.CURRENT_STATE_ID); for (INode child : children) { if (child.isFile()) { rescanFile(directive, child.asFile()); } } } else if (node.isFile()) { rescanFile(directive, node.asFile()); } else { LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ", directive.getId(), node); } } } /** * Apply a CacheDirective to a file. * * @param directive The CacheDirective to apply. * @param file The file. */ private void rescanFile(CacheDirective directive, INodeFile file) { BlockInfo[] blockInfos = file.getBlocks(); // Increment the "needed" statistics directive.addFilesNeeded(1); // We don't cache UC blocks, don't add them to the total here long neededTotal = file.computeFileSizeNotIncludingLastUcBlock() * directive.getReplication(); directive.addBytesNeeded(neededTotal); // The pool's bytesNeeded is incremented as we scan. If the demand // thus far plus the demand of this file would exceed the pool's limit, // do not cache this file. CachePool pool = directive.getPool(); if (pool.getBytesNeeded() > pool.getLimit()) { LOG.debug("Directive {}: not scanning file {} because " + "bytesNeeded for pool {} is {}, but the pool's limit is {}", directive.getId(), file.getFullPathName(), pool.getPoolName(), pool.getBytesNeeded(), pool.getLimit()); return; } long cachedTotal = 0; for (BlockInfo blockInfo : blockInfos) { if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) { // We don't try to cache blocks that are under construction. LOG.trace("Directive {}: can't cache block {} because it is in state " + "{}, not COMPLETE.", directive.getId(), blockInfo, blockInfo.getBlockUCState() ); continue; } Block block = new Block(blockInfo.getBlockId()); CachedBlock ncblock = new CachedBlock(block.getBlockId(), directive.getReplication(), mark); CachedBlock ocblock = cachedBlocks.get(ncblock); if (ocblock == null) { cachedBlocks.put(ncblock); ocblock = ncblock; } else { // Update bytesUsed using the current replication levels. // Assumptions: we assume that all the blocks are the same length // on each datanode. We can assume this because we're only caching // blocks in state COMPLETE. // Note that if two directives are caching the same block(s), they will // both get them added to their bytesCached. 
List<DatanodeDescriptor> cachedOn = ocblock.getDatanodes(Type.CACHED); long cachedByBlock = Math.min(cachedOn.size(), directive.getReplication()) * blockInfo.getNumBytes(); cachedTotal += cachedByBlock; if ((mark != ocblock.getMark()) || (ocblock.getReplication() < directive.getReplication())) { // // Overwrite the block's replication and mark in two cases: // // 1. If the mark on the CachedBlock is different from the mark for // this scan, that means the block hasn't been updated during this // scan, and we should overwrite whatever is there, since it is no // longer valid. // // 2. If the replication in the CachedBlock is less than what the // directive asks for, we want to increase the block's replication // field to what the directive asks for. // ocblock.setReplicationAndMark(directive.getReplication(), mark); } } LOG.trace("Directive {}: setting replication for block {} to {}", directive.getId(), blockInfo, ocblock.getReplication()); } // Increment the "cached" statistics directive.addBytesCached(cachedTotal); if (cachedTotal == neededTotal) { directive.addFilesCached(1); } LOG.debug("Directive {}: caching {}: {}/{} bytes", directive.getId(), file.getFullPathName(), cachedTotal, neededTotal); } private String findReasonForNotCaching(CachedBlock cblock, BlockInfo blockInfo) { if (blockInfo == null) { // Somehow, a cache report with the block arrived, but the block // reports from the DataNode haven't (yet?) described such a block. // Alternately, the NameNode might have invalidated the block, but the // DataNode hasn't caught up. In any case, we want to tell the DN // to uncache this. return "not tracked by the BlockManager"; } else if (!blockInfo.isComplete()) { // When a cached block changes state from complete to some other state // on the DataNode (perhaps because of append), it will begin the // uncaching process. However, the uncaching process is not // instantaneous, especially if clients have pinned the block. So // there may be a period of time when incomplete blocks remain cached // on the DataNodes. return "not complete"; } else if (cblock.getReplication() == 0) { // Since 0 is not a valid value for a cache directive's replication // field, seeing a replication of 0 on a CacheBlock means that it // has never been reached by any sweep. return "not needed by any directives"; } else if (cblock.getMark() != mark) { // Although the block was needed in the past, we didn't reach it during // the current sweep. Therefore, it doesn't need to be cached any more. // Need to set the replication to 0 so it doesn't flip back to cached // when the mark flips on the next scan cblock.setReplicationAndMark((short)0, mark); return "no longer needed by any directives"; } return null; } /** * Scan through the cached block map. * Any blocks which are under-replicated should be assigned new Datanodes. * Blocks that are over-replicated should be removed from Datanodes. */ private void rescanCachedBlockMap() { for (Iterator<CachedBlock> cbIter = cachedBlocks.iterator(); cbIter.hasNext(); ) { scannedBlocks++; CachedBlock cblock = cbIter.next(); List<DatanodeDescriptor> pendingCached = cblock.getDatanodes(Type.PENDING_CACHED); List<DatanodeDescriptor> cached = cblock.getDatanodes(Type.CACHED); List<DatanodeDescriptor> pendingUncached = cblock.getDatanodes(Type.PENDING_UNCACHED); // Remove nodes from PENDING_UNCACHED if they were actually uncached. 
for (Iterator<DatanodeDescriptor> iter = pendingUncached.iterator(); iter.hasNext(); ) { DatanodeDescriptor datanode = iter.next(); if (!cblock.isInList(datanode.getCached())) { LOG.trace("Block {}: removing from PENDING_UNCACHED for node {} " + "because the DataNode uncached it.", cblock.getBlockId(), datanode.getDatanodeUuid()); datanode.getPendingUncached().remove(cblock); iter.remove(); } } BlockInfo blockInfo = blockManager. getStoredBlock(new Block(cblock.getBlockId())); String reason = findReasonForNotCaching(cblock, blockInfo); int neededCached = 0; if (reason != null) { LOG.trace("Block {}: can't cache block because it is {}", cblock.getBlockId(), reason); } else { neededCached = cblock.getReplication(); } int numCached = cached.size(); if (numCached >= neededCached) { // If we have enough replicas, drop all pending cached. for (Iterator<DatanodeDescriptor> iter = pendingCached.iterator(); iter.hasNext(); ) { DatanodeDescriptor datanode = iter.next(); datanode.getPendingCached().remove(cblock); iter.remove(); LOG.trace("Block {}: removing from PENDING_CACHED for node {}" + "because we already have {} cached replicas and we only" + " need {}", cblock.getBlockId(), datanode.getDatanodeUuid(), numCached, neededCached ); } } if (numCached < neededCached) { // If we don't have enough replicas, drop all pending uncached. for (Iterator<DatanodeDescriptor> iter = pendingUncached.iterator(); iter.hasNext(); ) { DatanodeDescriptor datanode = iter.next(); datanode.getPendingUncached().remove(cblock); iter.remove(); LOG.trace("Block {}: removing from PENDING_UNCACHED for node {} " + "because we only have {} cached replicas and we need " + "{}", cblock.getBlockId(), datanode.getDatanodeUuid(), numCached, neededCached ); } } int neededUncached = numCached - (pendingUncached.size() + neededCached); if (neededUncached > 0) { addNewPendingUncached(neededUncached, cblock, cached, pendingUncached); } else { int additionalCachedNeeded = neededCached - (numCached + pendingCached.size()); if (additionalCachedNeeded > 0) { addNewPendingCached(additionalCachedNeeded, cblock, cached, pendingCached); } } if ((neededCached == 0) && pendingUncached.isEmpty() && pendingCached.isEmpty()) { // we have nothing more to do with this block. LOG.trace("Block {}: removing from cachedBlocks, since neededCached " + "== 0, and pendingUncached and pendingCached are empty.", cblock.getBlockId() ); cbIter.remove(); } } } /** * Add new entries to the PendingUncached list. * * @param neededUncached The number of replicas that need to be uncached. * @param cachedBlock The block which needs to be uncached. * @param cached A list of DataNodes currently caching the block. * @param pendingUncached A list of DataNodes that will soon uncache the * block. */ private void addNewPendingUncached(int neededUncached, CachedBlock cachedBlock, List<DatanodeDescriptor> cached, List<DatanodeDescriptor> pendingUncached) { // Figure out which replicas can be uncached. 
LinkedList<DatanodeDescriptor> possibilities = new LinkedList<DatanodeDescriptor>(); for (DatanodeDescriptor datanode : cached) { if (!pendingUncached.contains(datanode)) { possibilities.add(datanode); } } while (neededUncached > 0) { if (possibilities.isEmpty()) { LOG.warn("Logic error: we're trying to uncache more replicas than " + "actually exist for " + cachedBlock); return; } DatanodeDescriptor datanode = possibilities.remove(random.nextInt(possibilities.size())); pendingUncached.add(datanode); boolean added = datanode.getPendingUncached().add(cachedBlock); assert added; neededUncached--; } } /** * Add new entries to the PendingCached list. * * @param neededCached The number of replicas that need to be cached. * @param cachedBlock The block which needs to be cached. * @param cached A list of DataNodes currently caching the block. * @param pendingCached A list of DataNodes that will soon cache the * block. */ private void addNewPendingCached(final int neededCached, CachedBlock cachedBlock, List<DatanodeDescriptor> cached, List<DatanodeDescriptor> pendingCached) { // To figure out which replicas can be cached, we consult the // blocksMap. We don't want to try to cache a corrupt replica, though. BlockInfo blockInfo = blockManager. getStoredBlock(new Block(cachedBlock.getBlockId())); if (blockInfo == null) { LOG.debug("Block {}: can't add new cached replicas," + " because there is no record of this block " + "on the NameNode.", cachedBlock.getBlockId()); return; } if (!blockInfo.isComplete()) { LOG.debug("Block {}: can't cache this block, because it is not yet" + " complete.", cachedBlock.getBlockId()); return; } // Filter the list of replicas to only the valid targets List<DatanodeDescriptor> possibilities = new LinkedList<DatanodeDescriptor>(); int numReplicas = blockInfo.getCapacity(); Collection<DatanodeDescriptor> corrupt = blockManager.getCorruptReplicas(blockInfo); int outOfCapacity = 0; for (int i = 0; i < numReplicas; i++) { DatanodeDescriptor datanode = blockInfo.getDatanode(i); if (datanode == null) { continue; } if (datanode.isDecommissioned() || datanode.isDecommissionInProgress()) { continue; } if (corrupt != null && corrupt.contains(datanode)) { continue; } if (pendingCached.contains(datanode) || cached.contains(datanode)) { continue; } long pendingBytes = 0; // Subtract pending cached blocks from effective capacity Iterator<CachedBlock> it = datanode.getPendingCached().iterator(); while (it.hasNext()) { CachedBlock cBlock = it.next(); BlockInfo info = blockManager.getStoredBlock(new Block(cBlock.getBlockId())); if (info != null) { pendingBytes -= info.getNumBytes(); } } it = datanode.getPendingUncached().iterator(); // Add pending uncached blocks from effective capacity while (it.hasNext()) { CachedBlock cBlock = it.next(); BlockInfo info = blockManager.getStoredBlock(new Block(cBlock.getBlockId())); if (info != null) { pendingBytes += info.getNumBytes(); } } long pendingCapacity = pendingBytes + datanode.getCacheRemaining(); if (pendingCapacity < blockInfo.getNumBytes()) { LOG.trace("Block {}: DataNode {} is not a valid possibility " + "because the block has size {}, but the DataNode only has {}" + "bytes of cache remaining ({} pending bytes, {} already cached.", blockInfo.getBlockId(), datanode.getDatanodeUuid(), blockInfo.getNumBytes(), pendingCapacity, pendingBytes, datanode.getCacheRemaining()); outOfCapacity++; continue; } possibilities.add(datanode); } List<DatanodeDescriptor> chosen = chooseDatanodesForCaching(possibilities, neededCached, 
blockManager.getDatanodeManager().getStaleInterval()); for (DatanodeDescriptor datanode : chosen) { LOG.trace("Block {}: added to PENDING_CACHED on DataNode {}", blockInfo.getBlockId(), datanode.getDatanodeUuid()); pendingCached.add(datanode); boolean added = datanode.getPendingCached().add(cachedBlock); assert added; } // We were unable to satisfy the requested replication factor if (neededCached > chosen.size()) { LOG.debug("Block {}: we only have {} of {} cached replicas." + " {} DataNodes have insufficient cache capacity.", blockInfo.getBlockId(), (cachedBlock.getReplication() - neededCached + chosen.size()), cachedBlock.getReplication(), outOfCapacity ); } } /** * Chooses datanode locations for caching from a list of valid possibilities. * Non-stale nodes are chosen before stale nodes. * * @param possibilities List of candidate datanodes * @param neededCached Number of replicas needed * @param staleInterval Age of a stale datanode * @return A list of chosen datanodes */ private static List<DatanodeDescriptor> chooseDatanodesForCaching( final List<DatanodeDescriptor> possibilities, final int neededCached, final long staleInterval) { // Make a copy that we can modify List<DatanodeDescriptor> targets = new ArrayList<DatanodeDescriptor>(possibilities); // Selected targets List<DatanodeDescriptor> chosen = new LinkedList<DatanodeDescriptor>(); // Filter out stale datanodes List<DatanodeDescriptor> stale = new LinkedList<DatanodeDescriptor>(); Iterator<DatanodeDescriptor> it = targets.iterator(); while (it.hasNext()) { DatanodeDescriptor d = it.next(); if (d.isStale(staleInterval)) { it.remove(); stale.add(d); } } // Select targets while (chosen.size() < neededCached) { // Try to use stale nodes if we're out of non-stale nodes, else we're done if (targets.isEmpty()) { if (!stale.isEmpty()) { targets = stale; } else { break; } } // Select a random target DatanodeDescriptor target = chooseRandomDatanodeByRemainingCapacity(targets); chosen.add(target); targets.remove(target); } return chosen; } /** * Choose a single datanode from the provided list of possible * targets, weighted by the percentage of free space remaining on the node. * * @return The chosen datanode */ private static DatanodeDescriptor chooseRandomDatanodeByRemainingCapacity( final List<DatanodeDescriptor> targets) { // Use a weighted probability to choose the target datanode float total = 0; for (DatanodeDescriptor d : targets) { total += d.getCacheRemainingPercent(); } // Give each datanode a portion of keyspace equal to its relative weight // [0, w1) selects d1, [w1, w2) selects d2, etc. TreeMap<Integer, DatanodeDescriptor> lottery = new TreeMap<Integer, DatanodeDescriptor>(); int offset = 0; for (DatanodeDescriptor d : targets) { // Since we're using floats, be paranoid about negative values int weight = Math.max(1, (int)((d.getCacheRemainingPercent() / total) * 1000000)); offset += weight; lottery.put(offset, d); } // Choose a number from [0, offset), which is the total amount of weight, // to select the winner DatanodeDescriptor winner = lottery.higherEntry(random.nextInt(offset)).getValue(); return winner; } }
29,796
36.339599
94
java
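Inside CacheReplicationMonitor above, chooseRandomDatanodeByRemainingCapacity() picks a target with probability proportional to its remaining cache, using a TreeMap keyed by cumulative weight and higherEntry() as the lottery draw. The standalone sketch below (hypothetical names, no Hadoop dependencies) shows the same technique over plain keys and integer weights.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;

// Standalone sketch of the TreeMap "lottery": each candidate owns a slice of
// [0, totalWeight) proportional to its weight, and higherEntry() finds the slice
// that a uniformly random number falls into.
public class WeightedLotteryDemo {
  private static final Random RANDOM = new Random();

  static <T> T chooseWeighted(Map<T, Integer> weights) {
    TreeMap<Integer, T> lottery = new TreeMap<>();
    int offset = 0;
    for (Map.Entry<T, Integer> e : weights.entrySet()) {
      offset += Math.max(1, e.getValue());   // every candidate keeps a non-zero slice
      lottery.put(offset, e.getKey());
    }
    // Pick a point in [0, offset); the owner is the first key strictly above it.
    return lottery.higherEntry(RANDOM.nextInt(offset)).getValue();
  }

  public static void main(String[] args) {
    Map<String, Integer> cacheRemaining = new LinkedHashMap<>();
    cacheRemaining.put("dn1", 70);   // most free cache, so chosen most often
    cacheRemaining.put("dn2", 20);
    cacheRemaining.put("dn3", 10);
    System.out.println(chooseWeighted(cacheRemaining));
  }
}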
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.net.Node; import org.apache.hadoop.net.NodeBase; import java.util.*; /** * The class is responsible for choosing the desired number of targets * for placing block replicas. * The strategy is that it tries its best to place the replicas to most racks. */ @InterfaceAudience.Private public class BlockPlacementPolicyRackFaultTolerant extends BlockPlacementPolicyDefault { @Override protected int[] getMaxNodesPerRack(int numOfChosen, int numOfReplicas) { int clusterSize = clusterMap.getNumOfLeaves(); int totalNumOfReplicas = numOfChosen + numOfReplicas; if (totalNumOfReplicas > clusterSize) { numOfReplicas -= (totalNumOfReplicas-clusterSize); totalNumOfReplicas = clusterSize; } // No calculation needed when there is only one rack or picking one node. int numOfRacks = clusterMap.getNumOfRacks(); if (numOfRacks == 1 || totalNumOfReplicas <= 1) { return new int[] {numOfReplicas, totalNumOfReplicas}; } if(totalNumOfReplicas<numOfRacks){ return new int[] {numOfReplicas, 1}; } int maxNodesPerRack = (totalNumOfReplicas - 1) / numOfRacks + 1; return new int[] {numOfReplicas, maxNodesPerRack}; } /** * Choose numOfReplicas in order: * 1. If total replica expected is less than numOfRacks in cluster, it choose * randomly. * 2. If total replica expected is bigger than numOfRacks, it choose: * 2a. Fill each rack exactly (maxNodesPerRack-1) replicas. * 2b. For some random racks, place one more replica to each one of them, until * numOfReplicas have been chosen. <br> * In the end, the difference of the numbers of replicas for each two racks * is no more than 1. * Either way it always prefer local storage. * @return local node of writer */ @Override protected Node chooseTargetInOrder(int numOfReplicas, Node writer, final Set<Node> excludedNodes, final long blocksize, final int maxNodesPerRack, final List<DatanodeStorageInfo> results, final boolean avoidStaleNodes, final boolean newBlock, EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException { int totalReplicaExpected = results.size() + numOfReplicas; int numOfRacks = clusterMap.getNumOfRacks(); if (totalReplicaExpected < numOfRacks || totalReplicaExpected % numOfRacks == 0) { writer = chooseOnce(numOfReplicas, writer, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); return writer; } assert totalReplicaExpected > (maxNodesPerRack -1) * numOfRacks; // Calculate numOfReplicas for filling each rack exactly (maxNodesPerRack-1) // replicas. 
HashMap<String, Integer> rackCounts = new HashMap<>(); for (DatanodeStorageInfo dsInfo : results) { String rack = dsInfo.getDatanodeDescriptor().getNetworkLocation(); Integer count = rackCounts.get(rack); if (count != null) { rackCounts.put(rack, count + 1); } else { rackCounts.put(rack, 1); } } int excess = 0; // Sum of the above (maxNodesPerRack-1) part of nodes in results for (int count : rackCounts.values()) { if (count > maxNodesPerRack -1) { excess += count - (maxNodesPerRack -1); } } numOfReplicas = Math.min(totalReplicaExpected - results.size(), (maxNodesPerRack -1) * numOfRacks - (results.size() - excess)); // Fill each rack exactly (maxNodesPerRack-1) replicas. writer = chooseOnce(numOfReplicas, writer, new HashSet<>(excludedNodes), blocksize, maxNodesPerRack -1, results, avoidStaleNodes, storageTypes); for (DatanodeStorageInfo resultStorage : results) { addToExcludedNodes(resultStorage.getDatanodeDescriptor(), excludedNodes); } // For some racks, place one more replica to each one of them. numOfReplicas = totalReplicaExpected - results.size(); chooseOnce(numOfReplicas, writer, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); return writer; } /** * Randomly choose <i>numOfReplicas</i> targets from the given <i>scope</i>. * Except that 1st replica prefer local storage. * @return local node of writer. */ private Node chooseOnce(int numOfReplicas, Node writer, final Set<Node> excludedNodes, final long blocksize, final int maxNodesPerRack, final List<DatanodeStorageInfo> results, final boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException { if (numOfReplicas == 0) { return writer; } writer = chooseLocalStorage(writer, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes, true) .getDatanodeDescriptor(); if (--numOfReplicas == 0) { return writer; } chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); return writer; } }
6,451
40.625806
88
java
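A side note on the file above (illustrative only, not part of the repository): the getMaxNodesPerRack override caps each rack at ceil(totalReplicas / numOfRacks), which is what keeps per-rack replica counts within one of each other once every rack is in use. A minimal standalone restatement of that arithmetic, with invented class and method names:

// Illustrative sketch only -- not part of the repository file above; class and
// method names are invented. It restates the rack-spread arithmetic used by
// getMaxNodesPerRack: with R total replicas over K racks, each rack gets at
// most ceil(R / K) replicas, so per-rack counts differ by at most one.
public class RackSpreadSketch {
  static int maxNodesPerRack(int totalReplicas, int numOfRacks) {
    if (numOfRacks == 1 || totalReplicas <= 1) {
      return totalReplicas;                        // trivial cases
    }
    if (totalReplicas < numOfRacks) {
      return 1;                                    // more racks than replicas
    }
    return (totalReplicas - 1) / numOfRacks + 1;   // ceil(totalReplicas / numOfRacks)
  }

  public static void main(String[] args) {
    System.out.println(maxNodesPerRack(10, 4));    // 3  -> a 3/3/2/2 split
    System.out.println(maxNodesPerRack(3, 5));     // 1  -> one replica per rack
  }
}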
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.List; /** A collection of block storage policies. */ public class BlockStoragePolicySuite { static final Logger LOG = LoggerFactory.getLogger(BlockStoragePolicySuite .class); public static final String STORAGE_POLICY_XATTR_NAME = "hsm.block.storage.policy.id"; public static final XAttr.NameSpace XAttrNS = XAttr.NameSpace.SYSTEM; public static final int ID_BIT_LENGTH = 4; @VisibleForTesting public static BlockStoragePolicySuite createDefaultSuite() { final BlockStoragePolicy[] policies = new BlockStoragePolicy[1 << ID_BIT_LENGTH]; final byte lazyPersistId = HdfsServerConstants.MEMORY_STORAGE_POLICY_ID; policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId, HdfsConstants.MEMORY_STORAGE_POLICY_NAME, new StorageType[]{StorageType.RAM_DISK, StorageType.DISK}, new StorageType[]{StorageType.DISK}, new StorageType[]{StorageType.DISK}, true); // Cannot be changed on regular files, but inherited. 
final byte allssdId = HdfsServerConstants.ALLSSD_STORAGE_POLICY_ID; policies[allssdId] = new BlockStoragePolicy(allssdId, HdfsConstants.ALLSSD_STORAGE_POLICY_NAME, new StorageType[]{StorageType.SSD}, new StorageType[]{StorageType.DISK}, new StorageType[]{StorageType.DISK}); final byte onessdId = HdfsServerConstants.ONESSD_STORAGE_POLICY_ID; policies[onessdId] = new BlockStoragePolicy(onessdId, HdfsConstants.ONESSD_STORAGE_POLICY_NAME, new StorageType[]{StorageType.SSD, StorageType.DISK}, new StorageType[]{StorageType.SSD, StorageType.DISK}, new StorageType[]{StorageType.SSD, StorageType.DISK}); final byte hotId = HdfsServerConstants.HOT_STORAGE_POLICY_ID; policies[hotId] = new BlockStoragePolicy(hotId, HdfsConstants.HOT_STORAGE_POLICY_NAME, new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY, new StorageType[]{StorageType.ARCHIVE}); final byte warmId = HdfsServerConstants.WARM_STORAGE_POLICY_ID; policies[warmId] = new BlockStoragePolicy(warmId, HdfsConstants.WARM_STORAGE_POLICY_NAME, new StorageType[]{StorageType.DISK, StorageType.ARCHIVE}, new StorageType[]{StorageType.DISK, StorageType.ARCHIVE}, new StorageType[]{StorageType.DISK, StorageType.ARCHIVE}); final byte coldId = HdfsServerConstants.COLD_STORAGE_POLICY_ID; policies[coldId] = new BlockStoragePolicy(coldId, HdfsConstants.COLD_STORAGE_POLICY_NAME, new StorageType[]{StorageType.ARCHIVE}, StorageType.EMPTY_ARRAY, StorageType.EMPTY_ARRAY); return new BlockStoragePolicySuite(hotId, policies); } private final byte defaultPolicyID; private final BlockStoragePolicy[] policies; public BlockStoragePolicySuite(byte defaultPolicyID, BlockStoragePolicy[] policies) { this.defaultPolicyID = defaultPolicyID; this.policies = policies; } /** @return the corresponding policy. */ public BlockStoragePolicy getPolicy(byte id) { // id == 0 means policy not specified. return id == 0? getDefaultPolicy(): policies[id]; } /** @return the default policy. */ public BlockStoragePolicy getDefaultPolicy() { return getPolicy(defaultPolicyID); } public BlockStoragePolicy getPolicy(String policyName) { Preconditions.checkNotNull(policyName); if (policies != null) { for (BlockStoragePolicy policy : policies) { if (policy != null && policy.getName().equalsIgnoreCase(policyName)) { return policy; } } } return null; } public BlockStoragePolicy[] getAllPolicies() { List<BlockStoragePolicy> list = Lists.newArrayList(); if (policies != null) { for (BlockStoragePolicy policy : policies) { if (policy != null) { list.add(policy); } } } return list.toArray(new BlockStoragePolicy[list.size()]); } public static String buildXAttrName() { return StringUtils.toLowerCase(XAttrNS.toString()) + "." + STORAGE_POLICY_XATTR_NAME; } public static XAttr buildXAttr(byte policyId) { final String name = buildXAttrName(); return XAttrHelper.buildXAttr(name, new byte[]{policyId}); } public static boolean isStoragePolicyXAttr(XAttr xattr) { return xattr != null && xattr.getNameSpace() == XAttrNS && xattr.getName().equals(STORAGE_POLICY_XATTR_NAME); } }
5,838
38.187919
78
java
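A quick illustration of the lookup rule in BlockStoragePolicySuite above: policy id 0 means "unspecified" and resolves to the default policy, while any other id indexes directly into the 1 << ID_BIT_LENGTH slot array. The sketch below is not from the file; the class name and the ids and names stored in the array are placeholders.

// Illustrative sketch only; the ids and policy names below are placeholders.
public class PolicyLookupSketch {
  static final String[] POLICIES = new String[1 << 4];   // 16 slots, as with ID_BIT_LENGTH = 4
  static final byte DEFAULT_ID = 7;                      // stand-in for the suite's default id

  static String resolve(byte id) {
    // id == 0 means "policy not specified" -> fall back to the default policy
    return id == 0 ? POLICIES[DEFAULT_ID] : POLICIES[id];
  }

  public static void main(String[] args) {
    POLICIES[DEFAULT_ID] = "HOT";
    POLICIES[12] = "ALL_SSD";
    System.out.println(resolve((byte) 0));    // HOT (unspecified -> default)
    System.out.println(resolve((byte) 12));   // ALL_SSD
  }
}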
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.*; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NetworkTopologyWithNodeGroup; import org.apache.hadoop.net.Node; import org.apache.hadoop.net.NodeBase; /** The class is responsible for choosing the desired number of targets * for placing block replicas on environment with node-group layer. * The replica placement strategy is adjusted to: * If the writer is on a datanode, the 1st replica is placed on the local * node (or local node-group), otherwise a random datanode. * The 2nd replica is placed on a datanode that is on a different rack with 1st * replica node. * The 3rd replica is placed on a datanode which is on a different node-group * but the same rack as the second replica node. */ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefault { protected BlockPlacementPolicyWithNodeGroup(Configuration conf, FSClusterStats stats, NetworkTopology clusterMap, DatanodeManager datanodeManager) { initialize(conf, stats, clusterMap, host2datanodeMap); } protected BlockPlacementPolicyWithNodeGroup() { } public void initialize(Configuration conf, FSClusterStats stats, NetworkTopology clusterMap, Host2NodesMap host2datanodeMap) { super.initialize(conf, stats, clusterMap, host2datanodeMap); } /** choose local node of localMachine as the target. * if localMachine is not available, choose a node on the same nodegroup or * rack instead. 
* @return the chosen node */ @Override protected DatanodeStorageInfo chooseLocalStorage(Node localMachine, Set<Node> excludedNodes, long blocksize, int maxNodesPerRack, List<DatanodeStorageInfo> results, boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes, boolean fallbackToLocalRack) throws NotEnoughReplicasException { // if no local machine, randomly choose one node if (localMachine == null) return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); // otherwise try local machine first if (localMachine instanceof DatanodeDescriptor) { DatanodeDescriptor localDataNode = (DatanodeDescriptor)localMachine; if (excludedNodes.add(localMachine)) { // was not in the excluded list for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes .entrySet().iterator(); iter.hasNext(); ) { Map.Entry<StorageType, Integer> entry = iter.next(); for (DatanodeStorageInfo localStorage : DFSUtil.shuffle( localDataNode.getStorageInfos())) { StorageType type = entry.getKey(); if (addIfIsGoodTarget(localStorage, excludedNodes, blocksize, maxNodesPerRack, false, results, avoidStaleNodes, type) >= 0) { int num = entry.getValue(); if (num == 1) { iter.remove(); } else { entry.setValue(num - 1); } return localStorage; } } } } } // try a node on local node group DatanodeStorageInfo chosenStorage = chooseLocalNodeGroup( (NetworkTopologyWithNodeGroup)clusterMap, localMachine, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); if (chosenStorage != null) { return chosenStorage; } if (!fallbackToLocalRack) { return null; } // try a node on local rack return chooseLocalRack(localMachine, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } /** @return the node of the second replica */ private static DatanodeDescriptor secondNode(Node localMachine, List<DatanodeStorageInfo> results) { // find the second replica for(DatanodeStorageInfo nextStorage : results) { DatanodeDescriptor nextNode = nextStorage.getDatanodeDescriptor(); if (nextNode != localMachine) { return nextNode; } } return null; } @Override protected DatanodeStorageInfo chooseLocalRack(Node localMachine, Set<Node> excludedNodes, long blocksize, int maxNodesPerRack, List<DatanodeStorageInfo> results, boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException { // no local machine, so choose a random machine if (localMachine == null) { return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } // choose one from the local rack, but off-nodegroup try { final String scope = NetworkTopology.getFirstHalf(localMachine.getNetworkLocation()); return chooseRandom(scope, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } catch (NotEnoughReplicasException e1) { // find the second replica final DatanodeDescriptor newLocal = secondNode(localMachine, results); if (newLocal != null) { try { return chooseRandom( clusterMap.getRack(newLocal.getNetworkLocation()), excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } catch(NotEnoughReplicasException e2) { //otherwise randomly choose one from the network return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } } else { //otherwise randomly choose one from the network return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, 
avoidStaleNodes, storageTypes); } } } /** * {@inheritDoc} */ @Override protected void chooseRemoteRack(int numOfReplicas, DatanodeDescriptor localMachine, Set<Node> excludedNodes, long blocksize, int maxReplicasPerRack, List<DatanodeStorageInfo> results, boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException { int oldNumOfReplicas = results.size(); final String rackLocation = NetworkTopology.getFirstHalf( localMachine.getNetworkLocation()); try { // randomly choose from remote racks chooseRandom(numOfReplicas, "~" + rackLocation, excludedNodes, blocksize, maxReplicasPerRack, results, avoidStaleNodes, storageTypes); } catch (NotEnoughReplicasException e) { // fall back to the local rack chooseRandom(numOfReplicas - (results.size() - oldNumOfReplicas), rackLocation, excludedNodes, blocksize, maxReplicasPerRack, results, avoidStaleNodes, storageTypes); } } /* choose one node from the nodegroup that <i>localMachine</i> is on. * if no such node is available, choose one node from the nodegroup where * a second replica is on. * if still no such node is available, choose a random node in the cluster. * @return the chosen node */ private DatanodeStorageInfo chooseLocalNodeGroup( NetworkTopologyWithNodeGroup clusterMap, Node localMachine, Set<Node> excludedNodes, long blocksize, int maxNodesPerRack, List<DatanodeStorageInfo> results, boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException { // no local machine, so choose a random machine if (localMachine == null) { return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } // choose one from the local node group try { return chooseRandom( clusterMap.getNodeGroup(localMachine.getNetworkLocation()), excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } catch (NotEnoughReplicasException e1) { final DatanodeDescriptor newLocal = secondNode(localMachine, results); if (newLocal != null) { try { return chooseRandom( clusterMap.getNodeGroup(newLocal.getNetworkLocation()), excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } catch(NotEnoughReplicasException e2) { //otherwise randomly choose one from the network return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } } else { //otherwise randomly choose one from the network return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } } } @Override protected String getRack(final DatanodeInfo cur) { String nodeGroupString = cur.getNetworkLocation(); return NetworkTopology.getFirstHalf(nodeGroupString); } /** * Find other nodes in the same nodegroup of <i>localMachine</i> and add them * into <i>excludeNodes</i> as replica should not be duplicated for nodes * within the same nodegroup * @return number of new excluded nodes */ @Override protected int addToExcludedNodes(DatanodeDescriptor chosenNode, Set<Node> excludedNodes) { int countOfExcludedNodes = 0; String nodeGroupScope = chosenNode.getNetworkLocation(); List<Node> leafNodes = clusterMap.getLeaves(nodeGroupScope); for (Node leafNode : leafNodes) { if (excludedNodes.add(leafNode)) { // not a existing node in excludedNodes countOfExcludedNodes++; } } countOfExcludedNodes += addDependentNodesToExcludedNodes( chosenNode, excludedNodes); return countOfExcludedNodes; } /** * Add all nodes from a dependent nodes list to 
excludedNodes. * @return number of new excluded nodes */ private int addDependentNodesToExcludedNodes(DatanodeDescriptor chosenNode, Set<Node> excludedNodes) { if (this.host2datanodeMap == null) { return 0; } int countOfExcludedNodes = 0; for(String hostname : chosenNode.getDependentHostNames()) { DatanodeDescriptor node = this.host2datanodeMap.getDataNodeByHostName(hostname); if(node!=null) { if (excludedNodes.add(node)) { countOfExcludedNodes++; } } else { LOG.warn("Not able to find datanode " + hostname + " which has dependency with datanode " + chosenNode.getHostName()); } } return countOfExcludedNodes; } /** * Pick up replica node set for deleting replica as over-replicated. * First set contains replica nodes on rack with more than one * replica while second set contains remaining replica nodes. * If first is not empty, divide first set into two subsets: * moreThanOne contains nodes on nodegroup with more than one replica * exactlyOne contains the remaining nodes in first set * then pickup priSet if not empty. * If first is empty, then pick second. */ @Override public Collection<DatanodeStorageInfo> pickupReplicaSet( Collection<DatanodeStorageInfo> first, Collection<DatanodeStorageInfo> second) { // If no replica within same rack, return directly. if (first.isEmpty()) { return second; } // Split data nodes in the first set into two sets, // moreThanOne contains nodes on nodegroup with more than one replica // exactlyOne contains the remaining nodes Map<String, List<DatanodeStorageInfo>> nodeGroupMap = new HashMap<String, List<DatanodeStorageInfo>>(); for(DatanodeStorageInfo storage : first) { final String nodeGroupName = NetworkTopology.getLastHalf( storage.getDatanodeDescriptor().getNetworkLocation()); List<DatanodeStorageInfo> storageList = nodeGroupMap.get(nodeGroupName); if (storageList == null) { storageList = new ArrayList<DatanodeStorageInfo>(); nodeGroupMap.put(nodeGroupName, storageList); } storageList.add(storage); } final List<DatanodeStorageInfo> moreThanOne = new ArrayList<DatanodeStorageInfo>(); final List<DatanodeStorageInfo> exactlyOne = new ArrayList<DatanodeStorageInfo>(); // split nodes into two sets for(List<DatanodeStorageInfo> datanodeList : nodeGroupMap.values()) { if (datanodeList.size() == 1 ) { // exactlyOne contains nodes on nodegroup with exactly one replica exactlyOne.add(datanodeList.get(0)); } else { // moreThanOne contains nodes on nodegroup with more than one replica moreThanOne.addAll(datanodeList); } } return moreThanOne.isEmpty()? exactlyOne : moreThanOne; } }
13,845
39.017341
91
java
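The node-group policy above repeatedly splits a network location into a rack part and a node-group part via NetworkTopology.getFirstHalf / getLastHalf. A small standalone sketch of that split, assuming locations of the form /dc/rack/nodegroup; the helper names here are invented:

// Illustrative sketch only; helper names are invented, and the split assumes
// the node group is always the last path component of the network location.
public class NodeGroupLocationSketch {
  static String rackOf(String networkLocation) {
    int idx = networkLocation.lastIndexOf('/');
    return networkLocation.substring(0, idx);    // e.g. "/d1/rack1"
  }

  static String nodeGroupOf(String networkLocation) {
    int idx = networkLocation.lastIndexOf('/');
    return networkLocation.substring(idx);       // e.g. "/nodegroup1"
  }

  public static void main(String[] args) {
    String loc = "/d1/rack1/nodegroup1";
    System.out.println(rackOf(loc));        // /d1/rack1
    System.out.println(nodeGroupOf(loc));   // /nodegroup1
  }
}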
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStatsMXBean.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.Map; import org.apache.hadoop.fs.StorageType; /** * This is an interface used to retrieve statistic information related to * block management. */ public interface BlockStatsMXBean { /** * The statistics of storage types. * * @return get storage statistics per storage type */ Map<StorageType, StorageTypeStats> getStorageTypeStats(); }
1,238
32.486486
75
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.Arrays; import java.util.Iterator; import java.util.List; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State; import org.apache.hadoop.hdfs.server.protocol.StorageReport; /** * A Datanode has one or more storages. A storage in the Datanode is represented * by this class. */ public class DatanodeStorageInfo { public static final DatanodeStorageInfo[] EMPTY_ARRAY = {}; public static DatanodeInfo[] toDatanodeInfos( DatanodeStorageInfo[] storages) { return storages == null ? null: toDatanodeInfos(Arrays.asList(storages)); } static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) { final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()]; for(int i = 0; i < storages.size(); i++) { datanodes[i] = storages.get(i).getDatanodeDescriptor(); } return datanodes; } static DatanodeDescriptor[] toDatanodeDescriptors( DatanodeStorageInfo[] storages) { DatanodeDescriptor[] datanodes = new DatanodeDescriptor[storages.length]; for (int i = 0; i < storages.length; ++i) { datanodes[i] = storages[i].getDatanodeDescriptor(); } return datanodes; } public static String[] toStorageIDs(DatanodeStorageInfo[] storages) { if (storages == null) { return null; } String[] storageIDs = new String[storages.length]; for(int i = 0; i < storageIDs.length; i++) { storageIDs[i] = storages[i].getStorageID(); } return storageIDs; } public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) { if (storages == null) { return null; } StorageType[] storageTypes = new StorageType[storages.length]; for(int i = 0; i < storageTypes.length; i++) { storageTypes[i] = storages[i].getStorageType(); } return storageTypes; } public void updateFromStorage(DatanodeStorage storage) { state = storage.getState(); storageType = storage.getStorageType(); } /** * Iterates over the list of blocks belonging to the data-node. */ class BlockIterator implements Iterator<BlockInfo> { private BlockInfo current; BlockIterator(BlockInfo head) { this.current = head; } public boolean hasNext() { return current != null; } public BlockInfo next() { BlockInfo res = current; current = current.getNext(current.findStorageInfo(DatanodeStorageInfo.this)); return res; } public void remove() { throw new UnsupportedOperationException("Sorry. 
can't remove."); } } private final DatanodeDescriptor dn; private final String storageID; private StorageType storageType; private State state; private long capacity; private long dfsUsed; private volatile long remaining; private long blockPoolUsed; private volatile BlockInfo blockList = null; private int numBlocks = 0; // The ID of the last full block report which updated this storage. private long lastBlockReportId = 0; /** The number of block reports received */ private int blockReportCount = 0; /** * Set to false on any NN failover, and reset to true * whenever a block report is received. */ private boolean heartbeatedSinceFailover = false; /** * At startup or at failover, the storages in the cluster may have pending * block deletions from a previous incarnation of the NameNode. The block * contents are considered as stale until a block report is received. When a * storage is considered as stale, the replicas on it are also considered as * stale. If any block has at least one stale replica, then no invalidations * will be processed for this block. See HDFS-1972. */ private boolean blockContentsStale = true; DatanodeStorageInfo(DatanodeDescriptor dn, DatanodeStorage s) { this.dn = dn; this.storageID = s.getStorageID(); this.storageType = s.getStorageType(); this.state = s.getState(); } int getBlockReportCount() { return blockReportCount; } void setBlockReportCount(int blockReportCount) { this.blockReportCount = blockReportCount; } public boolean areBlockContentsStale() { return blockContentsStale; } void markStaleAfterFailover() { heartbeatedSinceFailover = false; blockContentsStale = true; } void receivedHeartbeat(StorageReport report) { updateState(report); heartbeatedSinceFailover = true; } void receivedBlockReport() { if (heartbeatedSinceFailover) { blockContentsStale = false; } blockReportCount++; } @VisibleForTesting public void setUtilizationForTesting(long capacity, long dfsUsed, long remaining, long blockPoolUsed) { this.capacity = capacity; this.dfsUsed = dfsUsed; this.remaining = remaining; this.blockPoolUsed = blockPoolUsed; } long getLastBlockReportId() { return lastBlockReportId; } void setLastBlockReportId(long lastBlockReportId) { this.lastBlockReportId = lastBlockReportId; } State getState() { return this.state; } void setState(State state) { this.state = state; } boolean areBlocksOnFailedStorage() { return getState() == State.FAILED && numBlocks != 0; } public String getStorageID() { return storageID; } public StorageType getStorageType() { return storageType; } long getCapacity() { return capacity; } long getDfsUsed() { return dfsUsed; } long getRemaining() { return remaining; } long getBlockPoolUsed() { return blockPoolUsed; } public AddBlockResult addBlock(BlockInfo b) { // First check whether the block belongs to a different storage // on the same DN. AddBlockResult result = AddBlockResult.ADDED; DatanodeStorageInfo otherStorage = b.findStorageInfo(getDatanodeDescriptor()); if (otherStorage != null) { if (otherStorage != this) { // The block belongs to a different storage. Remove it first. otherStorage.removeBlock(b); result = AddBlockResult.REPLACED; } else { // The block is already associated with this storage. 
return AddBlockResult.ALREADY_EXIST; } } // add to the head of the data-node list b.addStorage(this); blockList = b.listInsert(blockList, this); numBlocks++; return result; } public boolean removeBlock(BlockInfo b) { blockList = b.listRemove(blockList, this); if (b.removeStorage(this)) { numBlocks--; return true; } else { return false; } } int numBlocks() { return numBlocks; } Iterator<BlockInfo> getBlockIterator() { return new BlockIterator(blockList); } /** * Move block to the head of the list of blocks belonging to the data-node. * @return the index of the head of the blockList */ int moveBlockToHead(BlockInfo b, int curIndex, int headIndex) { blockList = b.moveBlockToHead(blockList, this, curIndex, headIndex); return curIndex; } /** * Used for testing only * @return the head of the blockList */ @VisibleForTesting BlockInfo getBlockListHeadForTesting(){ return blockList; } void updateState(StorageReport r) { capacity = r.getCapacity(); dfsUsed = r.getDfsUsed(); remaining = r.getRemaining(); blockPoolUsed = r.getBlockPoolUsed(); } public DatanodeDescriptor getDatanodeDescriptor() { return dn; } /** Increment the number of blocks scheduled for each given storage */ public static void incrementBlocksScheduled(DatanodeStorageInfo... storages) { for (DatanodeStorageInfo s : storages) { s.getDatanodeDescriptor().incrementBlocksScheduled(s.getStorageType()); } } @Override public boolean equals(Object obj) { if (this == obj) { return true; } else if (obj == null || !(obj instanceof DatanodeStorageInfo)) { return false; } final DatanodeStorageInfo that = (DatanodeStorageInfo)obj; return this.storageID.equals(that.storageID); } @Override public int hashCode() { return storageID.hashCode(); } @Override public String toString() { return "[" + storageType + "]" + storageID + ":" + state + ":" + dn; } StorageReport toStorageReport() { return new StorageReport( new DatanodeStorage(storageID, state, storageType), false, capacity, dfsUsed, remaining, blockPoolUsed); } static Iterable<StorageType> toStorageTypes( final Iterable<DatanodeStorageInfo> infos) { return new Iterable<StorageType>() { @Override public Iterator<StorageType> iterator() { return new Iterator<StorageType>() { final Iterator<DatanodeStorageInfo> i = infos.iterator(); @Override public boolean hasNext() {return i.hasNext();} @Override public StorageType next() {return i.next().getStorageType();} @Override public void remove() { throw new UnsupportedOperationException(); } }; } }; } /** @return the first {@link DatanodeStorageInfo} corresponding to * the given datanode */ static DatanodeStorageInfo getDatanodeStorageInfo( final Iterable<DatanodeStorageInfo> infos, final DatanodeDescriptor datanode) { if (datanode == null) { return null; } for(DatanodeStorageInfo storage : infos) { if (storage.getDatanodeDescriptor() == datanode) { return storage; } } return null; } static enum AddBlockResult { ADDED, REPLACED, ALREADY_EXIST } }
10,685
26.828125
83
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Manage the heartbeats received from datanodes. * The datanode list and statistics are synchronized * by the heartbeat manager lock. */ class HeartbeatManager implements DatanodeStatistics { static final Logger LOG = LoggerFactory.getLogger(HeartbeatManager.class); /** * Stores a subset of the datanodeMap in DatanodeManager, * containing nodes that are considered alive. * The HeartbeatMonitor periodically checks for out-dated entries, * and removes them from the list. * It is synchronized by the heartbeat manager lock. */ private final List<DatanodeDescriptor> datanodes = new ArrayList<DatanodeDescriptor>(); /** Statistics, which are synchronized by the heartbeat manager lock. 
*/ private final Stats stats = new Stats(); /** The time period to check for expired datanodes */ private final long heartbeatRecheckInterval; /** Heartbeat monitor thread */ private final Daemon heartbeatThread = new Daemon(new Monitor()); final Namesystem namesystem; final BlockManager blockManager; HeartbeatManager(final Namesystem namesystem, final BlockManager blockManager, final Configuration conf) { this.namesystem = namesystem; this.blockManager = blockManager; boolean avoidStaleDataNodesForWrite = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT); long recheckInterval = conf.getInt( DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 min long staleInterval = conf.getLong( DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);// 30s if (avoidStaleDataNodesForWrite && staleInterval < recheckInterval) { this.heartbeatRecheckInterval = staleInterval; LOG.info("Setting heartbeat recheck interval to " + staleInterval + " since " + DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY + " is less than " + DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY); } else { this.heartbeatRecheckInterval = recheckInterval; } } void activate(Configuration conf) { heartbeatThread.start(); } void close() { heartbeatThread.interrupt(); try { // This will no effect if the thread hasn't yet been started. heartbeatThread.join(3000); } catch (InterruptedException e) { } } synchronized int getLiveDatanodeCount() { return datanodes.size(); } @Override public synchronized long getCapacityTotal() { return stats.capacityTotal; } @Override public synchronized long getCapacityUsed() { return stats.capacityUsed; } @Override public synchronized float getCapacityUsedPercent() { return DFSUtilClient.getPercentUsed(stats.capacityUsed, stats.capacityTotal); } @Override public synchronized long getCapacityRemaining() { return stats.capacityRemaining; } @Override public synchronized float getCapacityRemainingPercent() { return DFSUtilClient.getPercentRemaining(stats.capacityRemaining, stats.capacityTotal); } @Override public synchronized long getBlockPoolUsed() { return stats.blockPoolUsed; } @Override public synchronized float getPercentBlockPoolUsed() { return DFSUtilClient.getPercentUsed(stats.blockPoolUsed, stats.capacityTotal); } @Override public synchronized long getCapacityUsedNonDFS() { final long nonDFSUsed = stats.capacityTotal - stats.capacityRemaining - stats.capacityUsed; return nonDFSUsed < 0L? 
0L : nonDFSUsed; } @Override public synchronized int getXceiverCount() { return stats.xceiverCount; } @Override public synchronized int getInServiceXceiverCount() { return stats.nodesInServiceXceiverCount; } @Override public synchronized int getNumDatanodesInService() { return stats.nodesInService; } @Override public synchronized long getCacheCapacity() { return stats.cacheCapacity; } @Override public synchronized long getCacheUsed() { return stats.cacheUsed; } @Override public synchronized long[] getStats() { return new long[] {getCapacityTotal(), getCapacityUsed(), getCapacityRemaining(), -1L, -1L, -1L, -1L}; } @Override public synchronized int getExpiredHeartbeats() { return stats.expiredHeartbeats; } @Override public Map<StorageType, StorageTypeStats> getStorageTypeStats() { return stats.statsMap.get(); } synchronized void register(final DatanodeDescriptor d) { if (!d.isAlive) { addDatanode(d); //update its timestamp d.updateHeartbeatState(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0, null); } } synchronized DatanodeDescriptor[] getDatanodes() { return datanodes.toArray(new DatanodeDescriptor[datanodes.size()]); } synchronized void addDatanode(final DatanodeDescriptor d) { // update in-service node count stats.add(d); datanodes.add(d); d.isAlive = true; } synchronized void removeDatanode(DatanodeDescriptor node) { if (node.isAlive) { stats.subtract(node); datanodes.remove(node); node.isAlive = false; } } synchronized void updateHeartbeat(final DatanodeDescriptor node, StorageReport[] reports, long cacheCapacity, long cacheUsed, int xceiverCount, int failedVolumes, VolumeFailureSummary volumeFailureSummary) { stats.subtract(node); node.updateHeartbeat(reports, cacheCapacity, cacheUsed, xceiverCount, failedVolumes, volumeFailureSummary); stats.add(node); } synchronized void startDecommission(final DatanodeDescriptor node) { if (!node.isAlive) { LOG.info("Dead node {} is decommissioned immediately.", node); node.setDecommissioned(); } else { stats.subtract(node); node.startDecommission(); stats.add(node); } } synchronized void stopDecommission(final DatanodeDescriptor node) { LOG.info("Stopping decommissioning of {} node {}", node.isAlive ? "live" : "dead", node); if (!node.isAlive) { node.stopDecommission(); } else { stats.subtract(node); node.stopDecommission(); stats.add(node); } } /** * Check if there are any expired heartbeats, and if so, * whether any blocks have to be re-replicated. * While removing dead datanodes, make sure that only one datanode is marked * dead at a time within the synchronized section. Otherwise, a cascading * effect causes more datanodes to be declared dead. * Check if there are any failed storage and if so, * Remove all the blocks on the storage. It also covers the following less * common scenarios. After DatanodeStorage is marked FAILED, it is still * possible to receive IBR for this storage. * 1) DN could deliver IBR for failed storage due to its implementation. * a) DN queues a pending IBR request. * b) The storage of the block fails. * c) DN first sends HB, NN will mark the storage FAILED. * d) DN then sends the pending IBR request. * 2) SBN processes block request from pendingDNMessages. * It is possible to have messages in pendingDNMessages that refer * to some failed storage. * a) SBN receives a IBR and put it in pendingDNMessages. * b) The storage of the block fails. * c) Edit log replay get the IBR from pendingDNMessages. * Alternatively, we can resolve these scenarios with the following approaches. * A. Make sure DN don't deliver IBR for failed storage. 
* B. Remove all blocks in PendingDataNodeMessages for the failed storage * when we remove all blocks from BlocksMap for that storage. */ void heartbeatCheck() { final DatanodeManager dm = blockManager.getDatanodeManager(); // It's OK to check safe mode w/o taking the lock here, we re-check // for safe mode after taking the lock before removing a datanode. if (namesystem.isInStartupSafeMode()) { return; } boolean allAlive = false; while (!allAlive) { // locate the first dead node. DatanodeID dead = null; // locate the first failed storage that isn't on a dead node. DatanodeStorageInfo failedStorage = null; // check the number of stale nodes int numOfStaleNodes = 0; int numOfStaleStorages = 0; synchronized(this) { for (DatanodeDescriptor d : datanodes) { if (dead == null && dm.isDatanodeDead(d)) { stats.incrExpiredHeartbeats(); dead = d; } if (d.isStale(dm.getStaleInterval())) { numOfStaleNodes++; } DatanodeStorageInfo[] storageInfos = d.getStorageInfos(); for(DatanodeStorageInfo storageInfo : storageInfos) { if (storageInfo.areBlockContentsStale()) { numOfStaleStorages++; } if (failedStorage == null && storageInfo.areBlocksOnFailedStorage() && d != dead) { failedStorage = storageInfo; } } } // Set the number of stale nodes in the DatanodeManager dm.setNumStaleNodes(numOfStaleNodes); dm.setNumStaleStorages(numOfStaleStorages); } allAlive = dead == null && failedStorage == null; if (dead != null) { // acquire the fsnamesystem lock, and then remove the dead node. namesystem.writeLock(); try { if (namesystem.isInStartupSafeMode()) { return; } synchronized(this) { dm.removeDeadDatanode(dead); } } finally { namesystem.writeUnlock(); } } if (failedStorage != null) { // acquire the fsnamesystem lock, and remove blocks on the storage. namesystem.writeLock(); try { if (namesystem.isInStartupSafeMode()) { return; } synchronized(this) { blockManager.removeBlocksAssociatedTo(failedStorage); } } finally { namesystem.writeUnlock(); } } } } /** Periodically check heartbeat and update block key */ private class Monitor implements Runnable { private long lastHeartbeatCheck; private long lastBlockKeyUpdate; @Override public void run() { while(namesystem.isRunning()) { try { final long now = Time.monotonicNow(); if (lastHeartbeatCheck + heartbeatRecheckInterval < now) { heartbeatCheck(); lastHeartbeatCheck = now; } if (blockManager.shouldUpdateBlockKey(now - lastBlockKeyUpdate)) { synchronized(HeartbeatManager.this) { for(DatanodeDescriptor d : datanodes) { d.needKeyUpdate = true; } } lastBlockKeyUpdate = now; } } catch (Exception e) { LOG.error("Exception while checking heartbeat", e); } try { Thread.sleep(5000); // 5 seconds } catch (InterruptedException ie) { } } } } /** Datanode statistics. * For decommissioning/decommissioned nodes, only used capacity is counted. 
*/ private static class Stats { private final StorageTypeStatsMap statsMap = new StorageTypeStatsMap(); private long capacityTotal = 0L; private long capacityUsed = 0L; private long capacityRemaining = 0L; private long blockPoolUsed = 0L; private int xceiverCount = 0; private long cacheCapacity = 0L; private long cacheUsed = 0L; private int nodesInService = 0; private int nodesInServiceXceiverCount = 0; private int expiredHeartbeats = 0; private void add(final DatanodeDescriptor node) { capacityUsed += node.getDfsUsed(); blockPoolUsed += node.getBlockPoolUsed(); xceiverCount += node.getXceiverCount(); if (!(node.isDecommissionInProgress() || node.isDecommissioned())) { nodesInService++; nodesInServiceXceiverCount += node.getXceiverCount(); capacityTotal += node.getCapacity(); capacityRemaining += node.getRemaining(); } else { capacityTotal += node.getDfsUsed(); } cacheCapacity += node.getCacheCapacity(); cacheUsed += node.getCacheUsed(); Set<StorageType> storageTypes = new HashSet<>(); for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) { statsMap.addStorage(storageInfo, node); storageTypes.add(storageInfo.getStorageType()); } for (StorageType storageType : storageTypes) { statsMap.addNode(storageType, node); } } private void subtract(final DatanodeDescriptor node) { capacityUsed -= node.getDfsUsed(); blockPoolUsed -= node.getBlockPoolUsed(); xceiverCount -= node.getXceiverCount(); if (!(node.isDecommissionInProgress() || node.isDecommissioned())) { nodesInService--; nodesInServiceXceiverCount -= node.getXceiverCount(); capacityTotal -= node.getCapacity(); capacityRemaining -= node.getRemaining(); } else { capacityTotal -= node.getDfsUsed(); } cacheCapacity -= node.getCacheCapacity(); cacheUsed -= node.getCacheUsed(); Set<StorageType> storageTypes = new HashSet<>(); for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) { statsMap.subtractStorage(storageInfo, node); storageTypes.add(storageInfo.getStorageType()); } for (StorageType storageType : storageTypes) { statsMap.subtractNode(storageType, node); } } /** Increment expired heartbeat counter. */ private void incrExpiredHeartbeats() { expiredHeartbeats++; } } /** StorageType specific statistics. * For decommissioning/decommissioned nodes, only used capacity is counted. 
*/ static final class StorageTypeStatsMap { private Map<StorageType, StorageTypeStats> storageTypeStatsMap = new IdentityHashMap<>(); private StorageTypeStatsMap() {} private StorageTypeStatsMap(StorageTypeStatsMap other) { storageTypeStatsMap = new IdentityHashMap<>(other.storageTypeStatsMap); for (Map.Entry<StorageType, StorageTypeStats> entry : storageTypeStatsMap.entrySet()) { entry.setValue(new StorageTypeStats(entry.getValue())); } } private Map<StorageType, StorageTypeStats> get() { return Collections.unmodifiableMap(storageTypeStatsMap); } private void addNode(StorageType storageType, final DatanodeDescriptor node) { StorageTypeStats storageTypeStats = storageTypeStatsMap.get(storageType); if (storageTypeStats == null) { storageTypeStats = new StorageTypeStats(); storageTypeStatsMap.put(storageType, storageTypeStats); } storageTypeStats.addNode(node); } private void addStorage(final DatanodeStorageInfo info, final DatanodeDescriptor node) { StorageTypeStats storageTypeStats = storageTypeStatsMap.get(info.getStorageType()); if (storageTypeStats == null) { storageTypeStats = new StorageTypeStats(); storageTypeStatsMap.put(info.getStorageType(), storageTypeStats); } storageTypeStats.addStorage(info, node); } private void subtractStorage(final DatanodeStorageInfo info, final DatanodeDescriptor node) { StorageTypeStats storageTypeStats = storageTypeStatsMap.get(info.getStorageType()); if (storageTypeStats != null) { storageTypeStats.subtractStorage(info, node); } } private void subtractNode(StorageType storageType, final DatanodeDescriptor node) { StorageTypeStats storageTypeStats = storageTypeStatsMap.get(storageType); if (storageTypeStats != null) { storageTypeStats.subtractNode(node); } } } }
17,938
32.158965
89
java
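One detail of the statistics above worth spelling out: getCapacityUsedNonDFS() is derived rather than reported, and it is clamped at zero because the three tracked totals can transiently disagree. A minimal sketch of that calculation (class and method names invented):

// Illustrative sketch only; mirrors the derivation in HeartbeatManager above.
public class NonDfsUsedSketch {
  static long nonDfsUsed(long capacityTotal, long capacityRemaining, long dfsUsed) {
    long nonDfs = capacityTotal - capacityRemaining - dfsUsed;
    return nonDfs < 0L ? 0L : nonDfs;   // clamp transient negative values to zero
  }

  public static void main(String[] args) {
    // 1000 GB total, 300 GB remaining, 500 GB of HDFS blocks -> 200 GB non-DFS
    System.out.println(nonDfsUsed(1000L, 300L, 500L));   // 200
  }
}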
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockIdGenerator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.util.SequentialNumber; /** * Generate the next valid block ID by incrementing the maximum block * ID allocated so far, starting at 2^30+1. * * Block IDs used to be allocated randomly in the past. Hence we may * find some conflicts while stepping through the ID space sequentially. * However given the sparsity of the ID space, conflicts should be rare * and can be skipped over when detected. */ @InterfaceAudience.Private public class SequentialBlockIdGenerator extends SequentialNumber { /** * The last reserved block ID. */ public static final long LAST_RESERVED_BLOCK_ID = 1024L * 1024 * 1024; private final BlockManager blockManager; SequentialBlockIdGenerator(BlockManager blockManagerRef) { super(LAST_RESERVED_BLOCK_ID); this.blockManager = blockManagerRef; } @Override // NumberGenerator public long nextValue() { Block b = new Block(super.nextValue()); // There may be an occasional conflict with randomly generated // block IDs. Skip over the conflicts. while(isValidBlock(b)) { b.setBlockId(super.nextValue()); } return b.getBlockId(); } /** * Returns whether the given block is one pointed-to by a file. */ private boolean isValidBlock(Block b) { return (blockManager.getBlockCollection(b) != null); } }
2,367
34.343284
75
java
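The block id generator above starts just past a reserved range and hands out ids sequentially, skipping any id already taken by a block created under the old random-id scheme. A self-contained sketch of that skip loop, with an invented in-memory set standing in for the real BlockManager lookup:

// Illustrative sketch only; the in-use set below replaces the real
// blockManager.getBlockCollection(b) != null check.
import java.util.HashSet;
import java.util.Set;

public class SequentialIdSketch {
  static final long LAST_RESERVED_ID = 1024L * 1024 * 1024;   // ids start at 2^30 + 1
  private long current = LAST_RESERVED_ID;
  final Set<Long> inUse = new HashSet<>();

  long nextValue() {
    long candidate = ++current;
    while (inUse.contains(candidate)) {   // rare conflict with a legacy random id
      candidate = ++current;
    }
    return candidate;
  }

  public static void main(String[] args) {
    SequentialIdSketch gen = new SequentialIdSketch();
    gen.inUse.add(LAST_RESERVED_ID + 2);       // pretend a legacy block holds this id
    System.out.println(gen.nextValue());       // 1073741825
    System.out.println(gen.nextValue());       // 1073741827 (skips the conflict)
  }
}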
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.Map; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.ClientProtocol; /** Datanode statistics */ public interface DatanodeStatistics { /** @return the total capacity */ public long getCapacityTotal(); /** @return the used capacity */ public long getCapacityUsed(); /** @return the percentage of the used capacity over the total capacity. */ public float getCapacityUsedPercent(); /** @return the remaining capacity */ public long getCapacityRemaining(); /** @return the percentage of the remaining capacity over the total capacity. */ public float getCapacityRemainingPercent(); /** @return the block pool used. */ public long getBlockPoolUsed(); /** @return the percentage of the block pool used space over the total capacity. */ public float getPercentBlockPoolUsed(); /** @return the total cache capacity of all DataNodes */ public long getCacheCapacity(); /** @return the total cache used by all DataNodes */ public long getCacheUsed(); /** @return the xceiver count */ public int getXceiverCount(); /** @return average xceiver count for non-decommission(ing|ed) nodes */ public int getInServiceXceiverCount(); /** @return number of non-decommission(ing|ed) nodes */ public int getNumDatanodesInService(); /** * @return the total used space by data nodes for non-DFS purposes * such as storing temporary files on the local file system */ public long getCapacityUsedNonDFS(); /** The same as {@link ClientProtocol#getStats()}. * The block related entries are set to -1. */ public long[] getStats(); /** @return the expired heartbeats */ public int getExpiredHeartbeats(); /** @return Storage Tier statistics*/ Map<StorageType, StorageTypeStats> getStorageTypeStats(); }
2,686
32.5875
85
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.beans.ConstructorProperties; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Statistics per StorageType. * */ @InterfaceAudience.Private @InterfaceStability.Evolving public class StorageTypeStats { private long capacityTotal = 0L; private long capacityUsed = 0L; private long capacityRemaining = 0L; private long blockPoolUsed = 0L; private int nodesInService = 0; @ConstructorProperties({"capacityTotal", "capacityUsed", "capacityRemaining", "blockPoolUsed", "nodesInService"}) public StorageTypeStats(long capacityTotal, long capacityUsed, long capacityRemaining, long blockPoolUsed, int nodesInService) { this.capacityTotal = capacityTotal; this.capacityUsed = capacityUsed; this.capacityRemaining = capacityRemaining; this.blockPoolUsed = blockPoolUsed; this.nodesInService = nodesInService; } public long getCapacityTotal() { return capacityTotal; } public long getCapacityUsed() { return capacityUsed; } public long getCapacityRemaining() { return capacityRemaining; } public long getBlockPoolUsed() { return blockPoolUsed; } public int getNodesInService() { return nodesInService; } StorageTypeStats() {} StorageTypeStats(StorageTypeStats other) { capacityTotal = other.capacityTotal; capacityUsed = other.capacityUsed; capacityRemaining = other.capacityRemaining; blockPoolUsed = other.blockPoolUsed; nodesInService = other.nodesInService; } void addStorage(final DatanodeStorageInfo info, final DatanodeDescriptor node) { capacityUsed += info.getDfsUsed(); blockPoolUsed += info.getBlockPoolUsed(); if (!(node.isDecommissionInProgress() || node.isDecommissioned())) { capacityTotal += info.getCapacity(); capacityRemaining += info.getRemaining(); } else { capacityTotal += info.getDfsUsed(); } } void addNode(final DatanodeDescriptor node) { if (!(node.isDecommissionInProgress() || node.isDecommissioned())) { nodesInService++; } } void subtractStorage(final DatanodeStorageInfo info, final DatanodeDescriptor node) { capacityUsed -= info.getDfsUsed(); blockPoolUsed -= info.getBlockPoolUsed(); if (!(node.isDecommissionInProgress() || node.isDecommissioned())) { capacityTotal -= info.getCapacity(); capacityRemaining -= info.getRemaining(); } else { capacityTotal -= info.getDfsUsed(); } } void subtractNode(final DatanodeDescriptor node) { if (!(node.isDecommissionInProgress() || node.isDecommissioned())) { nodesInService--; } } }
3,563
29.724138
79
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/NumberReplicas.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; /** * A immutable object that stores the number of live replicas and * the number of decommissioned Replicas. */ public class NumberReplicas { private int liveReplicas; // Tracks only the decommissioning replicas private int decommissioning; // Tracks only the decommissioned replicas private int decommissioned; private int corruptReplicas; private int excessReplicas; private int replicasOnStaleNodes; NumberReplicas() { initialize(0, 0, 0, 0, 0, 0); } NumberReplicas(int live, int decommissioned, int decommissioning, int corrupt, int excess, int stale) { initialize(live, decommissioned, decommissioning, corrupt, excess, stale); } void initialize(int live, int decommissioned, int decommissioning, int corrupt, int excess, int stale) { liveReplicas = live; this.decommissioning = decommissioning; this.decommissioned = decommissioned; corruptReplicas = corrupt; excessReplicas = excess; replicasOnStaleNodes = stale; } public int liveReplicas() { return liveReplicas; } /** * * @return decommissioned replicas + decommissioning replicas * It is deprecated by decommissionedAndDecommissioning * due to its misleading name. */ @Deprecated public int decommissionedReplicas() { return decommissionedAndDecommissioning(); } /** * * @return decommissioned and decommissioning replicas */ public int decommissionedAndDecommissioning() { return decommissioned + decommissioning; } /** * * @return decommissioned replicas only */ public int decommissioned() { return decommissioned; } /** * * @return decommissioning replicas only */ public int decommissioning() { return decommissioning; } public int corruptReplicas() { return corruptReplicas; } public int excessReplicas() { return excessReplicas; } /** * @return the number of replicas which are on stale nodes. * This is not mutually exclusive with the other counts -- ie a * replica may count as both "live" and "stale". */ public int replicasOnStaleNodes() { return replicasOnStaleNodes; } }
3,052
26.754545
80
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.apache.hadoop.util.Time.monotonicNow; import java.util.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.net.NodeBase; import com.google.common.annotations.VisibleForTesting; /** * The class is responsible for choosing the desired number of targets * for placing block replicas. * The replica placement strategy is that if the writer is on a datanode, * the 1st replica is placed on the local machine, * otherwise a random datanode. The 2nd replica is placed on a datanode * that is on a different rack. The 3rd replica is placed on a datanode * which is on a different node of the rack as the second replica. */ @InterfaceAudience.Private public class BlockPlacementPolicyDefault extends BlockPlacementPolicy { private static final String enableDebugLogging = "For more information, please enable DEBUG log level on " + BlockPlacementPolicy.class.getName(); private static final ThreadLocal<StringBuilder> debugLoggingBuilder = new ThreadLocal<StringBuilder>() { @Override protected StringBuilder initialValue() { return new StringBuilder(); } }; protected boolean considerLoad; private boolean preferLocalNode = true; protected NetworkTopology clusterMap; protected Host2NodesMap host2datanodeMap; private FSClusterStats stats; protected long heartbeatInterval; // interval for DataNode heartbeats private long staleInterval; // interval used to identify stale DataNodes /** * A miss of that many heartbeats is tolerated for replica deletion policy. 
*/ protected int tolerateHeartbeatMultiplier; protected BlockPlacementPolicyDefault() { } @Override public void initialize(Configuration conf, FSClusterStats stats, NetworkTopology clusterMap, Host2NodesMap host2datanodeMap) { this.considerLoad = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true); this.stats = stats; this.clusterMap = clusterMap; this.host2datanodeMap = host2datanodeMap; this.heartbeatInterval = conf.getLong( DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000; this.tolerateHeartbeatMultiplier = conf.getInt( DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY, DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT); this.staleInterval = conf.getLong( DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT); } @Override public DatanodeStorageInfo[] chooseTarget(String srcPath, int numOfReplicas, Node writer, List<DatanodeStorageInfo> chosenNodes, boolean returnChosenNodes, Set<Node> excludedNodes, long blocksize, final BlockStoragePolicy storagePolicy) { return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes, blocksize, storagePolicy); } @Override DatanodeStorageInfo[] chooseTarget(String src, int numOfReplicas, Node writer, Set<Node> excludedNodes, long blocksize, List<DatanodeDescriptor> favoredNodes, BlockStoragePolicy storagePolicy) { try { if (favoredNodes == null || favoredNodes.size() == 0) { // Favored nodes not specified, fall back to regular block placement. return chooseTarget(src, numOfReplicas, writer, new ArrayList<DatanodeStorageInfo>(numOfReplicas), false, excludedNodes, blocksize, storagePolicy); } Set<Node> favoriteAndExcludedNodes = excludedNodes == null ? new HashSet<Node>() : new HashSet<Node>(excludedNodes); final List<StorageType> requiredStorageTypes = storagePolicy .chooseStorageTypes((short)numOfReplicas); final EnumMap<StorageType, Integer> storageTypes = getRequiredStorageTypes(requiredStorageTypes); // Choose favored nodes List<DatanodeStorageInfo> results = new ArrayList<DatanodeStorageInfo>(); boolean avoidStaleNodes = stats != null && stats.isAvoidingStaleDataNodesForWrite(); int maxNodesAndReplicas[] = getMaxNodesPerRack(0, numOfReplicas); numOfReplicas = maxNodesAndReplicas[0]; int maxNodesPerRack = maxNodesAndReplicas[1]; for (int i = 0; i < favoredNodes.size() && results.size() < numOfReplicas; i++) { DatanodeDescriptor favoredNode = favoredNodes.get(i); // Choose a single node which is local to favoredNode. // 'results' is updated within chooseLocalNode final DatanodeStorageInfo target = chooseLocalStorage(favoredNode, favoriteAndExcludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes, false); if (target == null) { LOG.warn("Could not find a target for file " + src + " with favored node " + favoredNode); continue; } favoriteAndExcludedNodes.add(target.getDatanodeDescriptor()); } if (results.size() < numOfReplicas) { // Not enough favored nodes, choose other nodes. 
numOfReplicas -= results.size(); DatanodeStorageInfo[] remainingTargets = chooseTarget(src, numOfReplicas, writer, results, false, favoriteAndExcludedNodes, blocksize, storagePolicy); for (int i = 0; i < remainingTargets.length; i++) { results.add(remainingTargets[i]); } } return getPipeline(writer, results.toArray(new DatanodeStorageInfo[results.size()])); } catch (NotEnoughReplicasException nr) { if (LOG.isDebugEnabled()) { LOG.debug("Failed to choose with favored nodes (=" + favoredNodes + "), disregard favored nodes hint and retry.", nr); } // Fall back to regular block placement disregarding favored nodes hint return chooseTarget(src, numOfReplicas, writer, new ArrayList<DatanodeStorageInfo>(numOfReplicas), false, excludedNodes, blocksize, storagePolicy); } } /** This is the implementation. */ private DatanodeStorageInfo[] chooseTarget(int numOfReplicas, Node writer, List<DatanodeStorageInfo> chosenStorage, boolean returnChosenNodes, Set<Node> excludedNodes, long blocksize, final BlockStoragePolicy storagePolicy) { if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) { return DatanodeStorageInfo.EMPTY_ARRAY; } if (excludedNodes == null) { excludedNodes = new HashSet<Node>(); } int[] result = getMaxNodesPerRack(chosenStorage.size(), numOfReplicas); numOfReplicas = result[0]; int maxNodesPerRack = result[1]; final List<DatanodeStorageInfo> results = new ArrayList<DatanodeStorageInfo>(chosenStorage); for (DatanodeStorageInfo storage : chosenStorage) { // add localMachine and related nodes to excludedNodes addToExcludedNodes(storage.getDatanodeDescriptor(), excludedNodes); } boolean avoidStaleNodes = (stats != null && stats.isAvoidingStaleDataNodesForWrite()); final Node localNode = chooseTarget(numOfReplicas, writer, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storagePolicy, EnumSet.noneOf(StorageType.class), results.isEmpty()); if (!returnChosenNodes) { results.removeAll(chosenStorage); } // sorting nodes to form a pipeline return getPipeline( (writer != null && writer instanceof DatanodeDescriptor) ? writer : localNode, results.toArray(new DatanodeStorageInfo[results.size()])); } /** * Calculate the maximum number of replicas to allocate per rack. It also * limits the total number of replicas to the total number of nodes in the * cluster. Caller should adjust the replica count to the return value. * * @param numOfChosen The number of already chosen nodes. * @param numOfReplicas The number of additional nodes to allocate. * @return integer array. Index 0: The number of nodes allowed to allocate * in addition to already chosen nodes. * Index 1: The maximum allowed number of nodes per rack. This * is independent of the number of chosen nodes, as it is calculated * using the target number of replicas. */ protected int[] getMaxNodesPerRack(int numOfChosen, int numOfReplicas) { int clusterSize = clusterMap.getNumOfLeaves(); int totalNumOfReplicas = numOfChosen + numOfReplicas; if (totalNumOfReplicas > clusterSize) { numOfReplicas -= (totalNumOfReplicas-clusterSize); totalNumOfReplicas = clusterSize; } // No calculation needed when there is only one rack or picking one node. int numOfRacks = clusterMap.getNumOfRacks(); if (numOfRacks == 1 || totalNumOfReplicas <= 1) { return new int[] {numOfReplicas, totalNumOfReplicas}; } int maxNodesPerRack = (totalNumOfReplicas-1)/numOfRacks + 2; // At this point, there are more than one racks and more than one replicas // to store. Avoid all replicas being in the same rack. 
// // maxNodesPerRack has the following properties at this stage. // 1) maxNodesPerRack >= 2 // 2) (maxNodesPerRack-1) * numOfRacks > totalNumOfReplicas // when numOfRacks > 1 // // Thus, the following adjustment will still result in a value that forces // multi-rack allocation and gives enough number of total nodes. if (maxNodesPerRack == totalNumOfReplicas) { maxNodesPerRack--; } return new int[] {numOfReplicas, maxNodesPerRack}; } private EnumMap<StorageType, Integer> getRequiredStorageTypes( List<StorageType> types) { EnumMap<StorageType, Integer> map = new EnumMap<StorageType, Integer>(StorageType.class); for (StorageType type : types) { if (!map.containsKey(type)) { map.put(type, 1); } else { int num = map.get(type); map.put(type, num + 1); } } return map; } /** * choose <i>numOfReplicas</i> from all data nodes * @param numOfReplicas additional number of replicas wanted * @param writer the writer's machine, could be a non-DatanodeDescriptor node * @param excludedNodes datanodes that should not be considered as targets * @param blocksize size of the data to be written * @param maxNodesPerRack max nodes allowed per rack * @param results the target nodes already chosen * @param avoidStaleNodes avoid stale nodes in replica choosing * @return local node of writer (not chosen node) */ private Node chooseTarget(int numOfReplicas, Node writer, final Set<Node> excludedNodes, final long blocksize, final int maxNodesPerRack, final List<DatanodeStorageInfo> results, final boolean avoidStaleNodes, final BlockStoragePolicy storagePolicy, final EnumSet<StorageType> unavailableStorages, final boolean newBlock) { if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) { return (writer instanceof DatanodeDescriptor) ? writer : null; } final int numOfResults = results.size(); final int totalReplicasExpected = numOfReplicas + numOfResults; if ((writer == null || !(writer instanceof DatanodeDescriptor)) && !newBlock) { writer = results.get(0).getDatanodeDescriptor(); } // Keep a copy of original excludedNodes final Set<Node> oldExcludedNodes = new HashSet<Node>(excludedNodes); // choose storage types; use fallbacks for unavailable storages final List<StorageType> requiredStorageTypes = storagePolicy .chooseStorageTypes((short) totalReplicasExpected, DatanodeStorageInfo.toStorageTypes(results), unavailableStorages, newBlock); final EnumMap<StorageType, Integer> storageTypes = getRequiredStorageTypes(requiredStorageTypes); if (LOG.isTraceEnabled()) { LOG.trace("storageTypes=" + storageTypes); } try { if ((numOfReplicas = requiredStorageTypes.size()) == 0) { throw new NotEnoughReplicasException( "All required storage types are unavailable: " + " unavailableStorages=" + unavailableStorages + ", storagePolicy=" + storagePolicy); } writer = chooseTargetInOrder(numOfReplicas, writer, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, newBlock, storageTypes); } catch (NotEnoughReplicasException e) { final String message = "Failed to place enough replicas, still in need of " + (totalReplicasExpected - results.size()) + " to reach " + totalReplicasExpected + " (unavailableStorages=" + unavailableStorages + ", storagePolicy=" + storagePolicy + ", newBlock=" + newBlock + ")"; if (LOG.isTraceEnabled()) { LOG.trace(message, e); } else { LOG.warn(message + " " + e.getMessage()); } if (avoidStaleNodes) { // Retry chooseTarget again, this time not avoiding stale nodes. // excludedNodes contains the initial excludedNodes and nodes that were // not chosen because they were stale, decommissioned, etc. 
// We need to additionally exclude the nodes that were added to the // result list in the successful calls to choose*() above. for (DatanodeStorageInfo resultStorage : results) { addToExcludedNodes(resultStorage.getDatanodeDescriptor(), oldExcludedNodes); } // Set numOfReplicas, since it can get out of sync with the result list // if the NotEnoughReplicasException was thrown in chooseRandom(). numOfReplicas = totalReplicasExpected - results.size(); return chooseTarget(numOfReplicas, writer, oldExcludedNodes, blocksize, maxNodesPerRack, results, false, storagePolicy, unavailableStorages, newBlock); } boolean retry = false; // simply add all the remaining types into unavailableStorages and give // another try. No best effort is guaranteed here. for (StorageType type : storageTypes.keySet()) { if (!unavailableStorages.contains(type)) { unavailableStorages.add(type); retry = true; } } if (retry) { for (DatanodeStorageInfo resultStorage : results) { addToExcludedNodes(resultStorage.getDatanodeDescriptor(), oldExcludedNodes); } numOfReplicas = totalReplicasExpected - results.size(); return chooseTarget(numOfReplicas, writer, oldExcludedNodes, blocksize, maxNodesPerRack, results, false, storagePolicy, unavailableStorages, newBlock); } } return writer; } protected Node chooseTargetInOrder(int numOfReplicas, Node writer, final Set<Node> excludedNodes, final long blocksize, final int maxNodesPerRack, final List<DatanodeStorageInfo> results, final boolean avoidStaleNodes, final boolean newBlock, EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException { final int numOfResults = results.size(); if (numOfResults == 0) { writer = chooseLocalStorage(writer, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes, true) .getDatanodeDescriptor(); if (--numOfReplicas == 0) { return writer; } } final DatanodeDescriptor dn0 = results.get(0).getDatanodeDescriptor(); if (numOfResults <= 1) { chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); if (--numOfReplicas == 0) { return writer; } } if (numOfResults <= 2) { final DatanodeDescriptor dn1 = results.get(1).getDatanodeDescriptor(); if (clusterMap.isOnSameRack(dn0, dn1)) { chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } else if (newBlock){ chooseLocalRack(dn1, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } else { chooseLocalRack(writer, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } if (--numOfReplicas == 0) { return writer; } } chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); return writer; } /** * Choose <i>localMachine</i> as the target. 
* if <i>localMachine</i> is not available, * choose a node on the same rack * @return the chosen storage */ protected DatanodeStorageInfo chooseLocalStorage(Node localMachine, Set<Node> excludedNodes, long blocksize, int maxNodesPerRack, List<DatanodeStorageInfo> results, boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes, boolean fallbackToLocalRack) throws NotEnoughReplicasException { // if no local machine, randomly choose one node if (localMachine == null) { return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } if (preferLocalNode && localMachine instanceof DatanodeDescriptor && clusterMap.contains(localMachine)) { DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine; // otherwise try local machine first if (excludedNodes.add(localMachine)) { // was not in the excluded list for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes .entrySet().iterator(); iter.hasNext(); ) { Map.Entry<StorageType, Integer> entry = iter.next(); for (DatanodeStorageInfo localStorage : DFSUtil.shuffle( localDatanode.getStorageInfos())) { StorageType type = entry.getKey(); if (addIfIsGoodTarget(localStorage, excludedNodes, blocksize, maxNodesPerRack, false, results, avoidStaleNodes, type) >= 0) { int num = entry.getValue(); if (num == 1) { iter.remove(); } else { entry.setValue(num - 1); } return localStorage; } } } } } if (!fallbackToLocalRack) { return null; } // try a node on local rack return chooseLocalRack(localMachine, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } /** * Add <i>localMachine</i> and related nodes to <i>excludedNodes</i> * for next replica choosing. In sub class, we can add more nodes within * the same failure domain of localMachine * @return number of new excluded nodes */ protected int addToExcludedNodes(DatanodeDescriptor localMachine, Set<Node> excludedNodes) { return excludedNodes.add(localMachine) ? 1 : 0; } /** * Choose one node from the rack that <i>localMachine</i> is on. * if no such node is available, choose one node from the rack where * a second replica is on. * if still no such node is available, choose a random node * in the cluster. 
   * @return the chosen node
   */
  protected DatanodeStorageInfo chooseLocalRack(Node localMachine,
                                                Set<Node> excludedNodes,
                                                long blocksize,
                                                int maxNodesPerRack,
                                                List<DatanodeStorageInfo> results,
                                                boolean avoidStaleNodes,
                                                EnumMap<StorageType, Integer> storageTypes)
      throws NotEnoughReplicasException {
    // no local machine, so choose a random machine
    if (localMachine == null) {
      return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
    }
    final String localRack = localMachine.getNetworkLocation();

    try {
      // choose one from the local rack
      return chooseRandom(localRack, excludedNodes,
          blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes);
    } catch (NotEnoughReplicasException e) {
      // find the next replica and retry with its rack
      for(DatanodeStorageInfo resultStorage : results) {
        DatanodeDescriptor nextNode = resultStorage.getDatanodeDescriptor();
        if (nextNode != localMachine) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Failed to choose from local rack (location = " + localRack
                + "), retry with the rack of the next replica (location = "
                + nextNode.getNetworkLocation() + ")", e);
          }
          return chooseFromNextRack(nextNode, excludedNodes, blocksize,
              maxNodesPerRack, results, avoidStaleNodes, storageTypes);
        }
      }

      if (LOG.isDebugEnabled()) {
        LOG.debug("Failed to choose from local rack (location = " + localRack
            + "); the second replica is not found, retry choosing randomly", e);
      }
      // the second replica is not found, randomly choose one from the network
      return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
    }
  }

  private DatanodeStorageInfo chooseFromNextRack(Node next,
      Set<Node> excludedNodes,
      long blocksize,
      int maxNodesPerRack,
      List<DatanodeStorageInfo> results,
      boolean avoidStaleNodes,
      EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException {
    final String nextRack = next.getNetworkLocation();
    try {
      return chooseRandom(nextRack, excludedNodes, blocksize, maxNodesPerRack,
          results, avoidStaleNodes, storageTypes);
    } catch(NotEnoughReplicasException e) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Failed to choose from the next rack (location = " + nextRack
            + "), retry choosing randomly", e);
      }
      // otherwise randomly choose one from the network
      return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
    }
  }

  /**
   * Choose <i>numOfReplicas</i> nodes from the racks
   * that <i>localMachine</i> is NOT on.
* if not enough nodes are available, choose the remaining ones * from the local rack */ protected void chooseRemoteRack(int numOfReplicas, DatanodeDescriptor localMachine, Set<Node> excludedNodes, long blocksize, int maxReplicasPerRack, List<DatanodeStorageInfo> results, boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException { int oldNumOfReplicas = results.size(); // randomly choose one node from remote racks try { chooseRandom(numOfReplicas, "~" + localMachine.getNetworkLocation(), excludedNodes, blocksize, maxReplicasPerRack, results, avoidStaleNodes, storageTypes); } catch (NotEnoughReplicasException e) { if (LOG.isDebugEnabled()) { LOG.debug("Failed to choose remote rack (location = ~" + localMachine.getNetworkLocation() + "), fallback to local rack", e); } chooseRandom(numOfReplicas-(results.size()-oldNumOfReplicas), localMachine.getNetworkLocation(), excludedNodes, blocksize, maxReplicasPerRack, results, avoidStaleNodes, storageTypes); } } /** * Randomly choose one target from the given <i>scope</i>. * @return the chosen storage, if there is any. */ protected DatanodeStorageInfo chooseRandom(String scope, Set<Node> excludedNodes, long blocksize, int maxNodesPerRack, List<DatanodeStorageInfo> results, boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException { return chooseRandom(1, scope, excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes, storageTypes); } /** * Randomly choose <i>numOfReplicas</i> targets from the given <i>scope</i>. * @return the first chosen node, if there is any. */ protected DatanodeStorageInfo chooseRandom(int numOfReplicas, String scope, Set<Node> excludedNodes, long blocksize, int maxNodesPerRack, List<DatanodeStorageInfo> results, boolean avoidStaleNodes, EnumMap<StorageType, Integer> storageTypes) throws NotEnoughReplicasException { int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes( scope, excludedNodes); StringBuilder builder = null; if (LOG.isDebugEnabled()) { builder = debugLoggingBuilder.get(); builder.setLength(0); builder.append("["); } boolean badTarget = false; DatanodeStorageInfo firstChosen = null; while(numOfReplicas > 0 && numOfAvailableNodes > 0) { DatanodeDescriptor chosenNode = chooseDataNode(scope); if (excludedNodes.add(chosenNode)) { //was not in the excluded list if (LOG.isDebugEnabled()) { builder.append("\nNode ").append(NodeBase.getPath(chosenNode)).append(" ["); } numOfAvailableNodes--; final DatanodeStorageInfo[] storages = DFSUtil.shuffle( chosenNode.getStorageInfos()); int i = 0; boolean search = true; for (Iterator<Map.Entry<StorageType, Integer>> iter = storageTypes .entrySet().iterator(); search && iter.hasNext(); ) { Map.Entry<StorageType, Integer> entry = iter.next(); for (i = 0; i < storages.length; i++) { StorageType type = entry.getKey(); final int newExcludedNodes = addIfIsGoodTarget(storages[i], excludedNodes, blocksize, maxNodesPerRack, considerLoad, results, avoidStaleNodes, type); if (newExcludedNodes >= 0) { numOfReplicas--; if (firstChosen == null) { firstChosen = storages[i]; } numOfAvailableNodes -= newExcludedNodes; int num = entry.getValue(); if (num == 1) { iter.remove(); } else { entry.setValue(num - 1); } search = false; break; } } } if (LOG.isDebugEnabled()) { builder.append("\n]"); } // If no candidate storage was found on this DN then set badTarget. 
badTarget = (i == storages.length); } } if (numOfReplicas>0) { String detail = enableDebugLogging; if (LOG.isDebugEnabled()) { if (badTarget && builder != null) { detail = builder.toString(); builder.setLength(0); } else { detail = ""; } } throw new NotEnoughReplicasException(detail); } return firstChosen; } /** * Choose a datanode from the given <i>scope</i>. * @return the chosen node, if there is any. */ protected DatanodeDescriptor chooseDataNode(final String scope) { return (DatanodeDescriptor) clusterMap.chooseRandom(scope); } /** * If the given storage is a good target, add it to the result list and * update the set of excluded nodes. * @return -1 if the given is not a good target; * otherwise, return the number of nodes added to excludedNodes set. */ int addIfIsGoodTarget(DatanodeStorageInfo storage, Set<Node> excludedNodes, long blockSize, int maxNodesPerRack, boolean considerLoad, List<DatanodeStorageInfo> results, boolean avoidStaleNodes, StorageType storageType) { if (isGoodTarget(storage, blockSize, maxNodesPerRack, considerLoad, results, avoidStaleNodes, storageType)) { results.add(storage); // add node and related nodes to excludedNode return addToExcludedNodes(storage.getDatanodeDescriptor(), excludedNodes); } else { return -1; } } private static void logNodeIsNotChosen(DatanodeStorageInfo storage, String reason) { if (LOG.isDebugEnabled()) { // build the error message for later use. debugLoggingBuilder.get() .append("\n Storage ").append(storage) .append(" is not chosen since ").append(reason).append("."); } } /** * Determine if a storage is a good target. * * @param storage The target storage * @param blockSize Size of block * @param maxTargetPerRack Maximum number of targets per rack. The value of * this parameter depends on the number of racks in * the cluster and total number of replicas for a block * @param considerLoad whether or not to consider load of the target node * @param results A list containing currently chosen nodes. Used to check if * too many nodes has been chosen in the target rack. * @param avoidStaleNodes Whether or not to avoid choosing stale nodes * @return Return true if <i>node</i> has enough space, * does not have too much load, * and the rack does not have too many nodes. 
*/ private boolean isGoodTarget(DatanodeStorageInfo storage, long blockSize, int maxTargetPerRack, boolean considerLoad, List<DatanodeStorageInfo> results, boolean avoidStaleNodes, StorageType requiredStorageType) { if (storage.getStorageType() != requiredStorageType) { logNodeIsNotChosen(storage, "storage types do not match," + " where the required storage type is " + requiredStorageType); return false; } if (storage.getState() == State.READ_ONLY_SHARED) { logNodeIsNotChosen(storage, "storage is read-only"); return false; } if (storage.getState() == State.FAILED) { logNodeIsNotChosen(storage, "storage has failed"); return false; } DatanodeDescriptor node = storage.getDatanodeDescriptor(); // check if the node is (being) decommissioned if (node.isDecommissionInProgress() || node.isDecommissioned()) { logNodeIsNotChosen(storage, "the node is (being) decommissioned "); return false; } if (avoidStaleNodes) { if (node.isStale(this.staleInterval)) { logNodeIsNotChosen(storage, "the node is stale "); return false; } } final long requiredSize = blockSize * HdfsServerConstants.MIN_BLOCKS_FOR_WRITE; final long scheduledSize = blockSize * node.getBlocksScheduled(storage.getStorageType()); final long remaining = node.getRemaining(storage.getStorageType()); if (requiredSize > remaining - scheduledSize) { logNodeIsNotChosen(storage, "the node does not have enough " + storage.getStorageType() + " space" + " (required=" + requiredSize + ", scheduled=" + scheduledSize + ", remaining=" + remaining + ")"); return false; } // check the communication traffic of the target machine if (considerLoad) { final double maxLoad = 2.0 * stats.getInServiceXceiverAverage(); final int nodeLoad = node.getXceiverCount(); if (nodeLoad > maxLoad) { logNodeIsNotChosen(storage, "the node is too busy (load: " + nodeLoad + " > " + maxLoad + ") "); return false; } } // check if the target rack has chosen too many nodes String rackname = node.getNetworkLocation(); int counter=1; for(DatanodeStorageInfo resultStorage : results) { if (rackname.equals( resultStorage.getDatanodeDescriptor().getNetworkLocation())) { counter++; } } if (counter>maxTargetPerRack) { logNodeIsNotChosen(storage, "the rack has too many chosen nodes "); return false; } return true; } /** * Return a pipeline of nodes. * The pipeline is formed finding a shortest path that * starts from the writer and traverses all <i>nodes</i> * This is basically a traveling salesman problem. 
*/ private DatanodeStorageInfo[] getPipeline(Node writer, DatanodeStorageInfo[] storages) { if (storages.length == 0) { return storages; } synchronized(clusterMap) { int index=0; if (writer == null || !clusterMap.contains(writer)) { writer = storages[0].getDatanodeDescriptor(); } for(; index < storages.length; index++) { DatanodeStorageInfo shortestStorage = storages[index]; int shortestDistance = clusterMap.getDistance(writer, shortestStorage.getDatanodeDescriptor()); int shortestIndex = index; for(int i = index + 1; i < storages.length; i++) { int currentDistance = clusterMap.getDistance(writer, storages[i].getDatanodeDescriptor()); if (shortestDistance>currentDistance) { shortestDistance = currentDistance; shortestStorage = storages[i]; shortestIndex = i; } } //switch position index & shortestIndex if (index != shortestIndex) { storages[shortestIndex] = storages[index]; storages[index] = shortestStorage; } writer = shortestStorage.getDatanodeDescriptor(); } } return storages; } @Override public BlockPlacementStatus verifyBlockPlacement(String srcPath, LocatedBlock lBlk, int numberOfReplicas) { DatanodeInfo[] locs = lBlk.getLocations(); if (locs == null) locs = DatanodeDescriptor.EMPTY_ARRAY; int numRacks = clusterMap.getNumOfRacks(); if(numRacks <= 1) // only one rack return new BlockPlacementStatusDefault( Math.min(numRacks, numberOfReplicas), numRacks); int minRacks = Math.min(2, numberOfReplicas); // 1. Check that all locations are different. // 2. Count locations on different racks. Set<String> racks = new TreeSet<String>(); for (DatanodeInfo dn : locs) racks.add(dn.getNetworkLocation()); return new BlockPlacementStatusDefault(racks.size(), minRacks); } @Override public DatanodeStorageInfo chooseReplicaToDelete(BlockCollection bc, Block block, short replicationFactor, Collection<DatanodeStorageInfo> first, Collection<DatanodeStorageInfo> second, final List<StorageType> excessTypes) { long oldestHeartbeat = monotonicNow() - heartbeatInterval * tolerateHeartbeatMultiplier; DatanodeStorageInfo oldestHeartbeatStorage = null; long minSpace = Long.MAX_VALUE; DatanodeStorageInfo minSpaceStorage = null; // Pick the node with the oldest heartbeat or with the least free space, // if all hearbeats are within the tolerable heartbeat interval for(DatanodeStorageInfo storage : pickupReplicaSet(first, second)) { if (!excessTypes.contains(storage.getStorageType())) { continue; } final DatanodeDescriptor node = storage.getDatanodeDescriptor(); long free = node.getRemaining(); long lastHeartbeat = node.getLastUpdateMonotonic(); if (lastHeartbeat < oldestHeartbeat) { oldestHeartbeat = lastHeartbeat; oldestHeartbeatStorage = storage; } if (minSpace > free) { minSpace = free; minSpaceStorage = storage; } } final DatanodeStorageInfo storage; if (oldestHeartbeatStorage != null) { storage = oldestHeartbeatStorage; } else if (minSpaceStorage != null) { storage = minSpaceStorage; } else { return null; } excessTypes.remove(storage.getStorageType()); return storage; } /** * Pick up replica node set for deleting replica as over-replicated. * First set contains replica nodes on rack with more than one * replica while second set contains remaining replica nodes. * So pick up first set if not empty. If first is empty, then pick second. */ protected Collection<DatanodeStorageInfo> pickupReplicaSet( Collection<DatanodeStorageInfo> first, Collection<DatanodeStorageInfo> second) { return first.isEmpty() ? second : first; } @VisibleForTesting void setPreferLocalNode(boolean prefer) { this.preferLocalNode = prefer; } }
39,443
39.874611
96
java
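The per-rack cap computed by getMaxNodesPerRack above is easy to check in isolation. The sketch below (a hypothetical standalone class, not HDFS code) reproduces the same arithmetic shown in the source -- cap the replica count at the cluster size, compute (totalReplicas-1)/numRacks + 2, then decrement the cap when it would allow all replicas on a single rack -- so the multi-rack guarantee discussed in the comments can be verified for a few inputs.

/** Hypothetical standalone check of the per-rack cap arithmetic used by getMaxNodesPerRack. */
public class MaxNodesPerRackDemo {

  /**
   * @return {adjusted number of additional replicas, max nodes allowed per rack},
   *         following the same steps as BlockPlacementPolicyDefault#getMaxNodesPerRack.
   */
  static int[] maxNodesPerRack(int numOfChosen, int numOfReplicas,
                               int clusterSize, int numOfRacks) {
    int totalNumOfReplicas = numOfChosen + numOfReplicas;
    if (totalNumOfReplicas > clusterSize) {
      numOfReplicas -= (totalNumOfReplicas - clusterSize);
      totalNumOfReplicas = clusterSize;
    }
    // No calculation needed when there is only one rack or picking one node.
    if (numOfRacks == 1 || totalNumOfReplicas <= 1) {
      return new int[] {numOfReplicas, totalNumOfReplicas};
    }
    int maxNodesPerRack = (totalNumOfReplicas - 1) / numOfRacks + 2;
    if (maxNodesPerRack == totalNumOfReplicas) {
      maxNodesPerRack--;   // force at least two racks to be used
    }
    return new int[] {numOfReplicas, maxNodesPerRack};
  }

  public static void main(String[] args) {
    // 3 replicas on a 10-node, 2-rack cluster: cap is (3-1)/2 + 2 = 3, adjusted down to 2.
    int[] r = maxNodesPerRack(0, 3, 10, 2);
    System.out.println("replicas=" + r[0] + ", maxNodesPerRack=" + r[1]);
    // 3 replicas on a 10-node, 3-rack cluster: cap stays at (3-1)/3 + 2 = 2.
    r = maxNodesPerRack(0, 3, 10, 3);
    System.out.println("replicas=" + r[0] + ", maxNodesPerRack=" + r[1]);
  }
}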
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.blockmanagement;

import java.util.LinkedList;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.util.LightWeightGSet;

/**
 * BlockInfo class maintains for a given block
 * the {@link BlockCollection} it is part of and datanodes where the replicas of
 * the block are stored.
 */
@InterfaceAudience.Private
public abstract class BlockInfo extends Block
    implements LightWeightGSet.LinkedElement {
  public static final BlockInfo[] EMPTY_ARRAY = {};

  private BlockCollection bc;

  /** For implementing {@link LightWeightGSet.LinkedElement} interface */
  private LightWeightGSet.LinkedElement nextLinkedElement;

  /**
   * This array contains triplets of references. For each i-th storage the
   * block belongs to, triplets[3*i] is the reference to the
   * {@link DatanodeStorageInfo} and triplets[3*i+1] and triplets[3*i+2] are
   * references to the previous and the next blocks, respectively, in the list
   * of blocks belonging to this storage.
   *
   * Using previous and next in Object triplets is done instead of a
   * {@link LinkedList} list to efficiently use memory. With LinkedList the cost
   * per replica is 42 bytes (LinkedList#Entry object per replica) versus 16
   * bytes using the triplets.
   */
  protected Object[] triplets;

  /**
   * Construct an entry for blocksmap
   * @param replication the block's replication factor
   */
  public BlockInfo(short replication) {
    this.triplets = new Object[3*replication];
    this.bc = null;
  }

  public BlockInfo(Block blk, short replication) {
    super(blk);
    this.triplets = new Object[3*replication];
    this.bc = null;
  }

  /**
   * Copy construction.
   * This is used to convert BlockInfoUnderConstruction
   * @param from BlockInfo to copy from.
   */
  protected BlockInfo(BlockInfo from) {
    super(from);
    this.triplets = new Object[from.triplets.length];
    this.bc = from.bc;
  }

  public BlockCollection getBlockCollection() {
    return bc;
  }

  public void setBlockCollection(BlockCollection bc) {
    this.bc = bc;
  }

  public boolean isDeleted() {
    return (bc == null);
  }

  public DatanodeDescriptor getDatanode(int index) {
    DatanodeStorageInfo storage = getStorageInfo(index);
    return storage == null ?
null : storage.getDatanodeDescriptor(); } DatanodeStorageInfo getStorageInfo(int index) { assert this.triplets != null : "BlockInfo is not initialized"; assert index >= 0 && index*3 < triplets.length : "Index is out of bound"; return (DatanodeStorageInfo)triplets[index*3]; } BlockInfo getPrevious(int index) { assert this.triplets != null : "BlockInfo is not initialized"; assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound"; BlockInfo info = (BlockInfo)triplets[index*3+1]; assert info == null || info.getClass().getName().startsWith(BlockInfo.class.getName()) : "BlockInfo is expected at " + index*3; return info; } BlockInfo getNext(int index) { assert this.triplets != null : "BlockInfo is not initialized"; assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound"; BlockInfo info = (BlockInfo)triplets[index*3+2]; assert info == null || info.getClass().getName().startsWith( BlockInfo.class.getName()) : "BlockInfo is expected at " + index*3; return info; } void setStorageInfo(int index, DatanodeStorageInfo storage) { assert this.triplets != null : "BlockInfo is not initialized"; assert index >= 0 && index*3 < triplets.length : "Index is out of bound"; triplets[index*3] = storage; } /** * Return the previous block on the block list for the datanode at * position index. Set the previous block on the list to "to". * * @param index - the datanode index * @param to - block to be set to previous on the list of blocks * @return current previous block on the list of blocks */ BlockInfo setPrevious(int index, BlockInfo to) { assert this.triplets != null : "BlockInfo is not initialized"; assert index >= 0 && index*3+1 < triplets.length : "Index is out of bound"; BlockInfo info = (BlockInfo)triplets[index*3+1]; triplets[index*3+1] = to; return info; } /** * Return the next block on the block list for the datanode at * position index. Set the next block on the list to "to". * * @param index - the datanode index * @param to - block to be set to next on the list of blocks * * @return current next block on the list of blocks */ BlockInfo setNext(int index, BlockInfo to) { assert this.triplets != null : "BlockInfo is not initialized"; assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound"; BlockInfo info = (BlockInfo)triplets[index*3+2]; triplets[index*3+2] = to; return info; } public int getCapacity() { assert this.triplets != null : "BlockInfo is not initialized"; assert triplets.length % 3 == 0 : "Malformed BlockInfo"; return triplets.length / 3; } /** * Count the number of data-nodes the block belongs to. */ public abstract int numNodes(); /** * Add a {@link DatanodeStorageInfo} location for a block. */ abstract boolean addStorage(DatanodeStorageInfo storage); /** * Remove {@link DatanodeStorageInfo} location for a block */ abstract boolean removeStorage(DatanodeStorageInfo storage); /** * Replace the current BlockInfo with the new one in corresponding * DatanodeStorageInfo's linked list */ abstract void replaceBlock(BlockInfo newBlock); /** * Find specified DatanodeStorageInfo. * @return DatanodeStorageInfo or null if not found. */ DatanodeStorageInfo findStorageInfo(DatanodeDescriptor dn) { int len = getCapacity(); for(int idx = 0; idx < len; idx++) { DatanodeStorageInfo cur = getStorageInfo(idx); if(cur == null) break; if(cur.getDatanodeDescriptor() == dn) return cur; } return null; } /** * Find specified DatanodeStorageInfo. * @return index or -1 if not found. 
*/ int findStorageInfo(DatanodeStorageInfo storageInfo) { int len = getCapacity(); for(int idx = 0; idx < len; idx++) { DatanodeStorageInfo cur = getStorageInfo(idx); if (cur == storageInfo) { return idx; } if (cur == null) { break; } } return -1; } /** * Insert this block into the head of the list of blocks * related to the specified DatanodeStorageInfo. * If the head is null then form a new list. * @return current block as the new head of the list. */ BlockInfo listInsert(BlockInfo head, DatanodeStorageInfo storage) { int dnIndex = this.findStorageInfo(storage); assert dnIndex >= 0 : "Data node is not found: current"; assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : "Block is already in the list and cannot be inserted."; this.setPrevious(dnIndex, null); this.setNext(dnIndex, head); if(head != null) head.setPrevious(head.findStorageInfo(storage), this); return this; } /** * Remove this block from the list of blocks * related to the specified DatanodeStorageInfo. * If this block is the head of the list then return the next block as * the new head. * @return the new head of the list or null if the list becomes * empy after deletion. */ BlockInfo listRemove(BlockInfo head, DatanodeStorageInfo storage) { if(head == null) return null; int dnIndex = this.findStorageInfo(storage); if(dnIndex < 0) // this block is not on the data-node list return head; BlockInfo next = this.getNext(dnIndex); BlockInfo prev = this.getPrevious(dnIndex); this.setNext(dnIndex, null); this.setPrevious(dnIndex, null); if(prev != null) prev.setNext(prev.findStorageInfo(storage), next); if(next != null) next.setPrevious(next.findStorageInfo(storage), prev); if(this == head) // removing the head head = next; return head; } /** * Remove this block from the list of blocks related to the specified * DatanodeDescriptor. Insert it into the head of the list of blocks. * * @return the new head of the list. */ public BlockInfo moveBlockToHead(BlockInfo head, DatanodeStorageInfo storage, int curIndex, int headIndex) { if (head == this) { return this; } BlockInfo next = this.setNext(curIndex, head); BlockInfo prev = this.setPrevious(curIndex, null); head.setPrevious(headIndex, this); prev.setNext(prev.findStorageInfo(storage), next); if (next != null) { next.setPrevious(next.findStorageInfo(storage), prev); } return this; } /** * BlockInfo represents a block that is not being constructed. * In order to start modifying the block, the BlockInfo should be converted * to {@link BlockInfoContiguousUnderConstruction}. * @return {@link BlockUCState#COMPLETE} */ public BlockUCState getBlockUCState() { return BlockUCState.COMPLETE; } /** * Is this block complete? * * @return true if the state of the block is {@link BlockUCState#COMPLETE} */ public boolean isComplete() { return getBlockUCState().equals(BlockUCState.COMPLETE); } /** * Convert a complete block to an under construction block. * @return BlockInfoUnderConstruction - an under construction block. 
*/ public BlockInfoContiguousUnderConstruction convertToBlockUnderConstruction( BlockUCState s, DatanodeStorageInfo[] targets) { if(isComplete()) { BlockInfoContiguousUnderConstruction ucBlock = new BlockInfoContiguousUnderConstruction(this, getBlockCollection().getPreferredBlockReplication(), s, targets); ucBlock.setBlockCollection(getBlockCollection()); return ucBlock; } // the block is already under construction BlockInfoContiguousUnderConstruction ucBlock = (BlockInfoContiguousUnderConstruction)this; ucBlock.setBlockUCState(s); ucBlock.setExpectedLocations(targets); ucBlock.setBlockCollection(getBlockCollection()); return ucBlock; } @Override public int hashCode() { // Super implementation is sufficient return super.hashCode(); } @Override public boolean equals(Object obj) { // Sufficient to rely on super's implementation return (this == obj) || super.equals(obj); } @Override public LightWeightGSet.LinkedElement getNext() { return nextLinkedElement; } @Override public void setNext(LightWeightGSet.LinkedElement next) { this.nextLinkedElement = next; } }
11,799
31.686981
80
java
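The triplets layout documented in BlockInfo is compact but easy to mis-index, so the sketch below (a hypothetical LocationTriplets class, not HDFS code) spells out the convention: slot 3*i holds the storage reference for the i-th replica location, while slots 3*i+1 and 3*i+2 hold the previous/next links of the per-storage doubly linked block list.

/** Hypothetical illustration of the triplets indexing convention used by BlockInfo. */
public class TripletsDemo {

  /** One flat Object[] holding {storage, prevBlock, nextBlock} per replica location. */
  static final class LocationTriplets {
    private final Object[] triplets;

    LocationTriplets(int replication) {
      this.triplets = new Object[3 * replication];   // same sizing as new Object[3*replication]
    }

    Object getStorage(int i)  { return triplets[3 * i];     }   // mirrors getStorageInfo(i)
    Object getPrevious(int i) { return triplets[3 * i + 1]; }   // mirrors getPrevious(i)
    Object getNext(int i)     { return triplets[3 * i + 2]; }   // mirrors getNext(i)

    void set(int i, Object storage, Object prev, Object next) {
      triplets[3 * i]     = storage;
      triplets[3 * i + 1] = prev;
      triplets[3 * i + 2] = next;
    }

    /** Capacity is the number of locations, i.e. triplets.length / 3. */
    int capacity() { return triplets.length / 3; }
  }

  public static void main(String[] args) {
    LocationTriplets t = new LocationTriplets(3);
    t.set(0, "storage-A", null, "blockX");
    System.out.println("capacity=" + t.capacity()
        + ", storage(0)=" + t.getStorage(0)
        + ", next(0)=" + t.getNext(0));
  }
}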
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.Iterator; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.LightWeightGSet; import com.google.common.base.Predicate; import com.google.common.collect.Iterables; /** * This class maintains the map from a block to its metadata. * block's metadata currently includes blockCollection it belongs to and * the datanodes that store the block. */ class BlocksMap { private static class StorageIterator implements Iterator<DatanodeStorageInfo> { private final BlockInfo blockInfo; private int nextIdx = 0; StorageIterator(BlockInfo blkInfo) { this.blockInfo = blkInfo; } @Override public boolean hasNext() { return blockInfo != null && nextIdx < blockInfo.getCapacity() && blockInfo.getDatanode(nextIdx) != null; } @Override public DatanodeStorageInfo next() { return blockInfo.getStorageInfo(nextIdx++); } @Override public void remove() { throw new UnsupportedOperationException("Sorry. can't remove."); } } /** Constant {@link LightWeightGSet} capacity. */ private final int capacity; private GSet<Block, BlockInfo> blocks; BlocksMap(int capacity) { // Use 2% of total memory to size the GSet capacity this.capacity = capacity; this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity) { @Override public Iterator<BlockInfo> iterator() { SetIterator iterator = new SetIterator(); /* * Not tracking any modifications to set. As this set will be used * always under FSNameSystem lock, modifications will not cause any * ConcurrentModificationExceptions. But there is a chance of missing * newly added elements during iteration. */ iterator.setTrackModification(false); return iterator; } }; } void close() { clear(); blocks = null; } void clear() { if (blocks != null) { blocks.clear(); } } BlockCollection getBlockCollection(Block b) { BlockInfo info = blocks.get(b); return (info != null) ? info.getBlockCollection() : null; } /** * Add block b belonging to the specified block collection to the map. */ BlockInfo addBlockCollection(BlockInfo b, BlockCollection bc) { BlockInfo info = blocks.get(b); if (info != b) { info = b; blocks.put(info); } info.setBlockCollection(bc); return info; } /** * Remove the block from the block map; * remove it from all data-node lists it belongs to; * and remove all data-node locations associated with the block. 
   */
  void removeBlock(Block block) {
    BlockInfo blockInfo = blocks.remove(block);
    if (blockInfo == null)
      return;

    blockInfo.setBlockCollection(null);
    for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) {
      DatanodeDescriptor dn = blockInfo.getDatanode(idx);
      dn.removeBlock(blockInfo); // remove from the list and wipe the location
    }
  }

  /** Returns the block object if it exists in the map. */
  BlockInfo getStoredBlock(Block b) {
    return blocks.get(b);
  }

  /**
   * Searches for the block in the BlocksMap and
   * returns {@link Iterable} of the storages the block belongs to.
   */
  Iterable<DatanodeStorageInfo> getStorages(Block b) {
    return getStorages(blocks.get(b));
  }

  /**
   * Searches for the block in the BlocksMap and
   * returns {@link Iterable} of the storages the block belongs to
   * <i>that are of the given {@link DatanodeStorage.State state}</i>.
   *
   * @param state DatanodeStorage state by which to filter the returned Iterable
   */
  Iterable<DatanodeStorageInfo> getStorages(Block b, final DatanodeStorage.State state) {
    return Iterables.filter(getStorages(blocks.get(b)),
        new Predicate<DatanodeStorageInfo>() {
          @Override
          public boolean apply(DatanodeStorageInfo storage) {
            return storage.getState() == state;
          }
        });
  }

  /**
   * For a block that has already been retrieved from the BlocksMap
   * returns {@link Iterable} of the storages the block belongs to.
   */
  Iterable<DatanodeStorageInfo> getStorages(final BlockInfo storedBlock) {
    return new Iterable<DatanodeStorageInfo>() {
      @Override
      public Iterator<DatanodeStorageInfo> iterator() {
        return new StorageIterator(storedBlock);
      }
    };
  }

  /** Counts the number of nodes containing the block. Better than using an iterator. */
  int numNodes(Block b) {
    BlockInfo info = blocks.get(b);
    return info == null ? 0 : info.numNodes();
  }

  /**
   * Remove data-node reference from the block.
   * Remove the block from the block map
   * only if it does not belong to any file and data-nodes.
   */
  boolean removeNode(Block b, DatanodeDescriptor node) {
    BlockInfo info = blocks.get(b);
    if (info == null)
      return false;

    // remove block from the data-node list and the node from the block info
    boolean removed = node.removeBlock(info);

    if (info.getDatanode(0) == null     // no datanodes left
        && info.isDeleted()) {          // does not belong to a file
      blocks.remove(b);                 // remove block from the map
    }
    return removed;
  }

  int size() {
    if (blocks != null) {
      return blocks.size();
    } else {
      return 0;
    }
  }

  Iterable<BlockInfo> getBlocks() {
    return blocks;
  }

  /** Get the capacity of the HashMap that stores blocks */
  int getCapacity() {
    return capacity;
  }

  /**
   * Replace a block in the block map by a new block.
   * The new block and the old one have the same key.
   * @param newBlock - block for replacement
   * @return new block
   */
  BlockInfo replaceBlock(BlockInfo newBlock) {
    BlockInfo currentBlock = blocks.get(newBlock);
    assert currentBlock != null : "the block is not in blocksMap";
    // replace block in data-node lists
    currentBlock.replaceBlock(newBlock);
    // replace block in the map itself
    blocks.put(newBlock);
    return newBlock;
  }
}
6,983
28.974249
94
java
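getStorages(Block, State) above filters with Guava's Iterables.filter and an anonymous Predicate. As a rough equivalent for readers less familiar with that idiom, the sketch below (hypothetical Storage type and State enum, not HDFS code) collects eagerly what the Guava call produces lazily: only the storages whose state matches the requested one.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/** Hypothetical illustration of the state-based filtering done by BlocksMap#getStorages(Block, State). */
public class StorageFilterDemo {

  enum State { NORMAL, READ_ONLY_SHARED, FAILED }

  /** Minimal stand-in for DatanodeStorageInfo: just an id and a state. */
  static final class Storage {
    final String id;
    final State state;
    Storage(String id, State state) { this.id = id; this.state = state; }
  }

  /** Keep only storages whose state matches, like Iterables.filter with a Predicate on getState(). */
  static List<Storage> storagesInState(Iterable<Storage> storages, State wanted) {
    List<Storage> result = new ArrayList<>();
    for (Storage s : storages) {
      if (s.state == wanted) {
        result.add(s);
      }
    }
    return result;
  }

  public static void main(String[] args) {
    List<Storage> all = Arrays.asList(
        new Storage("DS-1", State.NORMAL),
        new Storage("DS-2", State.FAILED),
        new Storage("DS-3", State.NORMAL));
    System.out.println("NORMAL storages: " + storagesInState(all, State.NORMAL).size());
  }
}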
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.ArrayList; import java.util.BitSet; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Queue; import java.util.Set; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.namenode.CachedBlock; import org.apache.hadoop.hdfs.server.protocol.BlockReportContext; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; import org.apache.hadoop.hdfs.util.EnumCounters; import org.apache.hadoop.hdfs.util.LightWeightHashSet; import org.apache.hadoop.util.IntrusiveCollection; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; /** * This class extends the DatanodeInfo class with ephemeral information (eg * health, capacity, what blocks are associated with the Datanode) that is * private to the Namenode, ie this class is not exposed to clients. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class DatanodeDescriptor extends DatanodeInfo { public static final Log LOG = LogFactory.getLog(DatanodeDescriptor.class); public static final DatanodeDescriptor[] EMPTY_ARRAY = {}; // Stores status of decommissioning. // If node is not decommissioning, do not use this object for anything. 
public final DecommissioningStatus decommissioningStatus = new DecommissioningStatus(); private long curBlockReportId = 0; private BitSet curBlockReportRpcsSeen = null; public int updateBlockReportContext(BlockReportContext context) { if (curBlockReportId != context.getReportId()) { curBlockReportId = context.getReportId(); curBlockReportRpcsSeen = new BitSet(context.getTotalRpcs()); } curBlockReportRpcsSeen.set(context.getCurRpc()); return curBlockReportRpcsSeen.cardinality(); } public void clearBlockReportContext() { curBlockReportId = 0; curBlockReportRpcsSeen = null; } /** Block and targets pair */ @InterfaceAudience.Private @InterfaceStability.Evolving public static class BlockTargetPair { public final Block block; public final DatanodeStorageInfo[] targets; BlockTargetPair(Block block, DatanodeStorageInfo[] targets) { this.block = block; this.targets = targets; } } /** A BlockTargetPair queue. */ private static class BlockQueue<E> { private final Queue<E> blockq = new LinkedList<E>(); /** Size of the queue */ synchronized int size() {return blockq.size();} /** Enqueue */ synchronized boolean offer(E e) { return blockq.offer(e); } /** Dequeue */ synchronized List<E> poll(int numBlocks) { if (numBlocks <= 0 || blockq.isEmpty()) { return null; } List<E> results = new ArrayList<>(); for(; !blockq.isEmpty() && numBlocks > 0; numBlocks--) { results.add(blockq.poll()); } return results; } /** * Returns <tt>true</tt> if the queue contains the specified element. */ boolean contains(E e) { return blockq.contains(e); } synchronized void clear() { blockq.clear(); } } private final Map<String, DatanodeStorageInfo> storageMap = new HashMap<>(); /** * A list of CachedBlock objects on this datanode. */ public static class CachedBlocksList extends IntrusiveCollection<CachedBlock> { public enum Type { PENDING_CACHED, CACHED, PENDING_UNCACHED } private final DatanodeDescriptor datanode; private final Type type; CachedBlocksList(DatanodeDescriptor datanode, Type type) { this.datanode = datanode; this.type = type; } public DatanodeDescriptor getDatanode() { return datanode; } public Type getType() { return type; } } /** * The blocks which we want to cache on this DataNode. */ private final CachedBlocksList pendingCached = new CachedBlocksList(this, CachedBlocksList.Type.PENDING_CACHED); /** * The blocks which we know are cached on this datanode. * This list is updated by periodic cache reports. */ private final CachedBlocksList cached = new CachedBlocksList(this, CachedBlocksList.Type.CACHED); /** * The blocks which we want to uncache on this DataNode. */ private final CachedBlocksList pendingUncached = new CachedBlocksList(this, CachedBlocksList.Type.PENDING_UNCACHED); public CachedBlocksList getPendingCached() { return pendingCached; } public CachedBlocksList getCached() { return cached; } public CachedBlocksList getPendingUncached() { return pendingUncached; } /** * The time when the last batch of caching directives was sent, in * monotonic milliseconds. */ private long lastCachingDirectiveSentTimeMs; // isAlive == heartbeats.contains(this) // This is an optimization, because contains takes O(n) time on Arraylist public boolean isAlive = false; public boolean needKeyUpdate = false; // A system administrator can tune the balancer bandwidth parameter // (dfs.balance.bandwidthPerSec) dynamically by calling // "dfsadmin -setBalanacerBandwidth <newbandwidth>", at which point the // following 'bandwidth' variable gets updated with the new value for each // node. 
Once the heartbeat command is issued to update the value on the // specified datanode, this value will be set back to 0. private long bandwidth; /** A queue of blocks to be replicated by this datanode */ private final BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<>(); /** A queue of blocks to be recovered by this datanode */ private final BlockQueue<BlockInfoContiguousUnderConstruction> recoverBlocks = new BlockQueue<BlockInfoContiguousUnderConstruction>(); /** A set of blocks to be invalidated by this datanode */ private final LightWeightHashSet<Block> invalidateBlocks = new LightWeightHashSet<>(); /* Variables for maintaining number of blocks scheduled to be written to * this storage. This count is approximate and might be slightly bigger * in case of errors (e.g. datanode does not report if an error occurs * while writing the block). */ private EnumCounters<StorageType> currApproxBlocksScheduled = new EnumCounters<>(StorageType.class); private EnumCounters<StorageType> prevApproxBlocksScheduled = new EnumCounters<>(StorageType.class); private long lastBlocksScheduledRollTime = 0; private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min private int volumeFailures = 0; private VolumeFailureSummary volumeFailureSummary = null; /** * When set to true, the node is not in include list and is not allowed * to communicate with the namenode */ private boolean disallowed = false; // The number of replication work pending before targets are determined private int PendingReplicationWithoutTargets = 0; // HB processing can use it to tell if it is the first HB since DN restarted private boolean heartbeatedSinceRegistration = false; /** * DatanodeDescriptor constructor * @param nodeID id of the data node */ public DatanodeDescriptor(DatanodeID nodeID) { super(nodeID); updateHeartbeatState(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0, null); } /** * DatanodeDescriptor constructor * @param nodeID id of the data node * @param networkLocation location of the data node in network */ public DatanodeDescriptor(DatanodeID nodeID, String networkLocation) { super(nodeID, networkLocation); updateHeartbeatState(StorageReport.EMPTY_ARRAY, 0L, 0L, 0, 0, null); } @VisibleForTesting public DatanodeStorageInfo getStorageInfo(String storageID) { synchronized (storageMap) { return storageMap.get(storageID); } } DatanodeStorageInfo[] getStorageInfos() { synchronized (storageMap) { final Collection<DatanodeStorageInfo> storages = storageMap.values(); return storages.toArray(new DatanodeStorageInfo[storages.size()]); } } public StorageReport[] getStorageReports() { final DatanodeStorageInfo[] infos = getStorageInfos(); final StorageReport[] reports = new StorageReport[infos.length]; for(int i = 0; i < infos.length; i++) { reports[i] = infos[i].toStorageReport(); } return reports; } boolean hasStaleStorages() { synchronized (storageMap) { for (DatanodeStorageInfo storage : storageMap.values()) { if (storage.areBlockContentsStale()) { return true; } } return false; } } static final private List<DatanodeStorageInfo> EMPTY_STORAGE_INFO_LIST = ImmutableList.of(); List<DatanodeStorageInfo> removeZombieStorages() { List<DatanodeStorageInfo> zombies = null; synchronized (storageMap) { Iterator<Map.Entry<String, DatanodeStorageInfo>> iter = storageMap.entrySet().iterator(); while (iter.hasNext()) { Map.Entry<String, DatanodeStorageInfo> entry = iter.next(); DatanodeStorageInfo storageInfo = entry.getValue(); if (storageInfo.getLastBlockReportId() != curBlockReportId) { 
LOG.info(storageInfo.getStorageID() + " had lastBlockReportId 0x" + Long.toHexString(storageInfo.getLastBlockReportId()) + ", but curBlockReportId = 0x" + Long.toHexString(curBlockReportId)); iter.remove(); if (zombies == null) { zombies = new LinkedList<>(); } zombies.add(storageInfo); } storageInfo.setLastBlockReportId(0); } } return zombies == null ? EMPTY_STORAGE_INFO_LIST : zombies; } /** * Remove block from the list of blocks belonging to the data-node. Remove * data-node from the block. */ boolean removeBlock(BlockInfo b) { final DatanodeStorageInfo s = b.findStorageInfo(this); // if block exists on this datanode if (s != null) { return s.removeBlock(b); } return false; } /** * Remove block from the list of blocks belonging to the data-node. Remove * data-node from the block. */ boolean removeBlock(String storageID, BlockInfo b) { DatanodeStorageInfo s = getStorageInfo(storageID); return s != null && s.removeBlock(b); } public void resetBlocks() { setCapacity(0); setRemaining(0); setBlockPoolUsed(0); setDfsUsed(0); setXceiverCount(0); this.invalidateBlocks.clear(); this.volumeFailures = 0; // pendingCached, cached, and pendingUncached are protected by the // FSN lock. this.pendingCached.clear(); this.cached.clear(); this.pendingUncached.clear(); } public void clearBlockQueues() { synchronized (invalidateBlocks) { this.invalidateBlocks.clear(); this.recoverBlocks.clear(); this.replicateBlocks.clear(); } // pendingCached, cached, and pendingUncached are protected by the // FSN lock. this.pendingCached.clear(); this.cached.clear(); this.pendingUncached.clear(); } public int numBlocks() { int blocks = 0; for (DatanodeStorageInfo entry : getStorageInfos()) { blocks += entry.numBlocks(); } return blocks; } /** * Updates stats from datanode heartbeat. */ public void updateHeartbeat(StorageReport[] reports, long cacheCapacity, long cacheUsed, int xceiverCount, int volFailures, VolumeFailureSummary volumeFailureSummary) { updateHeartbeatState(reports, cacheCapacity, cacheUsed, xceiverCount, volFailures, volumeFailureSummary); heartbeatedSinceRegistration = true; } /** * process datanode heartbeat or stats initialization. */ public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity, long cacheUsed, int xceiverCount, int volFailures, VolumeFailureSummary volumeFailureSummary) { long totalCapacity = 0; long totalRemaining = 0; long totalBlockPoolUsed = 0; long totalDfsUsed = 0; Set<DatanodeStorageInfo> failedStorageInfos = null; // Decide if we should check for any missing StorageReport and mark it as // failed. There are different scenarios. // 1. When DN is running, a storage failed. Given the current DN // implementation doesn't add recovered storage back to its storage list // until DN restart, we can assume volFailures won't decrease // during the current DN registration session. // When volumeFailures == this.volumeFailures, it implies there is no // state change. No need to check for failed storage. This is an // optimization. Recent versions of the DataNode report a // VolumeFailureSummary containing the date/time of the last volume // failure. If that's available, then we check that instead for greater // accuracy. // 2. After DN restarts, volFailures might not increase and it is possible // we still have new failed storage. For example, admins reduce // available storages in configuration. 
Another corner case // is the failed volumes might change after restart; a) there // is one good storage A, one restored good storage B, so there is // one element in storageReports and that is A. b) A failed. c) Before // DN sends HB to NN to indicate A has failed, DN restarts. d) After DN // restarts, storageReports has one element which is B. final boolean checkFailedStorages; if (volumeFailureSummary != null && this.volumeFailureSummary != null) { checkFailedStorages = volumeFailureSummary.getLastVolumeFailureDate() > this.volumeFailureSummary.getLastVolumeFailureDate(); } else { checkFailedStorages = (volFailures > this.volumeFailures) || !heartbeatedSinceRegistration; } if (checkFailedStorages) { LOG.info("Number of failed storage changes from " + this.volumeFailures + " to " + volFailures); synchronized (storageMap) { failedStorageInfos = new HashSet<>(storageMap.values()); } } setCacheCapacity(cacheCapacity); setCacheUsed(cacheUsed); setXceiverCount(xceiverCount); setLastUpdate(Time.now()); setLastUpdateMonotonic(Time.monotonicNow()); this.volumeFailures = volFailures; this.volumeFailureSummary = volumeFailureSummary; for (StorageReport report : reports) { DatanodeStorageInfo storage = updateStorage(report.getStorage()); if (checkFailedStorages) { failedStorageInfos.remove(storage); } storage.receivedHeartbeat(report); totalCapacity += report.getCapacity(); totalRemaining += report.getRemaining(); totalBlockPoolUsed += report.getBlockPoolUsed(); totalDfsUsed += report.getDfsUsed(); } rollBlocksScheduled(getLastUpdateMonotonic()); // Update total metrics for the node. setCapacity(totalCapacity); setRemaining(totalRemaining); setBlockPoolUsed(totalBlockPoolUsed); setDfsUsed(totalDfsUsed); if (checkFailedStorages) { updateFailedStorage(failedStorageInfos); } long storageMapSize; synchronized (storageMap) { storageMapSize = storageMap.size(); } if (storageMapSize != reports.length) { pruneStorageMap(reports); } } /** * Remove stale storages from storageMap. We must not remove any storages * as long as they have associated block replicas. */ private void pruneStorageMap(final StorageReport[] reports) { synchronized (storageMap) { if (LOG.isDebugEnabled()) { LOG.debug("Number of storages reported in heartbeat=" + reports.length + "; Number of storages in storageMap=" + storageMap.size()); } HashMap<String, DatanodeStorageInfo> excessStorages; // Init excessStorages with all known storages. excessStorages = new HashMap<>(storageMap); // Remove storages that the DN reported in the heartbeat. for (final StorageReport report : reports) { excessStorages.remove(report.getStorage().getStorageID()); } // For each remaining storage, remove it if there are no associated // blocks. for (final DatanodeStorageInfo storageInfo : excessStorages.values()) { if (storageInfo.numBlocks() == 0) { storageMap.remove(storageInfo.getStorageID()); LOG.info("Removed storage " + storageInfo + " from DataNode" + this); } else if (LOG.isDebugEnabled()) { // This can occur until all block reports are received. 
LOG.debug("Deferring removal of stale storage " + storageInfo + " with " + storageInfo.numBlocks() + " blocks"); } } } } private void updateFailedStorage( Set<DatanodeStorageInfo> failedStorageInfos) { for (DatanodeStorageInfo storageInfo : failedStorageInfos) { if (storageInfo.getState() != DatanodeStorage.State.FAILED) { LOG.info(storageInfo + " failed."); storageInfo.setState(DatanodeStorage.State.FAILED); } } } private static class BlockIterator implements Iterator<BlockInfo> { private int index = 0; private final List<Iterator<BlockInfo>> iterators; private BlockIterator(final DatanodeStorageInfo... storages) { List<Iterator<BlockInfo>> iterators = new ArrayList<>(); for (DatanodeStorageInfo e : storages) { iterators.add(e.getBlockIterator()); } this.iterators = Collections.unmodifiableList(iterators); } @Override public boolean hasNext() { update(); return !iterators.isEmpty() && iterators.get(index).hasNext(); } @Override public BlockInfo next() { update(); return iterators.get(index).next(); } @Override public void remove() { throw new UnsupportedOperationException("Remove unsupported."); } private void update() { while(index < iterators.size() - 1 && !iterators.get(index).hasNext()) { index++; } } } Iterator<BlockInfo> getBlockIterator() { return new BlockIterator(getStorageInfos()); } Iterator<BlockInfo> getBlockIterator(final String storageID) { return new BlockIterator(getStorageInfo(storageID)); } void incrementPendingReplicationWithoutTargets() { PendingReplicationWithoutTargets++; } void decrementPendingReplicationWithoutTargets() { PendingReplicationWithoutTargets--; } /** * Store block replication work. */ void addBlockToBeReplicated(Block block, DatanodeStorageInfo[] targets) { assert(block != null && targets != null && targets.length > 0); replicateBlocks.offer(new BlockTargetPair(block, targets)); } /** * Store block recovery work. */ void addBlockToBeRecovered(BlockInfoContiguousUnderConstruction block) { if(recoverBlocks.contains(block)) { // this prevents adding the same block twice to the recovery queue BlockManager.LOG.info(block + " is already in the recovery queue"); return; } recoverBlocks.offer(block); } /** * Store block invalidation work. */ void addBlocksToBeInvalidated(List<Block> blocklist) { assert(blocklist != null && blocklist.size() > 0); synchronized (invalidateBlocks) { for(Block blk : blocklist) { invalidateBlocks.add(blk); } } } /** * The number of work items that are pending to be replicated */ int getNumberOfBlocksToBeReplicated() { return PendingReplicationWithoutTargets + replicateBlocks.size(); } /** * The number of block invalidation items that are pending to * be sent to the datanode */ int getNumberOfBlocksToBeInvalidated() { synchronized (invalidateBlocks) { return invalidateBlocks.size(); } } public List<BlockTargetPair> getReplicationCommand(int maxTransfers) { return replicateBlocks.poll(maxTransfers); } public BlockInfoContiguousUnderConstruction[] getLeaseRecoveryCommand(int maxTransfers) { List<BlockInfoContiguousUnderConstruction> blocks = recoverBlocks.poll(maxTransfers); if(blocks == null) return null; return blocks.toArray(new BlockInfoContiguousUnderConstruction[blocks.size()]); } /** * Remove the specified number of blocks to be invalidated */ public Block[] getInvalidateBlocks(int maxblocks) { synchronized (invalidateBlocks) { Block[] deleteList = invalidateBlocks.pollToArray(new Block[Math.min( invalidateBlocks.size(), maxblocks)]); return deleteList.length == 0 ? 
null : deleteList; } } /** * @return Approximate number of blocks currently scheduled to be written */ public long getRemaining(StorageType t) { long remaining = 0; for(DatanodeStorageInfo s : getStorageInfos()) { if (s.getStorageType() == t) { remaining += s.getRemaining(); } } return remaining; } /** * @return Approximate number of blocks currently scheduled to be written * to the given storage type of this datanode. */ public int getBlocksScheduled(StorageType t) { return (int)(currApproxBlocksScheduled.get(t) + prevApproxBlocksScheduled.get(t)); } /** * @return Approximate number of blocks currently scheduled to be written * to this datanode. */ public int getBlocksScheduled() { return (int)(currApproxBlocksScheduled.sum() + prevApproxBlocksScheduled.sum()); } /** Increment the number of blocks scheduled. */ void incrementBlocksScheduled(StorageType t) { currApproxBlocksScheduled.add(t, 1);; } /** Decrement the number of blocks scheduled. */ void decrementBlocksScheduled(StorageType t) { if (prevApproxBlocksScheduled.get(t) > 0) { prevApproxBlocksScheduled.subtract(t, 1); } else if (currApproxBlocksScheduled.get(t) > 0) { currApproxBlocksScheduled.subtract(t, 1); } // its ok if both counters are zero. } /** Adjusts curr and prev number of blocks scheduled every few minutes. */ private void rollBlocksScheduled(long now) { if (now - lastBlocksScheduledRollTime > BLOCKS_SCHEDULED_ROLL_INTERVAL) { prevApproxBlocksScheduled.set(currApproxBlocksScheduled); currApproxBlocksScheduled.reset(); lastBlocksScheduledRollTime = now; } } @Override public int hashCode() { // Super implementation is sufficient return super.hashCode(); } @Override public boolean equals(Object obj) { // Sufficient to use super equality as datanodes are uniquely identified // by DatanodeID return (this == obj) || super.equals(obj); } /** Decommissioning status */ public class DecommissioningStatus { private int underReplicatedBlocks; private int decommissionOnlyReplicas; private int underReplicatedInOpenFiles; private long startTime; synchronized void set(int underRep, int onlyRep, int underConstruction) { if (isDecommissionInProgress() == false) { return; } underReplicatedBlocks = underRep; decommissionOnlyReplicas = onlyRep; underReplicatedInOpenFiles = underConstruction; } /** @return the number of under-replicated blocks */ public synchronized int getUnderReplicatedBlocks() { if (isDecommissionInProgress() == false) { return 0; } return underReplicatedBlocks; } /** @return the number of decommission-only replicas */ public synchronized int getDecommissionOnlyReplicas() { if (isDecommissionInProgress() == false) { return 0; } return decommissionOnlyReplicas; } /** @return the number of under-replicated blocks in open files */ public synchronized int getUnderReplicatedInOpenFiles() { if (isDecommissionInProgress() == false) { return 0; } return underReplicatedInOpenFiles; } /** Set start time */ public synchronized void setStartTime(long time) { startTime = time; } /** @return start time */ public synchronized long getStartTime() { if (isDecommissionInProgress() == false) { return 0; } return startTime; } } // End of class DecommissioningStatus /** * Set the flag to indicate if this datanode is disallowed from communicating * with the namenode. */ public void setDisallowed(boolean flag) { disallowed = flag; } /** Is the datanode disallowed from communicating with the namenode? */ public boolean isDisallowed() { return disallowed; } /** * @return number of failed volumes in the datanode. 
*/ public int getVolumeFailures() { return volumeFailures; } /** * Returns info about volume failures. * * @return info about volume failures, possibly null */ public VolumeFailureSummary getVolumeFailureSummary() { return volumeFailureSummary; } /** * @param nodeReg DatanodeID to update registration for. */ @Override public void updateRegInfo(DatanodeID nodeReg) { super.updateRegInfo(nodeReg); // must re-process IBR after re-registration for(DatanodeStorageInfo storage : getStorageInfos()) { storage.setBlockReportCount(0); } heartbeatedSinceRegistration = false; } /** * @return balancer bandwidth in bytes per second for this datanode */ public long getBalancerBandwidth() { return this.bandwidth; } /** * @param bandwidth balancer bandwidth in bytes per second for this datanode */ public void setBalancerBandwidth(long bandwidth) { this.bandwidth = bandwidth; } @Override public String dumpDatanode() { StringBuilder sb = new StringBuilder(super.dumpDatanode()); int repl = replicateBlocks.size(); if (repl > 0) { sb.append(" ").append(repl).append(" blocks to be replicated;"); } int inval = invalidateBlocks.size(); if (inval > 0) { sb.append(" ").append(inval).append(" blocks to be invalidated;"); } int recover = recoverBlocks.size(); if (recover > 0) { sb.append(" ").append(recover).append(" blocks to be recovered;"); } return sb.toString(); } DatanodeStorageInfo updateStorage(DatanodeStorage s) { synchronized (storageMap) { DatanodeStorageInfo storage = storageMap.get(s.getStorageID()); if (storage == null) { LOG.info("Adding new storage ID " + s.getStorageID() + " for DN " + getXferAddr()); storage = new DatanodeStorageInfo(this, s); storageMap.put(s.getStorageID(), storage); } else if (storage.getState() != s.getState() || storage.getStorageType() != s.getStorageType()) { // For backwards compatibility, make sure that the type and // state are updated. Some reports from older datanodes do // not include these fields so we may have assumed defaults. storage.updateFromStorage(s); storageMap.put(storage.getStorageID(), storage); } return storage; } } /** * @return The time at which we last sent caching directives to this * DataNode, in monotonic milliseconds. */ public long getLastCachingDirectiveSentTimeMs() { return this.lastCachingDirectiveSentTimeMs; } /** * @param time The time at which we last sent caching directives to this * DataNode, in monotonic milliseconds. */ public void setLastCachingDirectiveSentTimeMs(long time) { this.lastCachingDirectiveSentTimeMs = time; } /** * checks whether atleast first block report has been received * @return */ public boolean checkBlockReportReceived() { if(this.getStorageInfos().length == 0) { return false; } for(DatanodeStorageInfo storageInfo: this.getStorageInfos()) { if(storageInfo.getBlockReportCount() == 0 ) return false; } return true; } }
29503
31.421978
91
java
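The DatanodeDescriptor code above tracks "approximate blocks scheduled" with a current and a previous counter that are rolled every ten minutes, so decrements that never arrive (for example, a datanode that fails a write without reporting it) eventually age out of the estimate. The standalone sketch below mirrors only that rolling-counter idea; the class name RollingScheduledCounter, the plain long counters, and the explicit clock parameter are illustrative assumptions and not part of the HDFS API.

import java.util.concurrent.TimeUnit;

/**
 * Standalone sketch of the "approximate blocks scheduled" bookkeeping in
 * DatanodeDescriptor: increments go to a current bucket, decrements drain the
 * previous bucket first, and the buckets are rolled on a fixed interval so
 * unreported decrements fade away instead of accumulating forever.
 */
public class RollingScheduledCounter {
  private static final long ROLL_INTERVAL_MS = TimeUnit.MINUTES.toMillis(10);

  private long curr = 0;           // writes scheduled since the last roll
  private long prev = 0;           // writes carried over from the previous window
  private long lastRollTimeMs = 0;

  /** Called when a new block write is scheduled to this node. */
  public void increment() {
    curr++;
  }

  /** Called when a scheduled write completes (or is known to have failed). */
  public void decrement() {
    if (prev > 0) {
      prev--;
    } else if (curr > 0) {
      curr--;
    }
    // If both buckets are already zero, the extra decrement is ignored.
  }

  /** Approximate number of writes still expected to land on this node. */
  public long get() {
    return curr + prev;
  }

  /** Roll the buckets if the interval has elapsed; call this on heartbeats. */
  public void maybeRoll(long nowMs) {
    if (nowMs - lastRollTimeMs > ROLL_INTERVAL_MS) {
      prev = curr;   // anything still "pending" from the old window
      curr = 0;      // start counting the new window from zero
      lastRollTimeMs = nowMs;
    }
  }

  public static void main(String[] args) {
    RollingScheduledCounter c = new RollingScheduledCounter();
    c.increment();
    c.increment();
    c.decrement();
    System.out.println(c.get());                       // 1
    c.maybeRoll(TimeUnit.MINUTES.toMillis(11));
    System.out.println(c.get());                       // still 1, now in prev
    c.maybeRoll(TimeUnit.MINUTES.toMillis(22));
    System.out.println(c.get());                       // 0 -- the lost write aged out
  }
}

Rolling rather than resetting means a write that is never acknowledged is counted for at most two intervals before the estimate self-corrects.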
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.HashMap; import java.util.concurrent.ThreadLocalRandom; /** * The BlockReportLeaseManager manages block report leases.<p/> * * DataNodes request BR leases from the NameNode by sending a heartbeat with * the requestBlockReportLease field set. The NameNode may choose to respond * with a non-zero lease ID. If so, that DataNode can send a block report with * the given lease ID for the next few minutes. The NameNode will accept * these full block reports.<p/> * * BR leases limit the number of incoming full block reports to the NameNode * at any given time. For compatibility reasons, the NN will always accept * block reports sent with a lease ID of 0 and queue them for processing * immediately. Full block reports which were manually triggered will also * have a lease ID of 0, bypassing the rate-limiting.<p/> * * Block report leases expire after a certain amount of time. This mechanism * is in place so that a DN which dies while holding a lease does not * permanently decrease the number of concurrent block reports which the NN is * willing to accept.<p/> * * When considering which DNs to grant a BR lease, the NameNode gives priority * to the DNs which have gone the longest without sending a full block * report.<p/> */ class BlockReportLeaseManager { static final Logger LOG = LoggerFactory.getLogger(BlockReportLeaseManager.class); private static class NodeData { /** * The UUID of the datanode. */ final String datanodeUuid; /** * The lease ID, or 0 if there is no lease. */ long leaseId; /** * The time when the lease was issued, or 0 if there is no lease. */ long leaseTimeMs; /** * Previous element in the list. */ NodeData prev; /** * Next element in the list. 
*/ NodeData next; static NodeData ListHead(String name) { NodeData node = new NodeData(name); node.next = node; node.prev = node; return node; } NodeData(String datanodeUuid) { this.datanodeUuid = datanodeUuid; } void removeSelf() { if (this.prev != null) { this.prev.next = this.next; } if (this.next != null) { this.next.prev = this.prev; } this.next = null; this.prev = null; } void addToEnd(NodeData node) { Preconditions.checkState(node.next == null); Preconditions.checkState(node.prev == null); node.prev = this.prev; node.next = this; this.prev.next = node; this.prev = node; } void addToBeginning(NodeData node) { Preconditions.checkState(node.next == null); Preconditions.checkState(node.prev == null); node.next = this.next; node.prev = this; this.next.prev = node; this.next = node; } } /** * List of datanodes which don't currently have block report leases. */ private final NodeData deferredHead = NodeData.ListHead("deferredHead"); /** * List of datanodes which currently have block report leases. */ private final NodeData pendingHead = NodeData.ListHead("pendingHead"); /** * Maps datanode UUIDs to NodeData. */ private final HashMap<String, NodeData> nodes = new HashMap<>(); /** * The current length of the pending list. */ private int numPending = 0; /** * The maximum number of leases to hand out at any given time. */ private final int maxPending; /** * The number of milliseconds after which a lease will expire. */ private final long leaseExpiryMs; /** * The next ID we will use for a block report lease. */ private long nextId = ThreadLocalRandom.current().nextLong(); BlockReportLeaseManager(Configuration conf) { this(conf.getInt( DFSConfigKeys.DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES, DFSConfigKeys.DFS_NAMENODE_MAX_FULL_BLOCK_REPORT_LEASES_DEFAULT), conf.getLong( DFSConfigKeys.DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS, DFSConfigKeys.DFS_NAMENODE_FULL_BLOCK_REPORT_LEASE_LENGTH_MS_DEFAULT)); } BlockReportLeaseManager(int maxPending, long leaseExpiryMs) { Preconditions.checkArgument(maxPending >= 1, "Cannot set the maximum number of block report leases to a " + "value less than 1."); this.maxPending = maxPending; Preconditions.checkArgument(leaseExpiryMs >= 1, "Cannot set full block report lease expiry period to a value " + "less than 1."); this.leaseExpiryMs = leaseExpiryMs; } /** * Get the next block report lease ID. Any number is valid except 0. 
*/ private synchronized long getNextId() { long id; do { id = nextId++; } while (id == 0); return id; } public synchronized void register(DatanodeDescriptor dn) { registerNode(dn); } private synchronized NodeData registerNode(DatanodeDescriptor dn) { if (nodes.containsKey(dn.getDatanodeUuid())) { LOG.info("Can't register DN {} because it is already registered.", dn.getDatanodeUuid()); return null; } NodeData node = new NodeData(dn.getDatanodeUuid()); deferredHead.addToBeginning(node); nodes.put(dn.getDatanodeUuid(), node); LOG.info("Registered DN {} ({}).", dn.getDatanodeUuid(), dn.getXferAddr()); return node; } private synchronized void remove(NodeData node) { if (node.leaseId != 0) { numPending--; node.leaseId = 0; node.leaseTimeMs = 0; } node.removeSelf(); } public synchronized void unregister(DatanodeDescriptor dn) { NodeData node = nodes.remove(dn.getDatanodeUuid()); if (node == null) { LOG.info("Can't unregister DN {} because it is not currently " + "registered.", dn.getDatanodeUuid()); return; } remove(node); } public synchronized long requestLease(DatanodeDescriptor dn) { NodeData node = nodes.get(dn.getDatanodeUuid()); if (node == null) { LOG.warn("DN {} ({}) requested a lease even though it wasn't yet " + "registered. Registering now.", dn.getDatanodeUuid(), dn.getXferAddr()); node = registerNode(dn); } if (node.leaseId != 0) { // The DataNode wants a new lease, even though it already has one. // This can happen if the DataNode is restarted in between requesting // a lease and using it. LOG.debug("Removing existing BR lease 0x{} for DN {} in order to " + "issue a new one.", Long.toHexString(node.leaseId), dn.getDatanodeUuid()); } remove(node); long monotonicNowMs = Time.monotonicNow(); pruneExpiredPending(monotonicNowMs); if (numPending >= maxPending) { if (LOG.isDebugEnabled()) { StringBuilder allLeases = new StringBuilder(); String prefix = ""; for (NodeData cur = pendingHead.next; cur != pendingHead; cur = cur.next) { allLeases.append(prefix).append(cur.datanodeUuid); prefix = ", "; } LOG.debug("Can't create a new BR lease for DN {}, because " + "numPending equals maxPending at {}. Current leases: {}", dn.getDatanodeUuid(), numPending, allLeases.toString()); } return 0; } numPending++; node.leaseId = getNextId(); node.leaseTimeMs = monotonicNowMs; pendingHead.addToEnd(node); if (LOG.isDebugEnabled()) { LOG.debug("Created a new BR lease 0x{} for DN {}. 
numPending = {}", Long.toHexString(node.leaseId), dn.getDatanodeUuid(), numPending); } return node.leaseId; } private synchronized boolean pruneIfExpired(long monotonicNowMs, NodeData node) { if (monotonicNowMs < node.leaseTimeMs + leaseExpiryMs) { return false; } LOG.info("Removing expired block report lease 0x{} for DN {}.", Long.toHexString(node.leaseId), node.datanodeUuid); Preconditions.checkState(node.leaseId != 0); remove(node); deferredHead.addToBeginning(node); return true; } private synchronized void pruneExpiredPending(long monotonicNowMs) { NodeData cur = pendingHead.next; while (cur != pendingHead) { NodeData next = cur.next; if (!pruneIfExpired(monotonicNowMs, cur)) { return; } cur = next; } LOG.trace("No entries remaining in the pending list."); } public synchronized boolean checkLease(DatanodeDescriptor dn, long monotonicNowMs, long id) { if (id == 0) { LOG.debug("Datanode {} is using BR lease id 0x0 to bypass " + "rate-limiting.", dn.getDatanodeUuid()); return true; } NodeData node = nodes.get(dn.getDatanodeUuid()); if (node == null) { LOG.info("BR lease 0x{} is not valid for unknown datanode {}", Long.toHexString(id), dn.getDatanodeUuid()); return false; } if (node.leaseId == 0) { LOG.warn("BR lease 0x{} is not valid for DN {}, because the DN " + "is not in the pending set.", Long.toHexString(id), dn.getDatanodeUuid()); return false; } if (pruneIfExpired(monotonicNowMs, node)) { LOG.warn("BR lease 0x{} is not valid for DN {}, because the lease " + "has expired.", Long.toHexString(id), dn.getDatanodeUuid()); return false; } if (id != node.leaseId) { LOG.warn("BR lease 0x{} is not valid for DN {}. Expected BR lease 0x{}.", Long.toHexString(id), dn.getDatanodeUuid(), Long.toHexString(node.leaseId)); return false; } if (LOG.isTraceEnabled()) { LOG.trace("BR lease 0x{} is valid for DN {}.", Long.toHexString(id), dn.getDatanodeUuid()); } return true; } public synchronized long removeLease(DatanodeDescriptor dn) { NodeData node = nodes.get(dn.getDatanodeUuid()); if (node == null) { LOG.info("Can't remove lease for unknown datanode {}", dn.getDatanodeUuid()); return 0; } long id = node.leaseId; if (id == 0) { LOG.debug("DN {} has no lease to remove.", dn.getDatanodeUuid()); return 0; } remove(node); deferredHead.addToEnd(node); if (LOG.isTraceEnabled()) { LOG.trace("Removed BR lease 0x{} for DN {}. numPending = {}", Long.toHexString(id), dn.getDatanodeUuid(), numPending); } return id; } }
11555
31.552113
81
java
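BlockReportLeaseManager above keeps its deferred and pending datanodes on hand-rolled circular doubly-linked lists anchored by sentinel nodes (NodeData.ListHead), which gives O(1) insertion at either end and O(1) unlinking without scanning a collection. The minimal sketch below reproduces that list discipline with plain strings; the Ring and Node names are illustrative, the lease bookkeeping and defensive checks of the real class are omitted, and nothing here is a Hadoop API.

import java.util.ArrayList;
import java.util.List;

/**
 * Minimal sketch of the sentinel-anchored circular doubly-linked list used by
 * BlockReportLeaseManager to track deferred vs. pending datanodes. The
 * sentinel points at itself when the ring is empty, so insertion and removal
 * never need null checks on the neighbours.
 */
public class Ring {
  static final class Node {
    final String name;
    Node prev;
    Node next;

    Node(String name) {
      this.name = name;
    }

    /** Unlink this node; assumes it is currently on some ring. */
    void removeSelf() {
      prev.next = next;
      next.prev = prev;
      prev = null;
      next = null;
    }
  }

  private final Node head;

  Ring(String label) {
    head = new Node(label);
    head.next = head;   // an empty ring: the sentinel points at itself
    head.prev = head;
  }

  /** Append a detached node just before the sentinel (i.e. at the tail). */
  void addToEnd(Node node) {
    node.prev = head.prev;
    node.next = head;
    head.prev.next = node;
    head.prev = node;
  }

  /** Snapshot of the ring contents, oldest first. */
  List<String> names() {
    List<String> out = new ArrayList<>();
    for (Node cur = head.next; cur != head; cur = cur.next) {
      out.add(cur.name);
    }
    return out;
  }

  public static void main(String[] args) {
    Ring pending = new Ring("pendingHead");
    Node a = new Node("dn-a");
    Node b = new Node("dn-b");
    pending.addToEnd(a);
    pending.addToEnd(b);
    System.out.println(pending.names());  // [dn-a, dn-b]
    a.removeSelf();                       // O(1), no scan of the ring
    System.out.println(pending.names());  // [dn-b]
  }
}

Because the sentinel is always present, moving a datanode between the deferred and pending rings is just one removeSelf followed by one add, regardless of ring size.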
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.AbstractList; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Queue; import java.util.TreeMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.hdfs.util.CyclicIteration; import org.apache.hadoop.util.ChunkedArrayList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static com.google.common.base.Preconditions.checkArgument; import static org.apache.hadoop.util.Time.monotonicNow; /** * Manages datanode decommissioning. A background monitor thread * periodically checks the status of datanodes that are in-progress of * decommissioning. * <p/> * A datanode can be decommissioned in a few situations: * <ul> * <li>If a DN is dead, it is decommissioned immediately.</li> * <li>If a DN is alive, it is decommissioned after all of its blocks * are sufficiently replicated. Merely under-replicated blocks do not * block decommissioning as long as they are above a replication * threshold.</li> * </ul> * In the second case, the datanode transitions to a * decommission-in-progress state and is tracked by the monitor thread. The * monitor periodically scans through the list of insufficiently replicated * blocks on these datanodes to * determine if they can be decommissioned. The monitor also prunes this list * as blocks become replicated, so monitor scans will become more efficient * over time. * <p/> * Decommission-in-progress nodes that become dead do not progress to * decommissioned until they become live again. This prevents potential * durability loss for singly-replicated blocks (see HDFS-6791). * <p/> * This class depends on the FSNamesystem lock for synchronization. 
*/ @InterfaceAudience.Private public class DecommissionManager { private static final Logger LOG = LoggerFactory.getLogger(DecommissionManager .class); private final Namesystem namesystem; private final BlockManager blockManager; private final HeartbeatManager hbManager; private final ScheduledExecutorService executor; /** * Map containing the decommission-in-progress datanodes that are being * tracked so they can be be marked as decommissioned. * <p/> * This holds a set of references to the under-replicated blocks on the DN at * the time the DN is added to the map, i.e. the blocks that are preventing * the node from being marked as decommissioned. During a monitor tick, this * list is pruned as blocks becomes replicated. * <p/> * Note also that the reference to the list of under-replicated blocks * will be null on initial add * <p/> * However, this map can become out-of-date since it is not updated by block * reports or other events. Before being finally marking as decommissioned, * another check is done with the actual block map. */ private final TreeMap<DatanodeDescriptor, AbstractList<BlockInfo>> decomNodeBlocks; /** * Tracking a node in decomNodeBlocks consumes additional memory. To limit * the impact on NN memory consumption, we limit the number of nodes in * decomNodeBlocks. Additional nodes wait in pendingNodes. */ private final Queue<DatanodeDescriptor> pendingNodes; private Monitor monitor = null; DecommissionManager(final Namesystem namesystem, final BlockManager blockManager, final HeartbeatManager hbManager) { this.namesystem = namesystem; this.blockManager = blockManager; this.hbManager = hbManager; executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder().setNameFormat("DecommissionMonitor-%d") .setDaemon(true).build()); decomNodeBlocks = new TreeMap<>(); pendingNodes = new LinkedList<>(); } /** * Start the decommission monitor thread. * @param conf */ void activate(Configuration conf) { final int intervalSecs = conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT); checkArgument(intervalSecs >= 0, "Cannot set a negative " + "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY); // By default, the new configuration key overrides the deprecated one. // No # node limit is set. int blocksPerInterval = conf.getInt( DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT); int nodesPerInterval = Integer.MAX_VALUE; // If the expected key isn't present and the deprecated one is, // use the deprecated one into the new one. This overrides the // default. // // Also print a deprecation warning. 
final String deprecatedKey = "dfs.namenode.decommission.nodes.per.interval"; final String strNodes = conf.get(deprecatedKey); if (strNodes != null) { nodesPerInterval = Integer.parseInt(strNodes); blocksPerInterval = Integer.MAX_VALUE; LOG.warn("Using deprecated configuration key {} value of {}.", deprecatedKey, nodesPerInterval); LOG.warn("Please update your configuration to use {} instead.", DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY); } checkArgument(blocksPerInterval > 0, "Must set a positive value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY); final int maxConcurrentTrackedNodes = conf.getInt( DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES, DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT); checkArgument(maxConcurrentTrackedNodes >= 0, "Cannot set a negative " + "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES); monitor = new Monitor(blocksPerInterval, nodesPerInterval, maxConcurrentTrackedNodes); executor.scheduleAtFixedRate(monitor, intervalSecs, intervalSecs, TimeUnit.SECONDS); LOG.debug("Activating DecommissionManager with interval {} seconds, " + "{} max blocks per interval, {} max nodes per interval, " + "{} max concurrently tracked nodes.", intervalSecs, blocksPerInterval, nodesPerInterval, maxConcurrentTrackedNodes); } /** * Stop the decommission monitor thread, waiting briefly for it to terminate. */ void close() { executor.shutdownNow(); try { executor.awaitTermination(3000, TimeUnit.MILLISECONDS); } catch (InterruptedException e) {} } /** * Start decommissioning the specified datanode. * @param node */ @VisibleForTesting public void startDecommission(DatanodeDescriptor node) { if (!node.isDecommissionInProgress() && !node.isDecommissioned()) { // Update DN stats maintained by HeartbeatManager hbManager.startDecommission(node); // hbManager.startDecommission will set dead node to decommissioned. if (node.isDecommissionInProgress()) { for (DatanodeStorageInfo storage : node.getStorageInfos()) { LOG.info("Starting decommission of {} {} with {} blocks", node, storage, storage.numBlocks()); } node.decommissioningStatus.setStartTime(monotonicNow()); pendingNodes.add(node); } } else { LOG.trace("startDecommission: Node {} in {}, nothing to do." + node, node.getAdminState()); } } /** * Stop decommissioning the specified datanode. * @param node */ @VisibleForTesting public void stopDecommission(DatanodeDescriptor node) { if (node.isDecommissionInProgress() || node.isDecommissioned()) { // Update DN stats maintained by HeartbeatManager hbManager.stopDecommission(node); // Over-replicated blocks will be detected and processed when // the dead node comes back and send in its full block report. if (node.isAlive) { blockManager.processOverReplicatedBlocksOnReCommission(node); } // Remove from tracking in DecommissionManager pendingNodes.remove(node); decomNodeBlocks.remove(node); } else { LOG.trace("stopDecommission: Node {} in {}, nothing to do." + node, node.getAdminState()); } } private void setDecommissioned(DatanodeDescriptor dn) { dn.setDecommissioned(); LOG.info("Decommissioning complete for node {}", dn); } /** * Checks whether a block is sufficiently replicated for decommissioning. * Full-strength replication is not always necessary, hence "sufficient". * @return true if sufficient, else false. 
*/ private boolean isSufficientlyReplicated(BlockInfo block, BlockCollection bc, NumberReplicas numberReplicas) { final int numExpected = bc.getPreferredBlockReplication(); final int numLive = numberReplicas.liveReplicas(); if (!blockManager.isNeededReplication(block, numExpected, numLive)) { // Block doesn't need replication. Skip. LOG.trace("Block {} does not need replication.", block); return true; } // Block is under-replicated LOG.trace("Block {} numExpected={}, numLive={}", block, numExpected, numLive); if (numExpected > numLive) { if (bc.isUnderConstruction() && block.equals(bc.getLastBlock())) { // Can decom a UC block as long as there will still be minReplicas if (numLive >= blockManager.minReplication) { LOG.trace("UC block {} sufficiently-replicated since numLive ({}) " + ">= minR ({})", block, numLive, blockManager.minReplication); return true; } else { LOG.trace("UC block {} insufficiently-replicated since numLive " + "({}) < minR ({})", block, numLive, blockManager.minReplication); } } else { // Can decom a non-UC as long as the default replication is met if (numLive >= blockManager.defaultReplication) { return true; } } } return false; } private static void logBlockReplicationInfo(Block block, BlockCollection bc, DatanodeDescriptor srcNode, NumberReplicas num, Iterable<DatanodeStorageInfo> storages) { int curReplicas = num.liveReplicas(); int curExpectedReplicas = bc.getPreferredBlockReplication(); StringBuilder nodeList = new StringBuilder(); for (DatanodeStorageInfo storage : storages) { final DatanodeDescriptor node = storage.getDatanodeDescriptor(); nodeList.append(node); nodeList.append(" "); } LOG.info("Block: " + block + ", Expected Replicas: " + curExpectedReplicas + ", live replicas: " + curReplicas + ", corrupt replicas: " + num.corruptReplicas() + ", decommissioned replicas: " + num.decommissioned() + ", decommissioning replicas: " + num.decommissioning() + ", excess replicas: " + num.excessReplicas() + ", Is Open File: " + bc.isUnderConstruction() + ", Datanodes having this block: " + nodeList + ", Current Datanode: " + srcNode + ", Is current datanode decommissioning: " + srcNode.isDecommissionInProgress()); } @VisibleForTesting public int getNumPendingNodes() { return pendingNodes.size(); } @VisibleForTesting public int getNumTrackedNodes() { return decomNodeBlocks.size(); } @VisibleForTesting public int getNumNodesChecked() { return monitor.numNodesChecked; } /** * Checks to see if DNs have finished decommissioning. * <p/> * Since this is done while holding the namesystem lock, * the amount of work per monitor tick is limited. */ private class Monitor implements Runnable { /** * The maximum number of blocks to check per tick. */ private final int numBlocksPerCheck; /** * The maximum number of nodes to check per tick. */ private final int numNodesPerCheck; /** * The maximum number of nodes to track in decomNodeBlocks. A value of 0 * means no limit. */ private final int maxConcurrentTrackedNodes; /** * The number of blocks that have been checked on this tick. */ private int numBlocksChecked = 0; /** * The number of nodes that have been checked on this tick. Used for * testing. 
*/ private int numNodesChecked = 0; /** * The last datanode in decomNodeBlocks that we've processed */ private DatanodeDescriptor iterkey = new DatanodeDescriptor(new DatanodeID("", "", "", 0, 0, 0, 0)); Monitor(int numBlocksPerCheck, int numNodesPerCheck, int maxConcurrentTrackedNodes) { this.numBlocksPerCheck = numBlocksPerCheck; this.numNodesPerCheck = numNodesPerCheck; this.maxConcurrentTrackedNodes = maxConcurrentTrackedNodes; } private boolean exceededNumBlocksPerCheck() { LOG.trace("Processed {} blocks so far this tick", numBlocksChecked); return numBlocksChecked >= numBlocksPerCheck; } @Deprecated private boolean exceededNumNodesPerCheck() { LOG.trace("Processed {} nodes so far this tick", numNodesChecked); return numNodesChecked >= numNodesPerCheck; } @Override public void run() { if (!namesystem.isRunning()) { LOG.info("Namesystem is not running, skipping decommissioning checks" + "."); return; } // Reset the checked count at beginning of each iteration numBlocksChecked = 0; numNodesChecked = 0; // Check decom progress namesystem.writeLock(); try { processPendingNodes(); check(); } finally { namesystem.writeUnlock(); } if (numBlocksChecked + numNodesChecked > 0) { LOG.info("Checked {} blocks and {} nodes this tick", numBlocksChecked, numNodesChecked); } } /** * Pop datanodes off the pending list and into decomNodeBlocks, * subject to the maxConcurrentTrackedNodes limit. */ private void processPendingNodes() { while (!pendingNodes.isEmpty() && (maxConcurrentTrackedNodes == 0 || decomNodeBlocks.size() < maxConcurrentTrackedNodes)) { decomNodeBlocks.put(pendingNodes.poll(), null); } } private void check() { final Iterator<Map.Entry<DatanodeDescriptor, AbstractList<BlockInfo>>> it = new CyclicIteration<>(decomNodeBlocks, iterkey).iterator(); final LinkedList<DatanodeDescriptor> toRemove = new LinkedList<>(); while (it.hasNext() && !exceededNumBlocksPerCheck() && !exceededNumNodesPerCheck()) { numNodesChecked++; final Map.Entry<DatanodeDescriptor, AbstractList<BlockInfo>> entry = it.next(); final DatanodeDescriptor dn = entry.getKey(); AbstractList<BlockInfo> blocks = entry.getValue(); boolean fullScan = false; if (blocks == null) { // This is a newly added datanode, run through its list to schedule // under-replicated blocks for replication and collect the blocks // that are insufficiently replicated for further tracking LOG.debug("Newly-added node {}, doing full scan to find " + "insufficiently-replicated blocks.", dn); blocks = handleInsufficientlyReplicated(dn); decomNodeBlocks.put(dn, blocks); fullScan = true; } else { // This is a known datanode, check if its # of insufficiently // replicated blocks has dropped to zero and if it can be decommed LOG.debug("Processing decommission-in-progress node {}", dn); pruneSufficientlyReplicated(dn, blocks); } if (blocks.size() == 0) { if (!fullScan) { // If we didn't just do a full scan, need to re-check with the // full block map. // // We've replicated all the known insufficiently replicated // blocks. Re-check with the full block map before finally // marking the datanode as decommissioned LOG.debug("Node {} has finished replicating current set of " + "blocks, checking with the full block map.", dn); blocks = handleInsufficientlyReplicated(dn); decomNodeBlocks.put(dn, blocks); } // If the full scan is clean AND the node liveness is okay, // we can finally mark as decommissioned. 
final boolean isHealthy = blockManager.isNodeHealthyForDecommission(dn); if (blocks.size() == 0 && isHealthy) { setDecommissioned(dn); toRemove.add(dn); LOG.debug("Node {} is sufficiently replicated and healthy, " + "marked as decommissioned.", dn); } else { if (LOG.isDebugEnabled()) { StringBuilder b = new StringBuilder("Node {} "); if (isHealthy) { b.append("is "); } else { b.append("isn't "); } b.append("healthy and still needs to replicate {} more blocks," + " decommissioning is still in progress."); LOG.debug(b.toString(), dn, blocks.size()); } } } else { LOG.debug("Node {} still has {} blocks to replicate " + "before it is a candidate to finish decommissioning.", dn, blocks.size()); } iterkey = dn; } // Remove the datanodes that are decommissioned for (DatanodeDescriptor dn : toRemove) { Preconditions.checkState(dn.isDecommissioned(), "Removing a node that is not yet decommissioned!"); decomNodeBlocks.remove(dn); } } /** * Removes sufficiently replicated blocks from the block list of a * datanode. */ private void pruneSufficientlyReplicated(final DatanodeDescriptor datanode, AbstractList<BlockInfo> blocks) { processBlocksForDecomInternal(datanode, blocks.iterator(), null, true); } /** * Returns a list of blocks on a datanode that are insufficiently * replicated, i.e. are under-replicated enough to prevent decommission. * <p/> * As part of this, it also schedules replication work for * any under-replicated blocks. * * @param datanode * @return List of insufficiently replicated blocks */ private AbstractList<BlockInfo> handleInsufficientlyReplicated( final DatanodeDescriptor datanode) { AbstractList<BlockInfo> insufficient = new ChunkedArrayList<>(); processBlocksForDecomInternal(datanode, datanode.getBlockIterator(), insufficient, false); return insufficient; } /** * Used while checking if decommission-in-progress datanodes can be marked * as decommissioned. Combines shared logic of * pruneSufficientlyReplicated and handleInsufficientlyReplicated. * * @param datanode Datanode * @param it Iterator over the blocks on the * datanode * @param insufficientlyReplicated Return parameter. If it's not null, * will contain the insufficiently * replicated-blocks from the list. * @param pruneSufficientlyReplicated whether to remove sufficiently * replicated blocks from the iterator * @return true if there are under-replicated blocks in the provided block * iterator, else false. */ private void processBlocksForDecomInternal( final DatanodeDescriptor datanode, final Iterator<BlockInfo> it, final List<BlockInfo> insufficientlyReplicated, boolean pruneSufficientlyReplicated) { boolean firstReplicationLog = true; int underReplicatedBlocks = 0; int decommissionOnlyReplicas = 0; int underReplicatedInOpenFiles = 0; while (it.hasNext()) { numBlocksChecked++; final BlockInfo block = it.next(); // Remove the block from the list if it's no longer in the block map, // e.g. the containing file has been deleted if (blockManager.blocksMap.getStoredBlock(block) == null) { LOG.trace("Removing unknown block {}", block); it.remove(); continue; } BlockCollection bc = blockManager.blocksMap.getBlockCollection(block); if (bc == null) { // Orphan block, will be invalidated eventually. Skip. 
continue; } final NumberReplicas num = blockManager.countNodes(block); final int liveReplicas = num.liveReplicas(); final int curReplicas = liveReplicas; // Schedule under-replicated blocks for replication if not already // pending if (blockManager.isNeededReplication(block, bc.getPreferredBlockReplication(), liveReplicas)) { if (!blockManager.neededReplications.contains(block) && blockManager.pendingReplications.getNumReplicas(block) == 0 && namesystem.isPopulatingReplQueues()) { // Process these blocks only when active NN is out of safe mode. blockManager.neededReplications.add(block, curReplicas, num.decommissionedAndDecommissioning(), bc.getPreferredBlockReplication()); } } // Even if the block is under-replicated, // it doesn't block decommission if it's sufficiently replicated if (isSufficientlyReplicated(block, bc, num)) { if (pruneSufficientlyReplicated) { it.remove(); } continue; } // We've found an insufficiently replicated block. if (insufficientlyReplicated != null) { insufficientlyReplicated.add(block); } // Log if this is our first time through if (firstReplicationLog) { logBlockReplicationInfo(block, bc, datanode, num, blockManager.blocksMap.getStorages(block)); firstReplicationLog = false; } // Update various counts underReplicatedBlocks++; if (bc.isUnderConstruction()) { underReplicatedInOpenFiles++; } if ((curReplicas == 0) && (num.decommissionedAndDecommissioning() > 0)) { decommissionOnlyReplicas++; } } datanode.decommissioningStatus.set(underReplicatedBlocks, decommissionOnlyReplicas, underReplicatedInOpenFiles); } } @VisibleForTesting void runMonitor() throws ExecutionException, InterruptedException { Future f = executor.submit(monitor); f.get(); } }
24354
38.219002
86
java
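DecommissionManager.isSufficientlyReplicated above encodes the rule that a decommissioning replica does not hold up decommission once the block no longer needs replication, or, if it is still under-replicated, once live replicas meet the cluster default (or the minimum replication for the last block of a file under construction). The sketch below restates that decision with plain integers; the hard-coded defaults (3 and 1), the simplified inputs, and the method names are assumptions for illustration, whereas the real method consults BlockManager state.

/**
 * Standalone sketch of the per-block decision DecommissionManager makes when
 * deciding whether a block on a decommissioning datanode still blocks
 * decommission.
 */
public class DecommissionCheckSketch {

  /** Stand-ins for dfs.replication and dfs.namenode.replication.min defaults. */
  private static final int DEFAULT_REPLICATION = 3;
  private static final int MIN_REPLICATION = 1;

  /**
   * @param expected    the file's preferred replication factor
   * @param live        live replicas currently reported
   * @param lastUcBlock true if this is the last block of a file under construction
   * @return true if losing the decommissioning replica is acceptable
   */
  static boolean sufficientForDecommission(int expected, int live,
                                           boolean lastUcBlock) {
    if (live >= expected) {
      // Not under-replicated at all: this replica is not needed.
      return true;
    }
    if (lastUcBlock) {
      // A block still being written only needs the minimum replica count.
      return live >= MIN_REPLICATION;
    }
    // A finalized block is acceptable once the cluster-wide default is met,
    // even if the file asks for more.
    return live >= DEFAULT_REPLICATION;
  }

  public static void main(String[] args) {
    System.out.println(sufficientForDecommission(3, 3, false)); // true
    System.out.println(sufficientForDecommission(5, 3, false)); // true  (default met)
    System.out.println(sufficientForDecommission(3, 2, false)); // false (still blocks decommission)
    System.out.println(sufficientForDecommission(3, 1, true));  // true  (UC last block, minR met)
  }
}

Blocks that fail this check are the ones the monitor keeps in decomNodeBlocks and re-checks on every tick until replication catches up.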
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** A map from host names to datanode descriptors. */ @InterfaceAudience.Private @InterfaceStability.Evolving class Host2NodesMap { private HashMap<String, String> mapHost = new HashMap<String, String>(); private final HashMap<String, DatanodeDescriptor[]> map = new HashMap<String, DatanodeDescriptor[]>(); private final ReadWriteLock hostmapLock = new ReentrantReadWriteLock(); /** Check if node is already in the map. */ boolean contains(DatanodeDescriptor node) { if (node==null) { return false; } String ipAddr = node.getIpAddr(); hostmapLock.readLock().lock(); try { DatanodeDescriptor[] nodes = map.get(ipAddr); if (nodes != null) { for(DatanodeDescriptor containedNode:nodes) { if (node==containedNode) { return true; } } } } finally { hostmapLock.readLock().unlock(); } return false; } /** add node to the map * return true if the node is added; false otherwise. */ boolean add(DatanodeDescriptor node) { hostmapLock.writeLock().lock(); try { if (node==null || contains(node)) { return false; } String ipAddr = node.getIpAddr(); String hostname = node.getHostName(); mapHost.put(hostname, ipAddr); DatanodeDescriptor[] nodes = map.get(ipAddr); DatanodeDescriptor[] newNodes; if (nodes==null) { newNodes = new DatanodeDescriptor[1]; newNodes[0]=node; } else { // rare case: more than one datanode on the host newNodes = new DatanodeDescriptor[nodes.length+1]; System.arraycopy(nodes, 0, newNodes, 0, nodes.length); newNodes[nodes.length] = node; } map.put(ipAddr, newNodes); return true; } finally { hostmapLock.writeLock().unlock(); } } /** remove node from the map * return true if the node is removed; false otherwise. 
*/ boolean remove(DatanodeDescriptor node) { if (node==null) { return false; } String ipAddr = node.getIpAddr(); String hostname = node.getHostName(); hostmapLock.writeLock().lock(); try { DatanodeDescriptor[] nodes = map.get(ipAddr); if (nodes==null) { return false; } if (nodes.length==1) { if (nodes[0]==node) { map.remove(ipAddr); //remove hostname key since last datanode is removed mapHost.remove(hostname); return true; } else { return false; } } //rare case int i=0; for(; i<nodes.length; i++) { if (nodes[i]==node) { break; } } if (i==nodes.length) { return false; } else { DatanodeDescriptor[] newNodes; newNodes = new DatanodeDescriptor[nodes.length-1]; System.arraycopy(nodes, 0, newNodes, 0, i); System.arraycopy(nodes, i+1, newNodes, i, nodes.length-i-1); map.put(ipAddr, newNodes); return true; } } finally { hostmapLock.writeLock().unlock(); } } /** * Get a data node by its IP address. * @return DatanodeDescriptor if found, null otherwise */ DatanodeDescriptor getDatanodeByHost(String ipAddr) { if (ipAddr == null) { return null; } hostmapLock.readLock().lock(); try { DatanodeDescriptor[] nodes = map.get(ipAddr); // no entry if (nodes== null) { return null; } // one node if (nodes.length == 1) { return nodes[0]; } // more than one node return nodes[ThreadLocalRandom.current().nextInt(nodes.length)]; } finally { hostmapLock.readLock().unlock(); } } /** * Find data node by its transfer address * * @return DatanodeDescriptor if found or null otherwise */ public DatanodeDescriptor getDatanodeByXferAddr(String ipAddr, int xferPort) { if (ipAddr==null) { return null; } hostmapLock.readLock().lock(); try { DatanodeDescriptor[] nodes = map.get(ipAddr); // no entry if (nodes== null) { return null; } for(DatanodeDescriptor containedNode:nodes) { if (xferPort == containedNode.getXferPort()) { return containedNode; } } return null; } finally { hostmapLock.readLock().unlock(); } } /** get a data node by its hostname. This should be used if only one * datanode service is running on a hostname. If multiple datanodes * are running on a hostname then use methods getDataNodeByXferAddr and * getDataNodeByHostNameAndPort. * @return DatanodeDescriptor if found; otherwise null. */ DatanodeDescriptor getDataNodeByHostName(String hostname) { if(hostname == null) { return null; } hostmapLock.readLock().lock(); try { String ipAddr = mapHost.get(hostname); if(ipAddr == null) { return null; } else { return getDatanodeByHost(ipAddr); } } finally { hostmapLock.readLock().unlock(); } } @Override public String toString() { final StringBuilder b = new StringBuilder(getClass().getSimpleName()) .append("["); for(Map.Entry<String, String> host: mapHost.entrySet()) { DatanodeDescriptor[] e = map.get(host.getValue()); b.append("\n " + host.getKey() + " => "+host.getValue() + " => " + Arrays.asList(e)); } return b.append("\n]").toString(); } }
6761
27.531646
75
java
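Host2NodesMap above handles the rare case of several datanodes behind one IP with a small array that is rebuilt on every add or remove and a random pick on lookup, all guarded by a read-write lock. The sketch below keeps only the copy-on-write array idiom with plain strings and drops the locking; the class and method names are made up for illustration and are not part of Hadoop.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;

/**
 * Sketch of the copy-on-write array multimap idiom used by Host2NodesMap:
 * each key maps to a small array that is rebuilt on every add, and lookups
 * pick a random element when several values share a key.
 */
public class CopyOnWriteArrayMultimap {
  private final Map<String, String[]> map = new HashMap<>();

  /** Add a value for the key, growing the array by one slot. */
  public void add(String key, String value) {
    String[] old = map.get(key);
    if (old == null) {
      map.put(key, new String[] { value });
      return;
    }
    String[] grown = Arrays.copyOf(old, old.length + 1);
    grown[old.length] = value;
    map.put(key, grown);
  }

  /** Pick one value for the key at random, or null if none. */
  public String pick(String key) {
    String[] values = map.get(key);
    if (values == null) {
      return null;
    }
    if (values.length == 1) {
      return values[0];
    }
    return values[ThreadLocalRandom.current().nextInt(values.length)];
  }

  public static void main(String[] args) {
    CopyOnWriteArrayMultimap m = new CopyOnWriteArrayMultimap();
    m.add("10.0.0.7", "dn-a:9866");
    m.add("10.0.0.7", "dn-b:9866");          // two datanodes on one host
    System.out.println(m.pick("10.0.0.7"));  // dn-a:9866 or dn-b:9866
    System.out.println(m.pick("10.0.0.9"));  // null
  }
}

Since the common case is exactly one datanode per IP, the array copy on add is cheap, and reads never see a partially updated entry.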
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.util.ReflectionUtils; /** * This interface is used for choosing the desired number of targets * for placing block replicas. */ @InterfaceAudience.Private public abstract class BlockPlacementPolicy { static final Log LOG = LogFactory.getLog(BlockPlacementPolicy.class); @InterfaceAudience.Private public static class NotEnoughReplicasException extends Exception { private static final long serialVersionUID = 1L; NotEnoughReplicasException(String msg) { super(msg); } } /** * choose <i>numOfReplicas</i> data nodes for <i>writer</i> * to re-replicate a block with size <i>blocksize</i> * If not, return as many as we can. * * @param srcPath the file to which this chooseTargets is being invoked. * @param numOfReplicas additional number of replicas wanted. * @param writer the writer's machine, null if not in the cluster. * @param chosen datanodes that have been chosen as targets. * @param returnChosenNodes decide if the chosenNodes are returned. * @param excludedNodes datanodes that should not be considered as targets. * @param blocksize size of the data to be written. * @return array of DatanodeDescriptor instances chosen as target * and sorted as a pipeline. */ public abstract DatanodeStorageInfo[] chooseTarget(String srcPath, int numOfReplicas, Node writer, List<DatanodeStorageInfo> chosen, boolean returnChosenNodes, Set<Node> excludedNodes, long blocksize, BlockStoragePolicy storagePolicy); /** * Same as {@link #chooseTarget(String, int, Node, Set, long, List, StorageType)} * with added parameter {@code favoredDatanodes} * @param favoredNodes datanodes that should be favored as targets. This * is only a hint and due to cluster state, namenode may not be * able to place the blocks on these datanodes. 
*/ DatanodeStorageInfo[] chooseTarget(String src, int numOfReplicas, Node writer, Set<Node> excludedNodes, long blocksize, List<DatanodeDescriptor> favoredNodes, BlockStoragePolicy storagePolicy) { // This class does not provide the functionality of placing // a block in favored datanodes. The implementations of this class // are expected to provide this functionality return chooseTarget(src, numOfReplicas, writer, new ArrayList<DatanodeStorageInfo>(numOfReplicas), false, excludedNodes, blocksize, storagePolicy); } /** * Verify if the block's placement meets requirement of placement policy, * i.e. replicas are placed on no less than minRacks racks in the system. * * @param srcPath the full pathname of the file to be verified * @param lBlk block with locations * @param numOfReplicas replica number of file to be verified * @return the result of verification */ abstract public BlockPlacementStatus verifyBlockPlacement(String srcPath, LocatedBlock lBlk, int numOfReplicas); /** * Decide whether deleting the specified replica of the block still makes * the block conform to the configured block placement policy. * * @param srcBC block collection of file to which block-to-be-deleted belongs * @param block The block to be deleted * @param replicationFactor The required number of replicas for this block * @param moreThanOne The replica locations of this block that are present * on more than one unique racks. * @param exactlyOne Replica locations of this block that are present * on exactly one unique racks. * @param excessTypes The excess {@link StorageType}s according to the * {@link BlockStoragePolicy}. * @return the replica that is the best candidate for deletion */ abstract public DatanodeStorageInfo chooseReplicaToDelete( BlockCollection srcBC, Block block, short replicationFactor, Collection<DatanodeStorageInfo> moreThanOne, Collection<DatanodeStorageInfo> exactlyOne, List<StorageType> excessTypes); /** * Used to setup a BlockPlacementPolicy object. This should be defined by * all implementations of a BlockPlacementPolicy. * * @param conf the configuration object * @param stats retrieve cluster status from here * @param clusterMap cluster topology */ abstract protected void initialize(Configuration conf, FSClusterStats stats, NetworkTopology clusterMap, Host2NodesMap host2datanodeMap); /** * Get an instance of the configured Block Placement Policy based on the * the configuration property * {@link DFSConfigKeys#DFS_BLOCK_REPLICATOR_CLASSNAME_KEY}. * * @param conf the configuration to be used * @param stats an object that is used to retrieve the load on the cluster * @param clusterMap the network topology of the cluster * @return an instance of BlockPlacementPolicy */ public static BlockPlacementPolicy getInstance(Configuration conf, FSClusterStats stats, NetworkTopology clusterMap, Host2NodesMap host2datanodeMap) { final Class<? extends BlockPlacementPolicy> replicatorClass = conf.getClass( DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_DEFAULT, BlockPlacementPolicy.class); final BlockPlacementPolicy replicator = ReflectionUtils.newInstance( replicatorClass, conf); replicator.initialize(conf, stats, clusterMap, host2datanodeMap); return replicator; } /** * Adjust rackmap, moreThanOne, and exactlyOne after removing replica on cur. 
* * @param rackMap a map from rack to replica * @param moreThanOne The List of replica nodes on rack which has more than * one replica * @param exactlyOne The List of replica nodes on rack with only one replica * @param cur current replica to remove */ public void adjustSetsWithChosenReplica( final Map<String, List<DatanodeStorageInfo>> rackMap, final List<DatanodeStorageInfo> moreThanOne, final List<DatanodeStorageInfo> exactlyOne, final DatanodeStorageInfo cur) { final String rack = getRack(cur.getDatanodeDescriptor()); final List<DatanodeStorageInfo> storages = rackMap.get(rack); storages.remove(cur); if (storages.isEmpty()) { rackMap.remove(rack); } if (moreThanOne.remove(cur)) { if (storages.size() == 1) { final DatanodeStorageInfo remaining = storages.get(0); moreThanOne.remove(remaining); exactlyOne.add(remaining); } } else { exactlyOne.remove(cur); } } /** * Get rack string from a data node * @return rack of data node */ protected String getRack(final DatanodeInfo datanode) { return datanode.getNetworkLocation(); } /** * Split data nodes into two sets, one set includes nodes on rack with * more than one replica, the other set contains the remaining nodes. * * @param dataNodes datanodes to be split into two sets * @param rackMap a map from rack to datanodes * @param moreThanOne contains nodes on rack with more than one replica * @param exactlyOne remains contains the remaining nodes */ public void splitNodesWithRack( final Iterable<DatanodeStorageInfo> storages, final Map<String, List<DatanodeStorageInfo>> rackMap, final List<DatanodeStorageInfo> moreThanOne, final List<DatanodeStorageInfo> exactlyOne) { for(DatanodeStorageInfo s: storages) { final String rackName = getRack(s.getDatanodeDescriptor()); List<DatanodeStorageInfo> storageList = rackMap.get(rackName); if (storageList == null) { storageList = new ArrayList<DatanodeStorageInfo>(); rackMap.put(rackName, storageList); } storageList.add(s); } // split nodes into two sets for(List<DatanodeStorageInfo> storageList : rackMap.values()) { if (storageList.size() == 1) { // exactlyOne contains nodes on rack with only one replica exactlyOne.add(storageList.get(0)); } else { // moreThanOne contains nodes on rack with more than one replica moreThanOne.addAll(storageList); } } } }
10,294
40.18
83
java
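The splitNodesWithRack/adjustSetsWithChosenReplica helpers in the BlockPlacementPolicy source above are normally driven by the excess-replica deletion path in BlockManager. The sketch below shows how the two calls fit together; it is assumed to live in the same org.apache.hadoop.hdfs.server.blockmanagement package, and the class name, method name, and placeholder replica choice (RackBookkeepingSketch, dropOneExcessReplica, candidates.get(0)) are illustrative only, not the real call site.

package org.apache.hadoop.hdfs.server.blockmanagement;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch only: shows how the rack bookkeeping helpers above fit together.
class RackBookkeepingSketch {
  static void dropOneExcessReplica(BlockPlacementPolicy policy,
      Iterable<DatanodeStorageInfo> storages) {
    Map<String, List<DatanodeStorageInfo>> rackMap = new HashMap<>();
    List<DatanodeStorageInfo> moreThanOne = new ArrayList<>();
    List<DatanodeStorageInfo> exactlyOne = new ArrayList<>();

    // Partition the current replicas into racks holding one vs. several replicas.
    policy.splitNodesWithRack(storages, rackMap, moreThanOne, exactlyOne);

    // Prefer deleting from a rack that still keeps a replica afterwards;
    // the real choice would come from chooseReplicaToDelete.
    List<DatanodeStorageInfo> candidates =
        moreThanOne.isEmpty() ? exactlyOne : moreThanOne;
    if (candidates.isEmpty()) {
      return;
    }
    DatanodeStorageInfo cur = candidates.get(0); // placeholder choice

    // Keep rackMap/moreThanOne/exactlyOne consistent after removing 'cur'.
    policy.adjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, cur);
  }
}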
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; /** * Subclass of {@link BlockInfo}, used for a block with replication scheme. */ @InterfaceAudience.Private public class BlockInfoContiguous extends BlockInfo { public static final BlockInfoContiguous[] EMPTY_ARRAY = {}; public BlockInfoContiguous(short size) { super(size); } public BlockInfoContiguous(Block blk, short size) { super(blk, size); } /** * Copy construction. * This is used to convert BlockReplicationInfoUnderConstruction * @param from BlockReplicationInfo to copy from. */ protected BlockInfoContiguous(BlockInfoContiguous from) { super(from); } /** * Ensure that there is enough space to include num more triplets. * @return first free triplet index. */ private int ensureCapacity(int num) { assert this.triplets != null : "BlockInfo is not initialized"; int last = numNodes(); if (triplets.length >= (last+num)*3) { return last; } /* Not enough space left. Create a new array. Should normally * happen only when replication is manually increased by the user. 
*/ Object[] old = triplets; triplets = new Object[(last+num)*3]; System.arraycopy(old, 0, triplets, 0, last * 3); return last; } @Override boolean addStorage(DatanodeStorageInfo storage) { // find the last null node int lastNode = ensureCapacity(1); setStorageInfo(lastNode, storage); setNext(lastNode, null); setPrevious(lastNode, null); return true; } @Override boolean removeStorage(DatanodeStorageInfo storage) { int dnIndex = findStorageInfo(storage); if (dnIndex < 0) { // the node is not found return false; } assert getPrevious(dnIndex) == null && getNext(dnIndex) == null : "Block is still in the list and must be removed first."; // find the last not null node int lastNode = numNodes()-1; // replace current node triplet by the lastNode one setStorageInfo(dnIndex, getStorageInfo(lastNode)); setNext(dnIndex, getNext(lastNode)); setPrevious(dnIndex, getPrevious(lastNode)); // set the last triplet to null setStorageInfo(lastNode, null); setNext(lastNode, null); setPrevious(lastNode, null); return true; } @Override public int numNodes() { assert this.triplets != null : "BlockInfo is not initialized"; assert triplets.length % 3 == 0 : "Malformed BlockInfo"; for (int idx = getCapacity()-1; idx >= 0; idx--) { if (getDatanode(idx) != null) { return idx + 1; } } return 0; } @Override void replaceBlock(BlockInfo newBlock) { assert newBlock instanceof BlockInfoContiguous; for (int i = this.numNodes() - 1; i >= 0; i--) { final DatanodeStorageInfo storage = this.getStorageInfo(i); final boolean removed = storage.removeBlock(this); assert removed : "currentBlock not found."; final DatanodeStorageInfo.AddBlockResult result = storage.addBlock( newBlock); assert result == DatanodeStorageInfo.AddBlockResult.ADDED : "newBlock already exists."; } } }
4,049
31.66129
75
java
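The ensureCapacity method in BlockInfoContiguous above relies on the invariant that each replica occupies three consecutive slots (storage reference, previous pointer, next pointer) in the triplets array. The stand-alone class below re-states that growth rule with plain arrays; it is an illustration of the arithmetic only, not part of the real class.

// Stand-alone illustration of the triplets growth rule used above.
class TripletsGrowthSketch {
  // Each replica uses 3 object slots in the per-storage block list.
  private Object[] triplets = new Object[3 * 3]; // initial room for 3 replicas

  int ensureCapacity(int currentReplicas, int numNewReplicas) {
    if (triplets.length >= (currentReplicas + numNewReplicas) * 3) {
      return currentReplicas; // index of the first free replica slot
    }
    // Not enough space: grow the array and copy the existing triplets over.
    Object[] old = triplets;
    triplets = new Object[(currentReplicas + numNewReplicas) * 3];
    System.arraycopy(old, 0, triplets, 0, currentReplicas * 3);
    return currentReplicas;
  }
}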
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import java.util.List; import java.util.Map; import java.util.Queue; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import com.google.common.collect.Lists; import com.google.common.collect.Maps; /** * In the Standby Node, we can receive messages about blocks * before they are actually available in the namespace, or while * they have an outdated state in the namespace. In those cases, * we queue those block-related messages in this structure. * */ class PendingDataNodeMessages { final Map<Block, Queue<ReportedBlockInfo>> queueByBlockId = Maps.newHashMap(); private int count = 0; static class ReportedBlockInfo { private final Block block; private final DatanodeStorageInfo storageInfo; private final ReplicaState reportedState; ReportedBlockInfo(DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState) { this.storageInfo = storageInfo; this.block = block; this.reportedState = reportedState; } Block getBlock() { return block; } ReplicaState getReportedState() { return reportedState; } DatanodeStorageInfo getStorageInfo() { return storageInfo; } @Override public String toString() { return "ReportedBlockInfo [block=" + block + ", dn=" + storageInfo.getDatanodeDescriptor() + ", reportedState=" + reportedState + "]"; } } /** * Remove all pending DN messages which reference the given DN. * @param dn the datanode whose messages we should remove. */ void removeAllMessagesForDatanode(DatanodeDescriptor dn) { for (Map.Entry<Block, Queue<ReportedBlockInfo>> entry : queueByBlockId.entrySet()) { Queue<ReportedBlockInfo> newQueue = Lists.newLinkedList(); Queue<ReportedBlockInfo> oldQueue = entry.getValue(); while (!oldQueue.isEmpty()) { ReportedBlockInfo rbi = oldQueue.remove(); if (!rbi.getStorageInfo().getDatanodeDescriptor().equals(dn)) { newQueue.add(rbi); } else { count--; } } queueByBlockId.put(entry.getKey(), newQueue); } } void enqueueReportedBlock(DatanodeStorageInfo storageInfo, Block block, ReplicaState reportedState) { block = new Block(block); getBlockQueue(block).add( new ReportedBlockInfo(storageInfo, block, reportedState)); count++; } /** * @return any messages that were previously queued for the given block, * or null if no messages were queued. 
*/ Queue<ReportedBlockInfo> takeBlockQueue(Block block) { Queue<ReportedBlockInfo> queue = queueByBlockId.remove(block); if (queue != null) { count -= queue.size(); } return queue; } private Queue<ReportedBlockInfo> getBlockQueue(Block block) { Queue<ReportedBlockInfo> queue = queueByBlockId.get(block); if (queue == null) { queue = Lists.newLinkedList(); queueByBlockId.put(block, queue); } return queue; } int count() { return count ; } @Override public String toString() { StringBuilder sb = new StringBuilder(); for (Map.Entry<Block, Queue<ReportedBlockInfo>> entry : queueByBlockId.entrySet()) { sb.append("Block " + entry.getKey() + ":\n"); for (ReportedBlockInfo rbi : entry.getValue()) { sb.append(" ").append(rbi).append("\n"); } } return sb.toString(); } Iterable<ReportedBlockInfo> takeAll() { List<ReportedBlockInfo> rbis = Lists.newArrayListWithCapacity( count); for (Queue<ReportedBlockInfo> q : queueByBlockId.values()) { rbis.addAll(q); } queueByBlockId.clear(); count = 0; return rbis; } }
4,647
29.379085
77
java
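As a rough sketch of how the Standby side drives the PendingDataNodeMessages queue above (the real call sites are in BlockManager; storageInfo and reportedBlock are assumed to be in scope, and the fragment is assumed to run inside the same package since the class and its methods are package-private):

// Queue a report that arrived before the namespace knew about the block.
PendingDataNodeMessages pending = new PendingDataNodeMessages();
pending.enqueueReportedBlock(storageInfo, reportedBlock,
    HdfsServerConstants.ReplicaState.FINALIZED);

// Later, once edit-log tailing has made the block visible, drain and
// re-process any queued messages for it.
Queue<PendingDataNodeMessages.ReportedBlockInfo> queued =
    pending.takeBlockQueue(reportedBlock);
if (queued != null) {
  for (PendingDataNodeMessages.ReportedBlockInfo rbi : queued) {
    // re-apply rbi.getReportedState() for rbi.getBlock() against the
    // now up-to-date namespace
  }
}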
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.blockmanagement; import static org.apache.hadoop.util.Time.monotonicNow; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.net.InetAddresses; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.*; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList; import org.apache.hadoop.hdfs.server.namenode.CachedBlock; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.hdfs.server.protocol.*; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.*; import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException; import org.apache.hadoop.util.ReflectionUtils; import java.io.IOException; import java.io.PrintWriter; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.*; import java.util.concurrent.ThreadLocalRandom; /** * Manage datanodes, include decommission and other activities. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class DatanodeManager { static final Log LOG = LogFactory.getLog(DatanodeManager.class); private final Namesystem namesystem; private final BlockManager blockManager; private final DecommissionManager decomManager; private final HeartbeatManager heartbeatManager; private final FSClusterStats fsClusterStats; /** * Stores the datanode -> block map. * <p> * Done by storing a set of {@link DatanodeDescriptor} objects, sorted by * storage id. In order to keep the storage map consistent it tracks * all storages ever registered with the namenode. 
* A descriptor corresponding to a specific storage id can be * <ul> * <li>added to the map if it is a new storage id;</li> * <li>updated with a new datanode started as a replacement for the old one * with the same storage id; and </li> * <li>removed if and only if an existing datanode is restarted to serve a * different storage id.</li> * </ul> <br> * <p> * Mapping: StorageID -> DatanodeDescriptor */ private final Map<String, DatanodeDescriptor> datanodeMap = new HashMap<>(); /** Cluster network topology */ private final NetworkTopology networktopology; /** Host names to datanode descriptors mapping. */ private final Host2NodesMap host2DatanodeMap = new Host2NodesMap(); private final DNSToSwitchMapping dnsToSwitchMapping; private final boolean rejectUnresolvedTopologyDN; private final int defaultXferPort; private final int defaultInfoPort; private final int defaultInfoSecurePort; private final int defaultIpcPort; /** Read include/exclude files*/ private final HostFileManager hostFileManager = new HostFileManager(); /** The period to wait for datanode heartbeat.*/ private long heartbeatExpireInterval; /** Ask Datanode only up to this many blocks to delete. */ final int blockInvalidateLimit; /** The interval for judging stale DataNodes for read/write */ private final long staleInterval; /** Whether or not to avoid using stale DataNodes for reading */ private final boolean avoidStaleDataNodesForRead; /** * Whether or not to avoid using stale DataNodes for writing. * Note that, even if this is configured, the policy may be * temporarily disabled when a high percentage of the nodes * are marked as stale. */ private final boolean avoidStaleDataNodesForWrite; /** * When the ratio of stale datanodes reaches this number, stop avoiding * writing to stale datanodes, i.e., continue using stale nodes for writing. */ private final float ratioUseStaleDataNodesForWrite; /** The number of stale DataNodes */ private volatile int numStaleNodes; /** The number of stale storages */ private volatile int numStaleStorages; /** * Number of blocks to check for each postponedMisreplicatedBlocks iteration */ private final long blocksPerPostponedMisreplicatedBlocksRescan; /** * Whether or not this cluster has ever consisted of more than 1 rack, * according to the NetworkTopology. */ private boolean hasClusterEverBeenMultiRack = false; private final boolean checkIpHostnameInRegistration; /** * Whether we should tell datanodes what to cache in replies to * heartbeat messages. */ private boolean shouldSendCachingCommands = false; /** * The number of datanodes for each software version. This list should change * during rolling upgrades. * Software version -> Number of datanodes with this version */ private HashMap<String, Integer> datanodesSoftwareVersions = new HashMap<>(4, 0.75f); /** * The minimum time between resending caching directives to Datanodes, * in milliseconds. * * Note that when a rescan happens, we will send the new directives * as soon as possible. This timeout only applies to resending * directives that we've already sent. 
*/ private final long timeBetweenResendingCachingDirectivesMs; DatanodeManager(final BlockManager blockManager, final Namesystem namesystem, final Configuration conf) throws IOException { this.namesystem = namesystem; this.blockManager = blockManager; this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf); this.decomManager = new DecommissionManager(namesystem, blockManager, heartbeatManager); this.fsClusterStats = newFSClusterStats(); networktopology = NetworkTopology.getInstance(conf); this.defaultXferPort = NetUtils.createSocketAddr( conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort(); this.defaultInfoPort = NetUtils.createSocketAddr( conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT)).getPort(); this.defaultInfoSecurePort = NetUtils.createSocketAddr( conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort(); this.defaultIpcPort = NetUtils.createSocketAddr( conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort(); try { this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""), conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, "")); } catch (IOException e) { LOG.error("error reading hosts files: ", e); } this.dnsToSwitchMapping = ReflectionUtils.newInstance( conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, ScriptBasedMapping.class, DNSToSwitchMapping.class), conf); this.rejectUnresolvedTopologyDN = conf.getBoolean( DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY, DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_DEFAULT); // If the dns to switch mapping supports cache, resolve network // locations of those hosts in the include list and store the mapping // in the cache; so future calls to resolve will be fast. 
if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) { final ArrayList<String> locations = new ArrayList<>(); for (InetSocketAddress addr : hostFileManager.getIncludes()) { locations.add(addr.getAddress().getHostAddress()); } dnsToSwitchMapping.resolve(locations); } final long heartbeatIntervalSeconds = conf.getLong( DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT); final int heartbeatRecheckInterval = conf.getInt( DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval + 10 * 1000 * heartbeatIntervalSeconds; final int blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds), DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT); this.blockInvalidateLimit = conf.getInt( DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit); LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY + "=" + this.blockInvalidateLimit); this.checkIpHostnameInRegistration = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY, DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT); LOG.info(DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY + "=" + checkIpHostnameInRegistration); this.avoidStaleDataNodesForRead = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT); this.avoidStaleDataNodesForWrite = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT); this.staleInterval = getStaleIntervalFromConf(conf, heartbeatExpireInterval); this.ratioUseStaleDataNodesForWrite = conf.getFloat( DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY, DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT); Preconditions.checkArgument( (ratioUseStaleDataNodesForWrite > 0 && ratioUseStaleDataNodesForWrite <= 1.0f), DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY + " = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " + "It should be a positive non-zero float value, not greater than 1.0f."); this.timeBetweenResendingCachingDirectivesMs = conf.getLong( DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS, DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS_DEFAULT); this.blocksPerPostponedMisreplicatedBlocksRescan = conf.getLong( DFSConfigKeys.DFS_NAMENODE_BLOCKS_PER_POSTPONEDBLOCKS_RESCAN_KEY, DFSConfigKeys.DFS_NAMENODE_BLOCKS_PER_POSTPONEDBLOCKS_RESCAN_KEY_DEFAULT); } private static long getStaleIntervalFromConf(Configuration conf, long heartbeatExpireInterval) { long staleInterval = conf.getLong( DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT); Preconditions.checkArgument(staleInterval > 0, DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY + " = '" + staleInterval + "' is invalid. 
" + "It should be a positive non-zero value."); final long heartbeatIntervalSeconds = conf.getLong( DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT); // The stale interval value cannot be smaller than // 3 times of heartbeat interval final long minStaleInterval = conf.getInt( DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_DEFAULT) * heartbeatIntervalSeconds * 1000; if (staleInterval < minStaleInterval) { LOG.warn("The given interval for marking stale datanode = " + staleInterval + ", which is less than " + DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_MINIMUM_INTERVAL_DEFAULT + " heartbeat intervals. This may cause too frequent changes of " + "stale states of DataNodes since a heartbeat msg may be missing " + "due to temporary short-term failures. Reset stale interval to " + minStaleInterval + "."); staleInterval = minStaleInterval; } if (staleInterval > heartbeatExpireInterval) { LOG.warn("The given interval for marking stale datanode = " + staleInterval + ", which is larger than heartbeat expire interval " + heartbeatExpireInterval + "."); } return staleInterval; } void activate(final Configuration conf) { decomManager.activate(conf); heartbeatManager.activate(conf); } void close() { decomManager.close(); heartbeatManager.close(); } /** @return the network topology. */ public NetworkTopology getNetworkTopology() { return networktopology; } /** @return the heartbeat manager. */ HeartbeatManager getHeartbeatManager() { return heartbeatManager; } @VisibleForTesting public DecommissionManager getDecomManager() { return decomManager; } HostFileManager getHostFileManager() { return hostFileManager; } @VisibleForTesting public void setHeartbeatExpireInterval(long expiryMs) { this.heartbeatExpireInterval = expiryMs; } @VisibleForTesting public FSClusterStats getFSClusterStats() { return fsClusterStats; } /** @return the datanode statistics. */ public DatanodeStatistics getDatanodeStatistics() { return heartbeatManager; } private boolean isInactive(DatanodeInfo datanode) { if (datanode.isDecommissioned()) { return true; } if (avoidStaleDataNodesForRead) { return datanode.isStale(staleInterval); } return false; } /** Sort the located blocks by the distance to the target host. */ public void sortLocatedBlocks(final String targethost, final List<LocatedBlock> locatedblocks) { //sort the blocks // As it is possible for the separation of node manager and datanode, // here we should get node but not datanode only . Node client = getDatanodeByHost(targethost); if (client == null) { List<String> hosts = new ArrayList<> (1); hosts.add(targethost); List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts); if (resolvedHosts != null && !resolvedHosts.isEmpty()) { String rName = resolvedHosts.get(0); if (rName != null) { client = new NodeBase(rName + NodeBase.PATH_SEPARATOR_STR + targethost); } } else { LOG.error("Node Resolution failed. Please make sure that rack " + "awareness scripts are functional."); } } Comparator<DatanodeInfo> comparator = avoidStaleDataNodesForRead ? 
new DFSUtil.DecomStaleComparator(staleInterval) : DFSUtil.DECOM_COMPARATOR; for (LocatedBlock b : locatedblocks) { DatanodeInfo[] di = b.getLocations(); // Move decommissioned/stale datanodes to the bottom Arrays.sort(di, comparator); int lastActiveIndex = di.length - 1; while (lastActiveIndex > 0 && isInactive(di[lastActiveIndex])) { --lastActiveIndex; } int activeLen = lastActiveIndex + 1; networktopology.sortByDistance(client, b.getLocations(), activeLen); // must update cache since we modified locations array b.updateCachedStorageInfo(); } } /** @return the datanode descriptor for the host. */ public DatanodeDescriptor getDatanodeByHost(final String host) { return host2DatanodeMap.getDatanodeByHost(host); } /** @return the datanode descriptor for the host. */ public DatanodeDescriptor getDatanodeByXferAddr(String host, int xferPort) { return host2DatanodeMap.getDatanodeByXferAddr(host, xferPort); } /** @return the Host2NodesMap */ public Host2NodesMap getHost2DatanodeMap() { return this.host2DatanodeMap; } /** * Given datanode address or host name, returns the DatanodeDescriptor for the * same, or if it doesn't find the datanode, it looks for a machine local and * then rack local datanode, if a rack local datanode is not possible either, * it returns the DatanodeDescriptor of any random node in the cluster. * * @param address hostaddress:transfer address * @return the best match for the given datanode */ DatanodeDescriptor getDatanodeDescriptor(String address) { DatanodeID dnId = parseDNFromHostsEntry(address); String host = dnId.getIpAddr(); int xferPort = dnId.getXferPort(); DatanodeDescriptor node = getDatanodeByXferAddr(host, xferPort); if (node == null) { node = getDatanodeByHost(host); } if (node == null) { String networkLocation = resolveNetworkLocationWithFallBackToDefaultLocation(dnId); // If the current cluster doesn't contain the node, fallback to // something machine local and then rack local. List<Node> rackNodes = getNetworkTopology() .getDatanodesInRack(networkLocation); if (rackNodes != null) { // Try something machine local. for (Node rackNode : rackNodes) { if (((DatanodeDescriptor) rackNode).getIpAddr().equals(host)) { node = (DatanodeDescriptor) rackNode; break; } } // Try something rack local. if (node == null && !rackNodes.isEmpty()) { node = (DatanodeDescriptor) (rackNodes .get(ThreadLocalRandom.current().nextInt(rackNodes.size()))); } } // If we can't even choose rack local, just choose any node in the // cluster. if (node == null) { node = (DatanodeDescriptor)getNetworkTopology() .chooseRandom(NodeBase.ROOT); } } return node; } /** Get a datanode descriptor given corresponding DatanodeUUID */ DatanodeDescriptor getDatanode(final String datanodeUuid) { if (datanodeUuid == null) { return null; } return datanodeMap.get(datanodeUuid); } /** * Get data node by datanode ID. * * @param nodeID datanode ID * @return DatanodeDescriptor or null if the node is not found. 
* @throws UnregisteredNodeException */ public DatanodeDescriptor getDatanode(DatanodeID nodeID ) throws UnregisteredNodeException { final DatanodeDescriptor node = getDatanode(nodeID.getDatanodeUuid()); if (node == null) return null; if (!node.getXferAddr().equals(nodeID.getXferAddr())) { final UnregisteredNodeException e = new UnregisteredNodeException( nodeID, node); NameNode.stateChangeLog.error("BLOCK* NameSystem.getDatanode: " + e.getLocalizedMessage()); throw e; } return node; } public DatanodeStorageInfo[] getDatanodeStorageInfos( DatanodeID[] datanodeID, String[] storageIDs) throws UnregisteredNodeException { if (datanodeID.length == 0) { return null; } final DatanodeStorageInfo[] storages = new DatanodeStorageInfo[datanodeID.length]; for(int i = 0; i < datanodeID.length; i++) { final DatanodeDescriptor dd = getDatanode(datanodeID[i]); storages[i] = dd.getStorageInfo(storageIDs[i]); } return storages; } /** Prints information about all datanodes. */ void datanodeDump(final PrintWriter out) { synchronized (datanodeMap) { Map<String,DatanodeDescriptor> sortedDatanodeMap = new TreeMap<>(datanodeMap); out.println("Metasave: Number of datanodes: " + datanodeMap.size()); for (DatanodeDescriptor node : sortedDatanodeMap.values()) { out.println(node.dumpDatanode()); } } } /** * Remove a datanode descriptor. * @param nodeInfo datanode descriptor. */ private void removeDatanode(DatanodeDescriptor nodeInfo) { assert namesystem.hasWriteLock(); heartbeatManager.removeDatanode(nodeInfo); blockManager.removeBlocksAssociatedTo(nodeInfo); networktopology.remove(nodeInfo); decrementVersionCount(nodeInfo.getSoftwareVersion()); blockManager.getBlockReportLeaseManager().unregister(nodeInfo); if (LOG.isDebugEnabled()) { LOG.debug("remove datanode " + nodeInfo); } namesystem.checkSafeMode(); } /** * Remove a datanode * @throws UnregisteredNodeException */ public void removeDatanode(final DatanodeID node ) throws UnregisteredNodeException { namesystem.writeLock(); try { final DatanodeDescriptor descriptor = getDatanode(node); if (descriptor != null) { removeDatanode(descriptor); } else { NameNode.stateChangeLog.warn("BLOCK* removeDatanode: " + node + " does not exist"); } } finally { namesystem.writeUnlock(); } } /** Remove a dead datanode. */ void removeDeadDatanode(final DatanodeID nodeID) { synchronized(datanodeMap) { DatanodeDescriptor d; try { d = getDatanode(nodeID); } catch(IOException e) { d = null; } if (d != null && isDatanodeDead(d)) { NameNode.stateChangeLog.info( "BLOCK* removeDeadDatanode: lost heartbeat from " + d); removeDatanode(d); } } } /** Is the datanode dead? */ boolean isDatanodeDead(DatanodeDescriptor node) { return (node.getLastUpdateMonotonic() < (monotonicNow() - heartbeatExpireInterval)); } /** Add a datanode. */ void addDatanode(final DatanodeDescriptor node) { // To keep host2DatanodeMap consistent with datanodeMap, // remove from host2DatanodeMap the datanodeDescriptor removed // from datanodeMap before adding node to host2DatanodeMap. synchronized(datanodeMap) { host2DatanodeMap.remove(datanodeMap.put(node.getDatanodeUuid(), node)); } networktopology.add(node); // may throw InvalidTopologyException host2DatanodeMap.add(node); checkIfClusterIsNowMultiRack(node); blockManager.getBlockReportLeaseManager().register(node); if (LOG.isDebugEnabled()) { LOG.debug(getClass().getSimpleName() + ".addDatanode: " + "node " + node + " is added to datanodeMap."); } } /** Physically remove node from datanodeMap. 
*/ private void wipeDatanode(final DatanodeID node) { final String key = node.getDatanodeUuid(); synchronized (datanodeMap) { host2DatanodeMap.remove(datanodeMap.remove(key)); } // Also remove all block invalidation tasks under this node blockManager.removeFromInvalidates(new DatanodeInfo(node)); if (LOG.isDebugEnabled()) { LOG.debug(getClass().getSimpleName() + ".wipeDatanode(" + node + "): storage " + key + " is removed from datanodeMap."); } } private void incrementVersionCount(String version) { if (version == null) { return; } synchronized(datanodeMap) { Integer count = this.datanodesSoftwareVersions.get(version); count = count == null ? 1 : count + 1; this.datanodesSoftwareVersions.put(version, count); } } private void decrementVersionCount(String version) { if (version == null) { return; } synchronized(datanodeMap) { Integer count = this.datanodesSoftwareVersions.get(version); if(count != null) { if(count > 1) { this.datanodesSoftwareVersions.put(version, count-1); } else { this.datanodesSoftwareVersions.remove(version); } } } } private boolean shouldCountVersion(DatanodeDescriptor node) { return node.getSoftwareVersion() != null && node.isAlive && !isDatanodeDead(node); } private void countSoftwareVersions() { synchronized(datanodeMap) { HashMap<String, Integer> versionCount = new HashMap<>(); for(DatanodeDescriptor dn: datanodeMap.values()) { // Check isAlive too because right after removeDatanode(), // isDatanodeDead() is still true if(shouldCountVersion(dn)) { Integer num = versionCount.get(dn.getSoftwareVersion()); num = num == null ? 1 : num+1; versionCount.put(dn.getSoftwareVersion(), num); } } this.datanodesSoftwareVersions = versionCount; } } public HashMap<String, Integer> getDatanodesSoftwareVersions() { synchronized(datanodeMap) { return new HashMap<> (this.datanodesSoftwareVersions); } } /** * Resolve a node's network location. If the DNS to switch mapping fails * then this method guarantees default rack location. * @param node to resolve to network location * @return network location path */ private String resolveNetworkLocationWithFallBackToDefaultLocation ( DatanodeID node) { String networkLocation; try { networkLocation = resolveNetworkLocation(node); } catch (UnresolvedTopologyException e) { LOG.error("Unresolved topology mapping. Using " + NetworkTopology.DEFAULT_RACK + " for host " + node.getHostName()); networkLocation = NetworkTopology.DEFAULT_RACK; } return networkLocation; } /** * Resolve a node's network location. If the DNS to switch mapping fails, * then this method throws UnresolvedTopologyException. * @param node to resolve to network location * @return network location path. * @throws UnresolvedTopologyException if the DNS to switch mapping fails * to resolve network location. 
*/ private String resolveNetworkLocation (DatanodeID node) throws UnresolvedTopologyException { List<String> names = new ArrayList<>(1); if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) { names.add(node.getIpAddr()); } else { names.add(node.getHostName()); } List<String> rName = resolveNetworkLocation(names); String networkLocation; if (rName == null) { LOG.error("The resolve call returned null!"); throw new UnresolvedTopologyException( "Unresolved topology mapping for host " + node.getHostName()); } else { networkLocation = rName.get(0); } return networkLocation; } /** * Resolve network locations for specified hosts * * @param names * @return Network locations if available, Else returns null */ public List<String> resolveNetworkLocation(List<String> names) { // resolve its network location List<String> rName = dnsToSwitchMapping.resolve(names); return rName; } /** * Resolve a node's dependencies in the network. If the DNS to switch * mapping fails then this method returns empty list of dependencies * @param node to get dependencies for * @return List of dependent host names */ private List<String> getNetworkDependenciesWithDefault(DatanodeInfo node) { List<String> dependencies; try { dependencies = getNetworkDependencies(node); } catch (UnresolvedTopologyException e) { LOG.error("Unresolved dependency mapping for host " + node.getHostName() +". Continuing with an empty dependency list"); dependencies = Collections.emptyList(); } return dependencies; } /** * Resolves a node's dependencies in the network. If the DNS to switch * mapping fails to get dependencies, then this method throws * UnresolvedTopologyException. * @param node to get dependencies for * @return List of dependent host names * @throws UnresolvedTopologyException if the DNS to switch mapping fails */ private List<String> getNetworkDependencies(DatanodeInfo node) throws UnresolvedTopologyException { List<String> dependencies = Collections.emptyList(); if (dnsToSwitchMapping instanceof DNSToSwitchMappingWithDependency) { //Get dependencies dependencies = ((DNSToSwitchMappingWithDependency)dnsToSwitchMapping).getDependency( node.getHostName()); if(dependencies == null) { LOG.error("The dependency call returned null for host " + node.getHostName()); throw new UnresolvedTopologyException("The dependency call returned " + "null for host " + node.getHostName()); } } return dependencies; } /** * Remove an already decommissioned data node who is neither in include nor * exclude hosts lists from the the list of live or dead nodes. This is used * to not display an already decommssioned data node to the operators. * The operation procedure of making a already decommissioned data node not * to be displayed is as following: * <ol> * <li> * Host must have been in the include hosts list and the include hosts list * must not be empty. * </li> * <li> * Host is decommissioned by remaining in the include hosts list and added * into the exclude hosts list. Name node is updated with the new * information by issuing dfsadmin -refreshNodes command. * </li> * <li> * Host is removed from both include hosts and exclude hosts lists. Name * node is updated with the new informationby issuing dfsamin -refreshNodes * command. * <li> * </ol> * * @param nodeList * , array list of live or dead nodes. */ private void removeDecomNodeFromList(final List<DatanodeDescriptor> nodeList) { // If the include list is empty, any nodes are welcomed and it does not // make sense to exclude any nodes from the cluster. Therefore, no remove. 
if (!hostFileManager.hasIncludes()) { return; } for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) { DatanodeDescriptor node = it.next(); if ((!hostFileManager.isIncluded(node)) && (!hostFileManager.isExcluded(node)) && node.isDecommissioned()) { // Include list is not empty, an existing datanode does not appear // in both include or exclude lists and it has been decommissioned. it.remove(); } } } /** * Decommission the node if it is in the host exclude list. * * @param nodeReg datanode */ void startDecommissioningIfExcluded(DatanodeDescriptor nodeReg) { // If the registered node is in exclude list, then decommission it if (getHostFileManager().isExcluded(nodeReg)) { decomManager.startDecommission(nodeReg); } } /** * Register the given datanode with the namenode. NB: the given * registration is mutated and given back to the datanode. * * @param nodeReg the datanode registration * @throws DisallowedDatanodeException if the registration request is * denied because the datanode does not match includes/excludes * @throws UnresolvedTopologyException if the registration request is * denied because resolving datanode network location fails. */ public void registerDatanode(DatanodeRegistration nodeReg) throws DisallowedDatanodeException, UnresolvedTopologyException { InetAddress dnAddress = Server.getRemoteIp(); if (dnAddress != null) { // Mostly called inside an RPC, update ip and peer hostname String hostname = dnAddress.getHostName(); String ip = dnAddress.getHostAddress(); if (checkIpHostnameInRegistration && !isNameResolved(dnAddress)) { // Reject registration of unresolved datanode to prevent performance // impact of repetitive DNS lookups later. final String message = "hostname cannot be resolved (ip=" + ip + ", hostname=" + hostname + ")"; LOG.warn("Unresolved datanode registration: " + message); throw new DisallowedDatanodeException(nodeReg, message); } // update node registration with the ip and hostname from rpc request nodeReg.setIpAddr(ip); nodeReg.setPeerHostName(hostname); } try { nodeReg.setExportedKeys(blockManager.getBlockKeys()); // Checks if the node is not on the hosts list. If it is not, then // it will be disallowed from registering. if (!hostFileManager.isIncluded(nodeReg)) { throw new DisallowedDatanodeException(nodeReg); } NameNode.stateChangeLog.info("BLOCK* registerDatanode: from " + nodeReg + " storage " + nodeReg.getDatanodeUuid()); DatanodeDescriptor nodeS = getDatanode(nodeReg.getDatanodeUuid()); DatanodeDescriptor nodeN = host2DatanodeMap.getDatanodeByXferAddr( nodeReg.getIpAddr(), nodeReg.getXferPort()); if (nodeN != null && nodeN != nodeS) { NameNode.LOG.info("BLOCK* registerDatanode: " + nodeN); // nodeN previously served a different data storage, // which is not served by anybody anymore. removeDatanode(nodeN); // physically remove node from datanodeMap wipeDatanode(nodeN); nodeN = null; } if (nodeS != null) { if (nodeN == nodeS) { // The same datanode has been just restarted to serve the same data // storage. We do not need to remove old data blocks, the delta will // be calculated on the next block report from the datanode if(NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("BLOCK* registerDatanode: " + "node restarted."); } } else { // nodeS is found /* The registering datanode is a replacement node for the existing data storage, which from now on will be served by a new node. If this message repeats, both nodes might have same storageID by (insanely rare) random chance. 
User needs to restart one of the nodes with its data cleared (or user can just remove the StorageID value in "VERSION" file under the data directory of the datanode, but this is might not work if VERSION file format has changed */ NameNode.stateChangeLog.info("BLOCK* registerDatanode: " + nodeS + " is replaced by " + nodeReg + " with the same storageID " + nodeReg.getDatanodeUuid()); } boolean success = false; try { // update cluster map getNetworkTopology().remove(nodeS); if(shouldCountVersion(nodeS)) { decrementVersionCount(nodeS.getSoftwareVersion()); } nodeS.updateRegInfo(nodeReg); nodeS.setSoftwareVersion(nodeReg.getSoftwareVersion()); nodeS.setDisallowed(false); // Node is in the include list // resolve network location if(this.rejectUnresolvedTopologyDN) { nodeS.setNetworkLocation(resolveNetworkLocation(nodeS)); nodeS.setDependentHostNames(getNetworkDependencies(nodeS)); } else { nodeS.setNetworkLocation( resolveNetworkLocationWithFallBackToDefaultLocation(nodeS)); nodeS.setDependentHostNames( getNetworkDependenciesWithDefault(nodeS)); } getNetworkTopology().add(nodeS); // also treat the registration message as a heartbeat heartbeatManager.register(nodeS); incrementVersionCount(nodeS.getSoftwareVersion()); startDecommissioningIfExcluded(nodeS); success = true; } finally { if (!success) { removeDatanode(nodeS); wipeDatanode(nodeS); countSoftwareVersions(); } } return; } DatanodeDescriptor nodeDescr = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK); boolean success = false; try { // resolve network location if(this.rejectUnresolvedTopologyDN) { nodeDescr.setNetworkLocation(resolveNetworkLocation(nodeDescr)); nodeDescr.setDependentHostNames(getNetworkDependencies(nodeDescr)); } else { nodeDescr.setNetworkLocation( resolveNetworkLocationWithFallBackToDefaultLocation(nodeDescr)); nodeDescr.setDependentHostNames( getNetworkDependenciesWithDefault(nodeDescr)); } networktopology.add(nodeDescr); nodeDescr.setSoftwareVersion(nodeReg.getSoftwareVersion()); // register new datanode addDatanode(nodeDescr); // also treat the registration message as a heartbeat // no need to update its timestamp // because its is done when the descriptor is created heartbeatManager.addDatanode(nodeDescr); incrementVersionCount(nodeReg.getSoftwareVersion()); startDecommissioningIfExcluded(nodeDescr); success = true; } finally { if (!success) { removeDatanode(nodeDescr); wipeDatanode(nodeDescr); countSoftwareVersions(); } } } catch (InvalidTopologyException e) { // If the network location is invalid, clear the cached mappings // so that we have a chance to re-add this DataNode with the // correct network location later. List<String> invalidNodeNames = new ArrayList<>(3); // clear cache for nodes in IP or Hostname invalidNodeNames.add(nodeReg.getIpAddr()); invalidNodeNames.add(nodeReg.getHostName()); invalidNodeNames.add(nodeReg.getPeerHostName()); dnsToSwitchMapping.reloadCachedMappings(invalidNodeNames); throw e; } } /** * Rereads conf to get hosts and exclude list file names. * Rereads the files to update the hosts and exclude lists. It * checks if any of the hosts have changed states: */ public void refreshNodes(final Configuration conf) throws IOException { refreshHostsReader(conf); namesystem.writeLock(); try { refreshDatanodes(); countSoftwareVersions(); } finally { namesystem.writeUnlock(); } } /** Reread include/exclude files. */ private void refreshHostsReader(Configuration conf) throws IOException { // Reread the conf to get dfs.hosts and dfs.hosts.exclude filenames. 
// Update the file names and refresh internal includes and excludes list. if (conf == null) { conf = new HdfsConfiguration(); } this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""), conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, "")); } /** * 1. Added to hosts --> no further work needed here. * 2. Removed from hosts --> mark AdminState as decommissioned. * 3. Added to exclude --> start decommission. * 4. Removed from exclude --> stop decommission. */ private void refreshDatanodes() { for(DatanodeDescriptor node : datanodeMap.values()) { // Check if not include. if (!hostFileManager.isIncluded(node)) { node.setDisallowed(true); // case 2. } else { if (hostFileManager.isExcluded(node)) { decomManager.startDecommission(node); // case 3. } else { decomManager.stopDecommission(node); // case 4. } } } } /** @return the number of live datanodes. */ public int getNumLiveDataNodes() { int numLive = 0; synchronized (datanodeMap) { for(DatanodeDescriptor dn : datanodeMap.values()) { if (!isDatanodeDead(dn) ) { numLive++; } } } return numLive; } /** @return the number of dead datanodes. */ public int getNumDeadDataNodes() { return getDatanodeListForReport(DatanodeReportType.DEAD).size(); } /** @return list of datanodes where decommissioning is in progress. */ public List<DatanodeDescriptor> getDecommissioningNodes() { // There is no need to take namesystem reader lock as // getDatanodeListForReport will synchronize on datanodeMap // A decommissioning DN may be "alive" or "dead". return getDatanodeListForReport(DatanodeReportType.DECOMMISSIONING); } /* Getter and Setter for stale DataNodes related attributes */ /** * Whether stale datanodes should be avoided as targets on the write path. * The result of this function may change if the number of stale datanodes * eclipses a configurable threshold. * * @return whether stale datanodes should be avoided on the write path */ public boolean shouldAvoidStaleDataNodesForWrite() { // If # stale exceeds maximum staleness ratio, disable stale // datanode avoidance on the write path return avoidStaleDataNodesForWrite && (numStaleNodes <= heartbeatManager.getLiveDatanodeCount() * ratioUseStaleDataNodesForWrite); } public long getBlocksPerPostponedMisreplicatedBlocksRescan() { return blocksPerPostponedMisreplicatedBlocksRescan; } /** * @return The time interval used to mark DataNodes as stale. */ long getStaleInterval() { return staleInterval; } /** * Set the number of current stale DataNodes. The HeartbeatManager got this * number based on DataNodes' heartbeats. * * @param numStaleNodes * The number of stale DataNodes to be set. */ void setNumStaleNodes(int numStaleNodes) { this.numStaleNodes = numStaleNodes; } /** * @return Return the current number of stale DataNodes (detected by * HeartbeatManager). */ public int getNumStaleNodes() { return this.numStaleNodes; } /** * Get the number of content stale storages. */ public int getNumStaleStorages() { return numStaleStorages; } /** * Set the number of content stale storages. * * @param numStaleStorages The number of content stale storages. */ void setNumStaleStorages(int numStaleStorages) { this.numStaleStorages = numStaleStorages; } /** Fetch live and dead datanodes. 
*/ public void fetchDatanodes(final List<DatanodeDescriptor> live, final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) { if (live == null && dead == null) { throw new HadoopIllegalArgumentException("Both live and dead lists are null"); } // There is no need to take namesystem reader lock as // getDatanodeListForReport will synchronize on datanodeMap final List<DatanodeDescriptor> results = getDatanodeListForReport(DatanodeReportType.ALL); for(DatanodeDescriptor node : results) { if (isDatanodeDead(node)) { if (dead != null) { dead.add(node); } } else { if (live != null) { live.add(node); } } } if (removeDecommissionNode) { if (live != null) { removeDecomNodeFromList(live); } if (dead != null) { removeDecomNodeFromList(dead); } } } /** * @return true if this cluster has ever consisted of multiple racks, even if * it is not now a multi-rack cluster. */ boolean hasClusterEverBeenMultiRack() { return hasClusterEverBeenMultiRack; } /** * Check if the cluster now consists of multiple racks. If it does, and this * is the first time it's consisted of multiple racks, then process blocks * that may now be misreplicated. * * @param node DN which caused cluster to become multi-rack. Used for logging. */ @VisibleForTesting void checkIfClusterIsNowMultiRack(DatanodeDescriptor node) { if (!hasClusterEverBeenMultiRack && networktopology.getNumOfRacks() > 1) { String message = "DN " + node + " joining cluster has expanded a formerly " + "single-rack cluster to be multi-rack. "; if (namesystem.isPopulatingReplQueues()) { message += "Re-checking all blocks for replication, since they should " + "now be replicated cross-rack"; LOG.info(message); } else { message += "Not checking for mis-replicated blocks because this NN is " + "not yet processing repl queues."; LOG.debug(message); } hasClusterEverBeenMultiRack = true; if (namesystem.isPopulatingReplQueues()) { blockManager.processMisReplicatedBlocks(); } } } /** * Parse a DatanodeID from a hosts file entry * @param hostLine of form [hostname|ip][:port]? 
* @return DatanodeID constructed from the given string */ private DatanodeID parseDNFromHostsEntry(String hostLine) { DatanodeID dnId; String hostStr; int port; int idx = hostLine.indexOf(':'); if (-1 == idx) { hostStr = hostLine; port = DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT; } else { hostStr = hostLine.substring(0, idx); port = Integer.parseInt(hostLine.substring(idx+1)); } if (InetAddresses.isInetAddress(hostStr)) { // The IP:port is sufficient for listing in a report dnId = new DatanodeID(hostStr, "", "", port, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT); } else { String ipAddr = ""; try { ipAddr = InetAddress.getByName(hostStr).getHostAddress(); } catch (UnknownHostException e) { LOG.warn("Invalid hostname " + hostStr + " in hosts file"); } dnId = new DatanodeID(ipAddr, hostStr, "", port, DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT, DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT); } return dnId; } /** For generating datanode reports */ public List<DatanodeDescriptor> getDatanodeListForReport( final DatanodeReportType type) { final boolean listLiveNodes = type == DatanodeReportType.ALL || type == DatanodeReportType.LIVE; final boolean listDeadNodes = type == DatanodeReportType.ALL || type == DatanodeReportType.DEAD; final boolean listDecommissioningNodes = type == DatanodeReportType.ALL || type == DatanodeReportType.DECOMMISSIONING; ArrayList<DatanodeDescriptor> nodes; final HostFileManager.HostSet foundNodes = new HostFileManager.HostSet(); final HostFileManager.HostSet includedNodes = hostFileManager.getIncludes(); final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes(); synchronized(datanodeMap) { nodes = new ArrayList<>(datanodeMap.size()); for (DatanodeDescriptor dn : datanodeMap.values()) { final boolean isDead = isDatanodeDead(dn); final boolean isDecommissioning = dn.isDecommissionInProgress(); if ((listLiveNodes && !isDead) || (listDeadNodes && isDead) || (listDecommissioningNodes && isDecommissioning)) { nodes.add(dn); } foundNodes.add(HostFileManager.resolvedAddressFromDatanodeID(dn)); } } Collections.sort(nodes); if (listDeadNodes) { for (InetSocketAddress addr : includedNodes) { if (foundNodes.matchedBy(addr) || excludedNodes.match(addr)) { continue; } // The remaining nodes are ones that are referenced by the hosts // files but that we do not know about, ie that we have never // head from. Eg. an entry that is no longer part of the cluster // or a bogus entry was given in the hosts files // // If the host file entry specified the xferPort, we use that. // Otherwise, we guess that it is the default xfer port. // We can't ask the DataNode what it had configured, because it's // dead. DatanodeDescriptor dn = new DatanodeDescriptor(new DatanodeID(addr .getAddress().getHostAddress(), addr.getHostName(), "", addr.getPort() == 0 ? defaultXferPort : addr.getPort(), defaultInfoPort, defaultInfoSecurePort, defaultIpcPort)); setDatanodeDead(dn); nodes.add(dn); } } if (LOG.isDebugEnabled()) { LOG.debug("getDatanodeListForReport with " + "includedNodes = " + hostFileManager.getIncludes() + ", excludedNodes = " + hostFileManager.getExcludes() + ", foundNodes = " + foundNodes + ", nodes = " + nodes); } return nodes; } /** * Checks if name resolution was successful for the given address. If IP * address and host name are the same, then it means name resolution has * failed. 
As a special case, local addresses are also considered * acceptable. This is particularly important on Windows, where 127.0.0.1 does * not resolve to "localhost". * * @param address InetAddress to check * @return boolean true if name resolution successful or address is local */ private static boolean isNameResolved(InetAddress address) { String hostname = address.getHostName(); String ip = address.getHostAddress(); return !hostname.equals(ip) || NetUtils.isLocalAddress(address); } private void setDatanodeDead(DatanodeDescriptor node) { node.setLastUpdate(0); node.setLastUpdateMonotonic(0); } /** Handle heartbeat from datanodes. */ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg, StorageReport[] reports, final String blockPoolId, long cacheCapacity, long cacheUsed, int xceiverCount, int maxTransfers, int failedVolumes, VolumeFailureSummary volumeFailureSummary) throws IOException { synchronized (heartbeatManager) { synchronized (datanodeMap) { DatanodeDescriptor nodeinfo; try { nodeinfo = getDatanode(nodeReg); } catch(UnregisteredNodeException e) { return new DatanodeCommand[]{RegisterCommand.REGISTER}; } // Check if this datanode should actually be shutdown instead. if (nodeinfo != null && nodeinfo.isDisallowed()) { setDatanodeDead(nodeinfo); throw new DisallowedDatanodeException(nodeinfo); } if (nodeinfo == null || !nodeinfo.isAlive) { return new DatanodeCommand[]{RegisterCommand.REGISTER}; } heartbeatManager.updateHeartbeat(nodeinfo, reports, cacheCapacity, cacheUsed, xceiverCount, failedVolumes, volumeFailureSummary); // If we are in safemode, do not send back any recovery / replication // requests. Don't even drain the existing queue of work. if(namesystem.isInSafeMode()) { return new DatanodeCommand[0]; } //check lease recovery BlockInfoContiguousUnderConstruction[] blocks = nodeinfo .getLeaseRecoveryCommand(Integer.MAX_VALUE); if (blocks != null) { BlockRecoveryCommand brCommand = new BlockRecoveryCommand( blocks.length); for (BlockInfoContiguousUnderConstruction b : blocks) { final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations(); // Skip stale nodes during recovery - not heart beated for some time (30s by default). final List<DatanodeStorageInfo> recoveryLocations = new ArrayList<>(storages.length); for (int i = 0; i < storages.length; i++) { if (!storages[i].getDatanodeDescriptor().isStale(staleInterval)) { recoveryLocations.add(storages[i]); } } // If we are performing a truncate recovery than set recovery fields // to old block. boolean truncateRecovery = b.getTruncateBlock() != null; boolean copyOnTruncateRecovery = truncateRecovery && b.getTruncateBlock().getBlockId() != b.getBlockId(); ExtendedBlock primaryBlock = (copyOnTruncateRecovery) ? new ExtendedBlock(blockPoolId, b.getTruncateBlock()) : new ExtendedBlock(blockPoolId, b); // If we only get 1 replica after eliminating stale nodes, then choose all // replicas for recovery and let the primary data node handle failures. DatanodeInfo[] recoveryInfos; if (recoveryLocations.size() > 1) { if (recoveryLocations.size() != storages.length) { LOG.info("Skipped stale nodes for recovery : " + (storages.length - recoveryLocations.size())); } recoveryInfos = DatanodeStorageInfo.toDatanodeInfos(recoveryLocations); } else { // If too many replicas are stale, then choose all replicas to participate // in block recovery. recoveryInfos = DatanodeStorageInfo.toDatanodeInfos(storages); } if(truncateRecovery) { Block recoveryBlock = (copyOnTruncateRecovery) ? 
b : b.getTruncateBlock(); brCommand.add(new RecoveringBlock(primaryBlock, recoveryInfos, recoveryBlock)); } else { brCommand.add(new RecoveringBlock(primaryBlock, recoveryInfos, b.getBlockRecoveryId())); } } return new DatanodeCommand[] { brCommand }; } final List<DatanodeCommand> cmds = new ArrayList<>(); //check pending replication List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand( maxTransfers); if (pendingList != null) { cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId, pendingList)); } //check block invalidation Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit); if (blks != null) { cmds.add(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, blockPoolId, blks)); } boolean sendingCachingCommands = false; long nowMs = monotonicNow(); if (shouldSendCachingCommands && ((nowMs - nodeinfo.getLastCachingDirectiveSentTimeMs()) >= timeBetweenResendingCachingDirectivesMs)) { DatanodeCommand pendingCacheCommand = getCacheCommand(nodeinfo.getPendingCached(), nodeinfo, DatanodeProtocol.DNA_CACHE, blockPoolId); if (pendingCacheCommand != null) { cmds.add(pendingCacheCommand); sendingCachingCommands = true; } DatanodeCommand pendingUncacheCommand = getCacheCommand(nodeinfo.getPendingUncached(), nodeinfo, DatanodeProtocol.DNA_UNCACHE, blockPoolId); if (pendingUncacheCommand != null) { cmds.add(pendingUncacheCommand); sendingCachingCommands = true; } if (sendingCachingCommands) { nodeinfo.setLastCachingDirectiveSentTimeMs(nowMs); } } blockManager.addKeyUpdateCommand(cmds, nodeinfo); // check for balancer bandwidth update if (nodeinfo.getBalancerBandwidth() > 0) { cmds.add(new BalancerBandwidthCommand(nodeinfo.getBalancerBandwidth())); // set back to 0 to indicate that datanode has been sent the new value nodeinfo.setBalancerBandwidth(0); } if (!cmds.isEmpty()) { return cmds.toArray(new DatanodeCommand[cmds.size()]); } } } return new DatanodeCommand[0]; } /** * Convert a CachedBlockList into a DatanodeCommand with a list of blocks. * * @param list The {@link CachedBlocksList}. This function * clears the list. * @param datanode The datanode. * @param action The action to perform in the command. * @param poolId The block pool id. * @return A DatanodeCommand to be sent back to the DN, or null if * there is nothing to be done. */ private DatanodeCommand getCacheCommand(CachedBlocksList list, DatanodeDescriptor datanode, int action, String poolId) { int length = list.size(); if (length == 0) { return null; } // Read the existing cache commands. long[] blockIds = new long[length]; int i = 0; for (Iterator<CachedBlock> iter = list.iterator(); iter.hasNext(); ) { CachedBlock cachedBlock = iter.next(); blockIds[i++] = cachedBlock.getBlockId(); } return new BlockIdCommand(action, poolId, blockIds); } /** * Tell all datanodes to use a new, non-persistent bandwidth value for * dfs.balance.bandwidthPerSec. * * A system administrator can tune the balancer bandwidth parameter * (dfs.datanode.balance.bandwidthPerSec) dynamically by calling * "dfsadmin -setBalanacerBandwidth newbandwidth", at which point the * following 'bandwidth' variable gets updated with the new value for each * node. Once the heartbeat command is issued to update the value on the * specified datanode, this value will be set back to 0. * * @param bandwidth Blanacer bandwidth in bytes per second for all datanodes. 
* @throws IOException
   */
  public void setBalancerBandwidth(long bandwidth) throws IOException {
    synchronized(datanodeMap) {
      for (DatanodeDescriptor nodeInfo : datanodeMap.values()) {
        nodeInfo.setBalancerBandwidth(bandwidth);
      }
    }
  }

  public void markAllDatanodesStale() {
    LOG.info("Marking all datanodes as stale");
    synchronized (datanodeMap) {
      for (DatanodeDescriptor dn : datanodeMap.values()) {
        for(DatanodeStorageInfo storage : dn.getStorageInfos()) {
          storage.markStaleAfterFailover();
        }
      }
    }
  }

  /**
   * Clear any actions that are queued up to be sent to the DNs
   * on their next heartbeats. This includes block invalidations,
   * recoveries, and replication requests.
   */
  public void clearPendingQueues() {
    synchronized (datanodeMap) {
      for (DatanodeDescriptor dn : datanodeMap.values()) {
        dn.clearBlockQueues();
      }
    }
  }

  /**
   * Reset the lastCachingDirectiveSentTimeMs field of all the DataNodes we
   * know about.
   */
  public void resetLastCachingDirectiveSentTime() {
    synchronized (datanodeMap) {
      for (DatanodeDescriptor dn : datanodeMap.values()) {
        dn.setLastCachingDirectiveSentTimeMs(0L);
      }
    }
  }

  @Override
  public String toString() {
    return getClass().getSimpleName() + ": " + host2DatanodeMap;
  }

  public void clearPendingCachingCommands() {
    for (DatanodeDescriptor dn : datanodeMap.values()) {
      dn.getPendingCached().clear();
      dn.getPendingUncached().clear();
    }
  }

  public void setShouldSendCachingCommands(boolean shouldSendCachingCommands) {
    this.shouldSendCachingCommands = shouldSendCachingCommands;
  }

  FSClusterStats newFSClusterStats() {
    return new FSClusterStats() {
      @Override
      public int getTotalLoad() {
        return heartbeatManager.getXceiverCount();
      }

      @Override
      public boolean isAvoidingStaleDataNodesForWrite() {
        return shouldAvoidStaleDataNodesForWrite();
      }

      @Override
      public int getNumDatanodesInService() {
        return heartbeatManager.getNumDatanodesInService();
      }

      @Override
      public double getInServiceXceiverAverage() {
        double avgLoad = 0;
        final int nodes = getNumDatanodesInService();
        if (nodes != 0) {
          final int xceivers = heartbeatManager.getInServiceXceiverCount();
          avgLoad = (double)xceivers/nodes;
        }
        return avgLoad;
      }
    };
  }
}
60,655
36.372766
98
java
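The stale-replica filtering performed while handleHeartbeat builds a BlockRecoveryCommand above is easy to get wrong, so here is a minimal, self-contained sketch of just that rule: prefer replicas on recently heartbeated datanodes, but fall back to every known replica when fewer than two fresh ones remain. The Replica class, the 30s constant and chooseRecoveryTargets are illustrative stand-ins, not HDFS types (the real code works on DatanodeStorageInfo and DatanodeDescriptor.isStale).

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class RecoveryTargetSelectionSketch {

  static final long STALE_INTERVAL_MS = 30_000; // 30s, matching the default noted above

  static class Replica {
    final String datanode;
    final long lastHeartbeatMs;
    Replica(String datanode, long lastHeartbeatMs) {
      this.datanode = datanode;
      this.lastHeartbeatMs = lastHeartbeatMs;
    }
    boolean isStale(long nowMs) {
      return nowMs - lastHeartbeatMs > STALE_INTERVAL_MS;
    }
  }

  // Keep only replicas on non-stale datanodes; if that leaves one replica or
  // fewer, ask every replica to participate and let the primary datanode
  // handle failures, mirroring the heartbeat handler above.
  static List<Replica> chooseRecoveryTargets(List<Replica> all, long nowMs) {
    List<Replica> fresh = new ArrayList<>();
    for (Replica r : all) {
      if (!r.isStale(nowMs)) {
        fresh.add(r);
      }
    }
    return fresh.size() > 1 ? fresh : all;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    List<Replica> replicas = Arrays.asList(
        new Replica("dn1", now - 5_000),    // fresh
        new Replica("dn2", now - 120_000),  // stale
        new Replica("dn3", now - 90_000));  // stale
    // Only dn1 is fresh, so the fallback selects all three replicas.
    System.out.println(chooseRecoveryTargets(replicas, now).size()); // prints 3
  }
}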
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.blockmanagement;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface BlockPlacementStatus {

  /**
   * Check whether the replicas of this block satisfy the requirement of the
   * placement policy.
   * @return true if the replicas satisfy the placement policy's requirement
   */
  public boolean isPlacementPolicySatisfied();

  /**
   * Get a description, suitable for logging or printing, of why the replicas
   * fail to meet the requirement of the placement policy.
   * @return a description of the violation when the replicas fail to meet the
   * requirement of the placement policy
   */
  public String getErrorDescription();

}
1,580
35.767442
80
java
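To make the contract concrete, here is a hedged sketch of an implementation that tracks rack spread, roughly the shape a default implementation takes; the class and field names are chosen for illustration only and are not asserted to match the class that ships with HDFS. It is assumed to live in the same package as the interface.

package org.apache.hadoop.hdfs.server.blockmanagement;

// Hypothetical implementation for illustration: the block is considered well
// placed when its replicas are spread over at least the required number of racks.
class RackCountPlacementStatus implements BlockPlacementStatus {

  private final int currentRacks;   // racks that currently hold a replica
  private final int requiredRacks;  // racks required by the placement policy

  RackCountPlacementStatus(int currentRacks, int requiredRacks) {
    this.currentRacks = currentRacks;
    this.requiredRacks = requiredRacks;
  }

  @Override
  public boolean isPlacementPolicySatisfied() {
    return currentRacks >= requiredRacks;
  }

  @Override
  public String getErrorDescription() {
    if (isPlacementPolicySatisfied()) {
      return null;
    }
    return "Block should be replicated on " + (requiredRacks - currentRacks)
        + " more rack(s); it is currently on " + currentRacks
        + " rack(s) but the policy requires " + requiredRacks + ".";
  }
}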
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.DataInput; import java.io.DataOutput; import java.io.DataOutputStream; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DeprecatedUTF8; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; import org.apache.hadoop.hdfs.util.XMLUtils; import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException; import org.apache.hadoop.hdfs.util.XMLUtils.Stanza; import org.apache.hadoop.io.BooleanWritable; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.ShortWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.WritableUtils; import org.xml.sax.ContentHandler; import org.xml.sax.SAXException; import com.google.common.base.Preconditions; /** * Static utility functions for serializing various pieces of data in the correct * format for the FSImage file. * * Some members are currently public for the benefit of the Offline Image Viewer * which is located outside of this package. These members should be made * package-protected when the OIV is refactored. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class FSImageSerialization { // Static-only class private FSImageSerialization() {} /** * In order to reduce allocation, we reuse some static objects. However, the methods * in this class should be thread-safe since image-saving is multithreaded, so * we need to keep the static objects in a thread-local. */ static private final ThreadLocal<TLData> TL_DATA = new ThreadLocal<TLData>() { @Override protected TLData initialValue() { return new TLData(); } }; /** * Simple container "struct" for threadlocal data. 
*/ static private final class TLData { final DeprecatedUTF8 U_STR = new DeprecatedUTF8(); final ShortWritable U_SHORT = new ShortWritable(); final IntWritable U_INT = new IntWritable(); final LongWritable U_LONG = new LongWritable(); final FsPermission FILE_PERM = new FsPermission((short) 0); final BooleanWritable U_BOOLEAN = new BooleanWritable(); } private static void writePermissionStatus(INodeAttributes inode, DataOutput out) throws IOException { final FsPermission p = TL_DATA.get().FILE_PERM; p.fromShort(inode.getFsPermissionShort()); PermissionStatus.write(out, inode.getUserName(), inode.getGroupName(), p); } private static void writeBlocks(final Block[] blocks, final DataOutput out) throws IOException { if (blocks == null) { out.writeInt(0); } else { out.writeInt(blocks.length); for (Block blk : blocks) { blk.write(out); } } } // Helper function that reads in an INodeUnderConstruction // from the input stream // static INodeFile readINodeUnderConstruction( DataInput in, FSNamesystem fsNamesys, int imgVersion) throws IOException { byte[] name = readBytes(in); long inodeId = NameNodeLayoutVersion.supports( LayoutVersion.Feature.ADD_INODE_ID, imgVersion) ? in.readLong() : fsNamesys.dir.allocateNewInodeId(); short blockReplication = in.readShort(); long modificationTime = in.readLong(); long preferredBlockSize = in.readLong(); int numBlocks = in.readInt(); BlockInfo[] blocks = new BlockInfo[numBlocks]; Block blk = new Block(); int i = 0; for (; i < numBlocks-1; i++) { blk.readFields(in); blocks[i] = new BlockInfoContiguous(blk, blockReplication); } // last block is UNDER_CONSTRUCTION if(numBlocks > 0) { blk.readFields(in); blocks[i] = new BlockInfoContiguousUnderConstruction( blk, blockReplication, BlockUCState.UNDER_CONSTRUCTION, null); } PermissionStatus perm = PermissionStatus.read(in); String clientName = readString(in); String clientMachine = readString(in); // We previously stored locations for the last block, now we // just record that there are none int numLocs = in.readInt(); assert numLocs == 0 : "Unexpected block locations"; // Images in the pre-protobuf format will not have the lazyPersist flag, // so it is safe to pass false always. 
INodeFile file = new INodeFile(inodeId, name, perm, modificationTime, modificationTime, blocks, blockReplication, preferredBlockSize, (byte)0); file.toUnderConstruction(clientName, clientMachine); return file; } // Helper function that writes an INodeUnderConstruction // into the output stream // static void writeINodeUnderConstruction(DataOutputStream out, INodeFile cons, String path) throws IOException { writeString(path, out); out.writeLong(cons.getId()); out.writeShort(cons.getFileReplication()); out.writeLong(cons.getModificationTime()); out.writeLong(cons.getPreferredBlockSize()); writeBlocks(cons.getBlocks(), out); cons.getPermissionStatus().write(out); FileUnderConstructionFeature uc = cons.getFileUnderConstructionFeature(); writeString(uc.getClientName(), out); writeString(uc.getClientMachine(), out); out.writeInt(0); // do not store locations of last block } /** * Serialize a {@link INodeFile} node * @param node The node to write * @param out The {@link DataOutputStream} where the fields are written * @param writeBlock Whether to write block information */ public static void writeINodeFile(INodeFile file, DataOutput out, boolean writeUnderConstruction) throws IOException { writeLocalName(file, out); out.writeLong(file.getId()); out.writeShort(file.getFileReplication()); out.writeLong(file.getModificationTime()); out.writeLong(file.getAccessTime()); out.writeLong(file.getPreferredBlockSize()); writeBlocks(file.getBlocks(), out); SnapshotFSImageFormat.saveFileDiffList(file, out); if (writeUnderConstruction) { if (file.isUnderConstruction()) { out.writeBoolean(true); final FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature(); writeString(uc.getClientName(), out); writeString(uc.getClientMachine(), out); } else { out.writeBoolean(false); } } writePermissionStatus(file, out); } /** Serialize an {@link INodeFileAttributes}. 
*/ public static void writeINodeFileAttributes(INodeFileAttributes file, DataOutput out) throws IOException { writeLocalName(file, out); writePermissionStatus(file, out); out.writeLong(file.getModificationTime()); out.writeLong(file.getAccessTime()); out.writeShort(file.getFileReplication()); out.writeLong(file.getPreferredBlockSize()); } private static void writeQuota(QuotaCounts quota, DataOutput out) throws IOException { out.writeLong(quota.getNameSpace()); out.writeLong(quota.getStorageSpace()); } /** * Serialize a {@link INodeDirectory} * @param node The node to write * @param out The {@link DataOutput} where the fields are written */ public static void writeINodeDirectory(INodeDirectory node, DataOutput out) throws IOException { writeLocalName(node, out); out.writeLong(node.getId()); out.writeShort(0); // replication out.writeLong(node.getModificationTime()); out.writeLong(0); // access time out.writeLong(0); // preferred block size out.writeInt(-1); // # of blocks writeQuota(node.getQuotaCounts(), out); if (node.isSnapshottable()) { out.writeBoolean(true); } else { out.writeBoolean(false); out.writeBoolean(node.isWithSnapshot()); } writePermissionStatus(node, out); } /** * Serialize a {@link INodeDirectory} * @param a The node to write * @param out The {@link DataOutput} where the fields are written */ public static void writeINodeDirectoryAttributes( INodeDirectoryAttributes a, DataOutput out) throws IOException { writeLocalName(a, out); writePermissionStatus(a, out); out.writeLong(a.getModificationTime()); writeQuota(a.getQuotaCounts(), out); } /** * Serialize a {@link INodeSymlink} node * @param node The node to write * @param out The {@link DataOutput} where the fields are written */ private static void writeINodeSymlink(INodeSymlink node, DataOutput out) throws IOException { writeLocalName(node, out); out.writeLong(node.getId()); out.writeShort(0); // replication out.writeLong(0); // modification time out.writeLong(0); // access time out.writeLong(0); // preferred block size out.writeInt(-2); // # of blocks Text.writeString(out, node.getSymlinkString()); writePermissionStatus(node, out); } /** Serialize a {@link INodeReference} node */ private static void writeINodeReference(INodeReference ref, DataOutput out, boolean writeUnderConstruction, ReferenceMap referenceMap ) throws IOException { writeLocalName(ref, out); out.writeLong(ref.getId()); out.writeShort(0); // replication out.writeLong(0); // modification time out.writeLong(0); // access time out.writeLong(0); // preferred block size out.writeInt(-3); // # of blocks final boolean isWithName = ref instanceof INodeReference.WithName; out.writeBoolean(isWithName); if (!isWithName) { Preconditions.checkState(ref instanceof INodeReference.DstReference); // dst snapshot id out.writeInt(((INodeReference.DstReference) ref).getDstSnapshotId()); } else { out.writeInt(((INodeReference.WithName) ref).getLastSnapshotId()); } final INodeReference.WithCount withCount = (INodeReference.WithCount)ref.getReferredINode(); referenceMap.writeINodeReferenceWithCount(withCount, out, writeUnderConstruction); } /** * Save one inode's attributes to the image. 
*/ public static void saveINode2Image(INode node, DataOutput out, boolean writeUnderConstruction, ReferenceMap referenceMap) throws IOException { if (node.isReference()) { writeINodeReference(node.asReference(), out, writeUnderConstruction, referenceMap); } else if (node.isDirectory()) { writeINodeDirectory(node.asDirectory(), out); } else if (node.isSymlink()) { writeINodeSymlink(node.asSymlink(), out); } else if (node.isFile()) { writeINodeFile(node.asFile(), out, writeUnderConstruction); } } // This should be reverted to package private once the ImageLoader // code is moved into this package. This method should not be called // by other code. @SuppressWarnings("deprecation") public static String readString(DataInput in) throws IOException { DeprecatedUTF8 ustr = TL_DATA.get().U_STR; ustr.readFields(in); return ustr.toStringChecked(); } static String readString_EmptyAsNull(DataInput in) throws IOException { final String s = readString(in); return s.isEmpty()? null: s; } @SuppressWarnings("deprecation") public static void writeString(String str, DataOutput out) throws IOException { DeprecatedUTF8 ustr = TL_DATA.get().U_STR; ustr.set(str); ustr.write(out); } /** read the long value */ static long readLong(DataInput in) throws IOException { LongWritable uLong = TL_DATA.get().U_LONG; uLong.readFields(in); return uLong.get(); } /** write the long value */ static void writeLong(long value, DataOutputStream out) throws IOException { LongWritable uLong = TL_DATA.get().U_LONG; uLong.set(value); uLong.write(out); } /** read the boolean value */ static boolean readBoolean(DataInput in) throws IOException { BooleanWritable uBoolean = TL_DATA.get().U_BOOLEAN; uBoolean.readFields(in); return uBoolean.get(); } /** write the boolean value */ static void writeBoolean(boolean value, DataOutputStream out) throws IOException { BooleanWritable uBoolean = TL_DATA.get().U_BOOLEAN; uBoolean.set(value); uBoolean.write(out); } /** write the byte value */ static void writeByte(byte value, DataOutputStream out) throws IOException { out.write(value); } /** read the int value */ static int readInt(DataInput in) throws IOException { IntWritable uInt = TL_DATA.get().U_INT; uInt.readFields(in); return uInt.get(); } /** write the int value */ static void writeInt(int value, DataOutputStream out) throws IOException { IntWritable uInt = TL_DATA.get().U_INT; uInt.set(value); uInt.write(out); } /** read short value */ static short readShort(DataInput in) throws IOException { ShortWritable uShort = TL_DATA.get().U_SHORT; uShort.readFields(in); return uShort.get(); } /** write short value */ static void writeShort(short value, DataOutputStream out) throws IOException { ShortWritable uShort = TL_DATA.get().U_SHORT; uShort.set(value); uShort.write(out); } // Same comments apply for this method as for readString() @SuppressWarnings("deprecation") public static byte[] readBytes(DataInput in) throws IOException { DeprecatedUTF8 ustr = TL_DATA.get().U_STR; ustr.readFields(in); int len = ustr.getLength(); byte[] bytes = new byte[len]; System.arraycopy(ustr.getBytes(), 0, bytes, 0, len); return bytes; } public static byte readByte(DataInput in) throws IOException { return in.readByte(); } /** * Reading the path from the image and converting it to byte[][] directly * this saves us an array copy and conversions to and from String * @param in input to read from * @return the array each element of which is a byte[] representation * of a path component * @throws IOException */ @SuppressWarnings("deprecation") public static byte[][] 
readPathComponents(DataInput in) throws IOException { DeprecatedUTF8 ustr = TL_DATA.get().U_STR; ustr.readFields(in); return DFSUtil.bytes2byteArray(ustr.getBytes(), ustr.getLength(), (byte) Path.SEPARATOR_CHAR); } public static byte[] readLocalName(DataInput in) throws IOException { byte[] createdNodeName = new byte[in.readShort()]; in.readFully(createdNodeName); return createdNodeName; } private static void writeLocalName(INodeAttributes inode, DataOutput out) throws IOException { final byte[] name = inode.getLocalNameBytes(); writeBytes(name, out); } public static void writeBytes(byte[] data, DataOutput out) throws IOException { out.writeShort(data.length); out.write(data); } /** * Write an array of blocks as compactly as possible. This uses * delta-encoding for the generation stamp and size, following * the principle that genstamp increases relatively slowly, * and size is equal for all but the last block of a file. */ public static void writeCompactBlockArray( Block[] blocks, DataOutputStream out) throws IOException { WritableUtils.writeVInt(out, blocks.length); Block prev = null; for (Block b : blocks) { long szDelta = b.getNumBytes() - (prev != null ? prev.getNumBytes() : 0); long gsDelta = b.getGenerationStamp() - (prev != null ? prev.getGenerationStamp() : 0); out.writeLong(b.getBlockId()); // blockid is random WritableUtils.writeVLong(out, szDelta); WritableUtils.writeVLong(out, gsDelta); prev = b; } } public static Block[] readCompactBlockArray( DataInput in, int logVersion) throws IOException { int num = WritableUtils.readVInt(in); if (num < 0) { throw new IOException("Invalid block array length: " + num); } Block prev = null; Block[] ret = new Block[num]; for (int i = 0; i < num; i++) { long id = in.readLong(); long sz = WritableUtils.readVLong(in) + ((prev != null) ? prev.getNumBytes() : 0); long gs = WritableUtils.readVLong(in) + ((prev != null) ? prev.getGenerationStamp() : 0); ret[i] = new Block(id, sz, gs); prev = ret[i]; } return ret; } public static void writeCacheDirectiveInfo(DataOutputStream out, CacheDirectiveInfo directive) throws IOException { writeLong(directive.getId(), out); int flags = ((directive.getPath() != null) ? 0x1 : 0) | ((directive.getReplication() != null) ? 0x2 : 0) | ((directive.getPool() != null) ? 0x4 : 0) | ((directive.getExpiration() != null) ? 
0x8 : 0); out.writeInt(flags); if (directive.getPath() != null) { writeString(directive.getPath().toUri().getPath(), out); } if (directive.getReplication() != null) { writeShort(directive.getReplication(), out); } if (directive.getPool() != null) { writeString(directive.getPool(), out); } if (directive.getExpiration() != null) { writeLong(directive.getExpiration().getMillis(), out); } } public static CacheDirectiveInfo readCacheDirectiveInfo(DataInput in) throws IOException { CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder(); builder.setId(readLong(in)); int flags = in.readInt(); if ((flags & 0x1) != 0) { builder.setPath(new Path(readString(in))); } if ((flags & 0x2) != 0) { builder.setReplication(readShort(in)); } if ((flags & 0x4) != 0) { builder.setPool(readString(in)); } if ((flags & 0x8) != 0) { builder.setExpiration( CacheDirectiveInfo.Expiration.newAbsolute(readLong(in))); } if ((flags & ~0xF) != 0) { throw new IOException("unknown flags set in " + "ModifyCacheDirectiveInfoOp: " + flags); } return builder.build(); } public static CacheDirectiveInfo readCacheDirectiveInfo(Stanza st) throws InvalidXmlException { CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder(); builder.setId(Long.parseLong(st.getValue("ID"))); String path = st.getValueOrNull("PATH"); if (path != null) { builder.setPath(new Path(path)); } String replicationString = st.getValueOrNull("REPLICATION"); if (replicationString != null) { builder.setReplication(Short.parseShort(replicationString)); } String pool = st.getValueOrNull("POOL"); if (pool != null) { builder.setPool(pool); } String expiryTime = st.getValueOrNull("EXPIRATION"); if (expiryTime != null) { builder.setExpiration(CacheDirectiveInfo.Expiration.newAbsolute( Long.parseLong(expiryTime))); } return builder.build(); } public static void writeCacheDirectiveInfo(ContentHandler contentHandler, CacheDirectiveInfo directive) throws SAXException { XMLUtils.addSaxString(contentHandler, "ID", Long.toString(directive.getId())); if (directive.getPath() != null) { XMLUtils.addSaxString(contentHandler, "PATH", directive.getPath().toUri().getPath()); } if (directive.getReplication() != null) { XMLUtils.addSaxString(contentHandler, "REPLICATION", Short.toString(directive.getReplication())); } if (directive.getPool() != null) { XMLUtils.addSaxString(contentHandler, "POOL", directive.getPool()); } if (directive.getExpiration() != null) { XMLUtils.addSaxString(contentHandler, "EXPIRATION", "" + directive.getExpiration().getMillis()); } } public static void writeCachePoolInfo(DataOutputStream out, CachePoolInfo info) throws IOException { writeString(info.getPoolName(), out); final String ownerName = info.getOwnerName(); final String groupName = info.getGroupName(); final Long limit = info.getLimit(); final FsPermission mode = info.getMode(); final Long maxRelativeExpiry = info.getMaxRelativeExpiryMs(); boolean hasOwner, hasGroup, hasMode, hasLimit, hasMaxRelativeExpiry; hasOwner = ownerName != null; hasGroup = groupName != null; hasMode = mode != null; hasLimit = limit != null; hasMaxRelativeExpiry = maxRelativeExpiry != null; int flags = (hasOwner ? 0x1 : 0) | (hasGroup ? 0x2 : 0) | (hasMode ? 0x4 : 0) | (hasLimit ? 0x8 : 0) | (hasMaxRelativeExpiry ? 
0x10 : 0); writeInt(flags, out); if (hasOwner) { writeString(ownerName, out); } if (hasGroup) { writeString(groupName, out); } if (hasMode) { mode.write(out); } if (hasLimit) { writeLong(limit, out); } if (hasMaxRelativeExpiry) { writeLong(maxRelativeExpiry, out); } } public static CachePoolInfo readCachePoolInfo(DataInput in) throws IOException { String poolName = readString(in); CachePoolInfo info = new CachePoolInfo(poolName); int flags = readInt(in); if ((flags & 0x1) != 0) { info.setOwnerName(readString(in)); } if ((flags & 0x2) != 0) { info.setGroupName(readString(in)); } if ((flags & 0x4) != 0) { info.setMode(FsPermission.read(in)); } if ((flags & 0x8) != 0) { info.setLimit(readLong(in)); } if ((flags & 0x10) != 0) { info.setMaxRelativeExpiryMs(readLong(in)); } if ((flags & ~0x1F) != 0) { throw new IOException("Unknown flag in CachePoolInfo: " + flags); } return info; } public static void writeCachePoolInfo(ContentHandler contentHandler, CachePoolInfo info) throws SAXException { XMLUtils.addSaxString(contentHandler, "POOLNAME", info.getPoolName()); final String ownerName = info.getOwnerName(); final String groupName = info.getGroupName(); final Long limit = info.getLimit(); final FsPermission mode = info.getMode(); final Long maxRelativeExpiry = info.getMaxRelativeExpiryMs(); if (ownerName != null) { XMLUtils.addSaxString(contentHandler, "OWNERNAME", ownerName); } if (groupName != null) { XMLUtils.addSaxString(contentHandler, "GROUPNAME", groupName); } if (mode != null) { FSEditLogOp.fsPermissionToXml(contentHandler, mode); } if (limit != null) { XMLUtils.addSaxString(contentHandler, "LIMIT", Long.toString(limit)); } if (maxRelativeExpiry != null) { XMLUtils.addSaxString(contentHandler, "MAXRELATIVEEXPIRY", Long.toString(maxRelativeExpiry)); } } public static CachePoolInfo readCachePoolInfo(Stanza st) throws InvalidXmlException { String poolName = st.getValue("POOLNAME"); CachePoolInfo info = new CachePoolInfo(poolName); if (st.hasChildren("OWNERNAME")) { info.setOwnerName(st.getValue("OWNERNAME")); } if (st.hasChildren("GROUPNAME")) { info.setGroupName(st.getValue("GROUPNAME")); } if (st.hasChildren("MODE")) { info.setMode(FSEditLogOp.fsPermissionFromXml(st)); } if (st.hasChildren("LIMIT")) { info.setLimit(Long.parseLong(st.getValue("LIMIT"))); } if (st.hasChildren("MAXRELATIVEEXPIRY")) { info.setMaxRelativeExpiryMs( Long.parseLong(st.getValue("MAXRELATIVEEXPIRY"))); } return info; } }
24,866
32.924966
90
java
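The writeCompactBlockArray/readCompactBlockArray pair above relies on the observation that consecutive blocks of a file usually share the same size and have nearly consecutive generation stamps, so only deltas need to be stored. The standalone sketch below reproduces that round trip with plain longs (the real code additionally varint-encodes the deltas via WritableUtils and writes to a stream); SimpleBlock and the encode/decode helpers are illustrative, not HDFS classes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

class CompactBlockArraySketch {

  static class SimpleBlock {
    final long id, numBytes, genStamp;
    SimpleBlock(long id, long numBytes, long genStamp) {
      this.id = id; this.numBytes = numBytes; this.genStamp = genStamp;
    }
  }

  // Encode each block as [id, sizeDelta, gsDelta]; the id is effectively
  // random, so it is stored as-is, while size and generation stamp are stored
  // as differences from the previous block.
  static List<long[]> encode(List<SimpleBlock> blocks) {
    List<long[]> out = new ArrayList<>();
    SimpleBlock prev = null;
    for (SimpleBlock b : blocks) {
      long szDelta = b.numBytes - (prev != null ? prev.numBytes : 0);
      long gsDelta = b.genStamp - (prev != null ? prev.genStamp : 0);
      out.add(new long[] { b.id, szDelta, gsDelta });
      prev = b;
    }
    return out;
  }

  // Decoding adds the deltas back onto the previously reconstructed block.
  static List<SimpleBlock> decode(List<long[]> triples) {
    List<SimpleBlock> out = new ArrayList<>();
    SimpleBlock prev = null;
    for (long[] t : triples) {
      long sz = t[1] + (prev != null ? prev.numBytes : 0);
      long gs = t[2] + (prev != null ? prev.genStamp : 0);
      prev = new SimpleBlock(t[0], sz, gs);
      out.add(prev);
    }
    return out;
  }

  public static void main(String[] args) {
    List<SimpleBlock> blocks = Arrays.asList(
        new SimpleBlock(1001, 134217728L, 5000),  // full 128 MB block
        new SimpleBlock(1002, 134217728L, 5001),  // same size -> size delta 0
        new SimpleBlock(1003, 7340032L, 5002));   // short last block
    List<long[]> encoded = encode(blocks);
    // The middle block encodes as sizeDelta=0, gsDelta=1: small values that
    // the real code then compresses further with variable-length longs.
    System.out.println(Arrays.toString(encoded.get(1))); // [1002, 0, 1]
    SimpleBlock last = decode(encoded).get(2);
    System.out.println(last.numBytes + " " + last.genStamp); // 7340032 5002
  }
}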
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext; import org.apache.hadoop.util.ChunkedArrayList; import java.io.IOException; import java.util.ArrayList; import java.util.List; import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID; import static org.apache.hadoop.util.Time.now; class FSDirDeleteOp { /** * Delete the target directory and collect the blocks under it * * @param fsd the FSDirectory instance * @param iip the INodesInPath instance containing all the INodes for the path * @param collectedBlocks Blocks under the deleted directory * @param removedINodes INodes that should be removed from inodeMap * @return the number of files that have been removed */ static long delete(FSDirectory fsd, INodesInPath iip, BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes, List<Long> removedUCFiles, long mtime) throws IOException { if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + iip.getPath()); } long filesRemoved = -1; fsd.writeLock(); try { if (deleteAllowed(iip, iip.getPath()) ) { List<INodeDirectory> snapshottableDirs = new ArrayList<>(); FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs); ReclaimContext context = new ReclaimContext( fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes, removedUCFiles); if (unprotectedDelete(fsd, iip, context, mtime)) { filesRemoved = context.quotaDelta().getNsDelta(); } fsd.getFSNamesystem().removeSnapshottableDirs(snapshottableDirs); fsd.updateCount(iip, context.quotaDelta(), false); } } finally { fsd.writeUnlock(); } return filesRemoved; } /** * Remove a file/directory from the namespace. * <p> * For large directories, deletion is incremental. The blocks under * the directory are collected and deleted a small number at a time holding * the {@link FSNamesystem} lock. * <p> * For small directory or file the deletion is done in one shot. 
* * @param fsn namespace * @param src path name to be deleted * @param recursive boolean true to apply to all sub-directories recursively * @param logRetryCache whether to record RPC ids in editlog for retry cache * rebuilding * @return blocks collected from the deleted path * @throws IOException */ static BlocksMapUpdateInfo delete( FSNamesystem fsn, String src, boolean recursive, boolean logRetryCache) throws IOException { FSDirectory fsd = fsn.getFSDirectory(); FSPermissionChecker pc = fsd.getPermissionChecker(); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); src = fsd.resolvePath(pc, src, pathComponents); final INodesInPath iip = fsd.getINodesInPath4Write(src, false); if (!recursive && fsd.isNonEmptyDirectory(iip)) { throw new PathIsNotEmptyDirectoryException(src + " is non empty"); } if (fsd.isPermissionEnabled()) { fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null, FsAction.ALL, true); } return deleteInternal(fsn, src, iip, logRetryCache); } /** * Delete a path from the name space * Update the count at each ancestor directory with quota * <br> * Note: This is to be used by * {@link org.apache.hadoop.hdfs.server.namenode.FSEditLog} only. * <br> * * @param fsd the FSDirectory instance * @param src a string representation of a path to an inode * @param mtime the time the inode is removed */ static void deleteForEditLog(FSDirectory fsd, String src, long mtime) throws IOException { assert fsd.hasWriteLock(); FSNamesystem fsn = fsd.getFSNamesystem(); BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); List<INode> removedINodes = new ChunkedArrayList<>(); List<Long> removedUCFiles = new ChunkedArrayList<>(); final INodesInPath iip = fsd.getINodesInPath4Write( FSDirectory.normalizePath(src), false); if (!deleteAllowed(iip, src)) { return; } List<INodeDirectory> snapshottableDirs = new ArrayList<>(); FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs); boolean filesRemoved = unprotectedDelete(fsd, iip, new ReclaimContext(fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes, removedUCFiles), mtime); fsn.removeSnapshottableDirs(snapshottableDirs); if (filesRemoved) { fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, false); fsn.removeBlocksAndUpdateSafemodeTotal(collectedBlocks); } } /** * Remove a file/directory from the namespace. * <p> * For large directories, deletion is incremental. The blocks under * the directory are collected and deleted a small number at a time holding * the {@link org.apache.hadoop.hdfs.server.namenode.FSNamesystem} lock. * <p> * For small directory or file the deletion is done in one shot. 
* @param fsn namespace * @param src path name to be deleted * @param iip the INodesInPath instance containing all the INodes for the path * @param logRetryCache whether to record RPC ids in editlog for retry cache * rebuilding * @return blocks collected from the deleted path * @throws IOException */ static BlocksMapUpdateInfo deleteInternal( FSNamesystem fsn, String src, INodesInPath iip, boolean logRetryCache) throws IOException { assert fsn.hasWriteLock(); if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src); } FSDirectory fsd = fsn.getFSDirectory(); BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); List<INode> removedINodes = new ChunkedArrayList<>(); List<Long> removedUCFiles = new ChunkedArrayList<>(); long mtime = now(); // Unlink the target directory from directory tree long filesRemoved = delete( fsd, iip, collectedBlocks, removedINodes, removedUCFiles, mtime); if (filesRemoved < 0) { return null; } fsd.getEditLog().logDelete(src, mtime, logRetryCache); incrDeletedFileCount(filesRemoved); fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, true); if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* Namesystem.delete: " + src +" is removed"); } return collectedBlocks; } static void incrDeletedFileCount(long count) { NameNode.getNameNodeMetrics().incrFilesDeleted(count); } private static boolean deleteAllowed(final INodesInPath iip, final String src) { if (iip.length() < 1 || iip.getLastINode() == null) { if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug( "DIR* FSDirectory.unprotectedDelete: failed to remove " + src + " because it does not exist"); } return false; } else if (iip.length() == 1) { // src is the root NameNode.stateChangeLog.warn( "DIR* FSDirectory.unprotectedDelete: failed to remove " + src + " because the root is not allowed to be deleted"); return false; } return true; } /** * Delete a path from the name space * Update the count at each ancestor directory with quota * @param fsd the FSDirectory instance * @param iip the inodes resolved from the path * @param reclaimContext used to collect blocks and inodes to be removed * @param mtime the time the inode is removed * @return true if there are inodes deleted */ private static boolean unprotectedDelete(FSDirectory fsd, INodesInPath iip, ReclaimContext reclaimContext, long mtime) { assert fsd.hasWriteLock(); // check if target node exists INode targetNode = iip.getLastINode(); if (targetNode == null) { return false; } // record modification final int latestSnapshot = iip.getLatestSnapshotId(); targetNode.recordModification(latestSnapshot); // Remove the node from the namespace long removed = fsd.removeLastINode(iip); if (removed == -1) { return false; } // set the parent's modification time final INodeDirectory parent = targetNode.getParent(); parent.updateModificationTime(mtime, latestSnapshot); // collect block and update quota if (!targetNode.isInLatestSnapshot(latestSnapshot)) { targetNode.destroyAndCollectBlocks(reclaimContext); } else { targetNode.cleanSubtree(reclaimContext, CURRENT_STATE_ID, latestSnapshot); } if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: " + iip.getPath() + " is removed"); } return true; } }
10,006
37.04943
88
java
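From a client's point of view, the recursive flag handled in delete() above determines whether a non-empty directory may be removed at all. The hedged usage sketch below shows that behaviour through the public FileSystem API; the path is made up, and the failure of the non-recursive delete is caught broadly as IOException here (on HDFS it originates from the PathIsNotEmptyDirectoryException thrown above, possibly wrapped by the RPC layer).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RecursiveDeleteExample {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/tmp/non-empty-dir"); // assumed to exist and contain files

    try {
      // recursive=false: the NameNode refuses to delete a non-empty directory.
      fs.delete(dir, false);
    } catch (IOException e) {
      System.out.println("non-recursive delete refused: " + e.getMessage());
    }

    // recursive=true: the whole subtree is unlinked; on the NameNode this is
    // the delete() -> deleteInternal() path shown above, with the blocks of
    // the removed files handed back for incremental invalidation.
    boolean deleted = fs.delete(dir, true);
    System.out.println("recursive delete succeeded: " + deleted);
  }
}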
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.util.RwLock; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.security.AccessControlException; /** Namesystem operations. */ @InterfaceAudience.Private public interface Namesystem extends RwLock, SafeMode { /** Is this name system running? */ boolean isRunning(); /** Check if the user has superuser privilege. */ void checkSuperuserPrivilege() throws AccessControlException; /** @return the block pool ID */ String getBlockPoolId(); boolean isInStandbyState(); boolean isGenStampInFuture(Block block); void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal); void checkOperation(OperationCategory read) throws StandbyException; boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC); CacheManager getCacheManager(); }
1,948
37.215686
90
java
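As a rough illustration of how NameNode-side components use this interface together with the RwLock methods it inherits, a mutating operation is typically guarded as in the hedged sketch below. The helper class and method name are placeholders, not part of HDFS; only checkOperation and the inherited writeLock/writeUnlock calls are taken from the interfaces shown.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;

// Hypothetical helper: shows the usual guard pattern around a namespace mutation.
class NamesystemGuardSketch {
  static void guardedNamespaceUpdate(Namesystem ns) throws IOException {
    // Rejected with StandbyException when this NameNode is in standby state.
    ns.checkOperation(NameNode.OperationCategory.WRITE);
    ns.writeLock();
    try {
      // ... mutate namespace state here ...
    } finally {
      ns.writeUnlock();
    }
  }
}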
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT; import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY; import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_KEY; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*; import static org.apache.hadoop.util.Time.now; import static org.apache.hadoop.util.Time.monotonicNow; import java.io.BufferedWriter; import java.io.ByteArrayInputStream; import java.io.DataInput; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.File; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.io.StringWriter; import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.URI; import java.security.GeneralSecurityException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Date; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import javax.management.NotCompliantMBeanException; import javax.management.ObjectName; import javax.management.StandardMBean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclStatus; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.ServiceFailedException; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException; import 
org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException; import org.apache.hadoop.hdfs.protocol.RollingUpgradeException; import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager.SecretManagerState; import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; 
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature; import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer; import org.apache.hadoop.hdfs.server.namenode.ha.HAContext; import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer; import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean; import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Status; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType; import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger; import org.apache.hadoop.hdfs.server.namenode.top.TopConf; import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics; import org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager; import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse; import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat; import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.server.protocol.StorageReport; import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.RetryCache; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; import org.apache.log4j.Appender; import org.apache.log4j.AsyncAppender; import org.apache.log4j.Logger; import org.codehaus.jackson.map.ObjectMapper; import org.mortbay.util.ajax.JSON; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Charsets; 
import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; /*************************************************** * FSNamesystem does the actual bookkeeping work for the * DataNode. * * It tracks several important tables. * * 1) valid fsname --> blocklist (kept on disk, logged) * 2) Set of all valid blocks (inverted #1) * 3) block --> machinelist (kept in memory, rebuilt dynamically from reports) * 4) machine --> blocklist (inverted #2) * 5) LRU cache of updated-heartbeat machines ***************************************************/ @InterfaceAudience.Private @Metrics(context="dfs") public class FSNamesystem implements Namesystem, FSNamesystemMBean, NameNodeMXBean { public static final Log LOG = LogFactory.getLog(FSNamesystem.class); private static final ThreadLocal<StringBuilder> auditBuffer = new ThreadLocal<StringBuilder>() { @Override protected StringBuilder initialValue() { return new StringBuilder(); } }; private final BlockIdManager blockIdManager; @VisibleForTesting public boolean isAuditEnabled() { return !isDefaultAuditLogger || auditLog.isInfoEnabled(); } private void logAuditEvent(boolean succeeded, String cmd, String src) throws IOException { logAuditEvent(succeeded, cmd, src, null, null); } private void logAuditEvent(boolean succeeded, String cmd, String src, String dst, HdfsFileStatus stat) throws IOException { if (isAuditEnabled() && isExternalInvocation()) { logAuditEvent(succeeded, getRemoteUser(), getRemoteIp(), cmd, src, dst, stat); } } private void logAuditEvent(boolean succeeded, UserGroupInformation ugi, InetAddress addr, String cmd, String src, String dst, HdfsFileStatus stat) { FileStatus status = null; if (stat != null) { Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null; Path path = dst != null ? new Path(dst) : new Path(src); status = new FileStatus(stat.getLen(), stat.isDir(), stat.getReplication(), stat.getBlockSize(), stat.getModificationTime(), stat.getAccessTime(), stat.getPermission(), stat.getOwner(), stat.getGroup(), symlink, path); } for (AuditLogger logger : auditLoggers) { if (logger instanceof HdfsAuditLogger) { HdfsAuditLogger hdfsLogger = (HdfsAuditLogger) logger; hdfsLogger.logAuditEvent(succeeded, ugi.toString(), addr, cmd, src, dst, status, ugi, dtSecretManager); } else { logger.logAuditEvent(succeeded, ugi.toString(), addr, cmd, src, dst, status); } } } /** * Logger for audit events, noting successful FSNamesystem operations. Emits * to FSNamesystem.audit at INFO. Each event causes a set of tab-separated * <code>key=value</code> pairs to be written for the following properties: * <code> * ugi=&lt;ugi in RPC&gt; * ip=&lt;remote IP&gt; * cmd=&lt;command&gt; * src=&lt;src path&gt; * dst=&lt;dst path (optional)&gt; * perm=&lt;permissions (optional)&gt; * </code> */ public static final Log auditLog = LogFactory.getLog( FSNamesystem.class.getName() + ".audit"); static final int DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED = 100; static int BLOCK_DELETION_INCREMENT = 1000; private final boolean isPermissionEnabled; private final UserGroupInformation fsOwner; private final String supergroup; private final boolean standbyShouldCheckpoint; // Scan interval is not configurable. 
private static final long DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL = TimeUnit.MILLISECONDS.convert(1, TimeUnit.HOURS); final DelegationTokenSecretManager dtSecretManager; private final boolean alwaysUseDelegationTokensForTests; private static final Step STEP_AWAITING_REPORTED_BLOCKS = new Step(StepType.AWAITING_REPORTED_BLOCKS); // Tracks whether the default audit logger is the only configured audit // logger; this allows isAuditEnabled() to return false in case the // underlying logger is disabled, and avoid some unnecessary work. private final boolean isDefaultAuditLogger; private final List<AuditLogger> auditLoggers; /** The namespace tree. */ FSDirectory dir; private final BlockManager blockManager; private final SnapshotManager snapshotManager; private final CacheManager cacheManager; private final DatanodeStatistics datanodeStatistics; private String nameserviceId; private volatile RollingUpgradeInfo rollingUpgradeInfo = null; /** * A flag that indicates whether the checkpointer should checkpoint a rollback * fsimage. The edit log tailer sets this flag. The checkpoint will create a * rollback fsimage if the flag is true, and then change the flag to false. */ private volatile boolean needRollbackFsImage; // Block pool ID used by this namenode private String blockPoolId; final LeaseManager leaseManager = new LeaseManager(this); volatile Daemon smmthread = null; // SafeModeMonitor thread Daemon nnrmthread = null; // NamenodeResourceMonitor thread Daemon nnEditLogRoller = null; // NameNodeEditLogRoller thread // A daemon to periodically clean up corrupt lazyPersist files // from the name space. Daemon lazyPersistFileScrubber = null; /** * When an active namenode will roll its own edit log, in # edits */ private final long editLogRollerThreshold; /** * Check interval of an active namenode's edit log roller thread */ private final int editLogRollerInterval; /** * How frequently we scan and unlink corrupt lazyPersist files. * (In seconds) */ private final int lazyPersistFileScrubIntervalSec; private volatile boolean hasResourcesAvailable = false; private volatile boolean fsRunning = true; /** The start time of the namesystem. */ private final long startTime = now(); /** The interval of namenode checking for the disk space availability */ private final long resourceRecheckInterval; // The actual resource checker instance. NameNodeResourceChecker nnResourceChecker; private final FsServerDefaults serverDefaults; private final boolean supportAppends; private final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure; private volatile SafeModeInfo safeMode; // safe mode information private final long maxFsObjects; // maximum number of fs objects private final long minBlockSize; // minimum block size final long maxBlocksPerFile; // maximum # of blocks per file /** Lock to protect FSNamesystem. */ private final FSNamesystemLock fsLock; /** * Checkpoint lock to protect FSNamesystem modification on standby NNs. * Unlike fsLock, it does not affect block updates. On active NNs, this lock * does not provide proper protection, because there are operations that * modify both block and name system state. Even on standby, fsLock is * used when block state changes need to be blocked. */ private final ReentrantLock cpLock; /** * Used when this NN is in standby state to read from the shared edit log. */ private EditLogTailer editLogTailer = null; /** * Used when this NN is in standby state to perform checkpoints. */ private StandbyCheckpointer standbyCheckpointer; /** * Reference to the NN's HAContext object. 
This is only set once * {@link #startCommonServices(Configuration, HAContext)} is called. */ private HAContext haContext; private final boolean haEnabled; /** flag indicating whether replication queues have been initialized */ boolean initializedReplQueues = false; /** * Whether the namenode is in the middle of starting the active service */ private volatile boolean startingActiveService = false; private final RetryCache retryCache; private KeyProviderCryptoExtension provider = null; private volatile boolean imageLoaded = false; private final Condition cond; private final FSImage fsImage; private final TopConf topConf; private TopMetrics topMetrics; private INodeAttributeProvider inodeAttributeProvider; /** * Notify that loading of this FSDirectory is complete, and * it is imageLoaded for use */ void imageLoadComplete() { Preconditions.checkState(!imageLoaded, "FSDirectory already loaded"); setImageLoaded(); } void setImageLoaded() { if(imageLoaded) return; writeLock(); try { setImageLoaded(true); dir.markNameCacheInitialized(); cond.signalAll(); } finally { writeUnlock(); } } //This is for testing purposes only @VisibleForTesting boolean isImageLoaded() { return imageLoaded; } // exposed for unit tests protected void setImageLoaded(boolean flag) { imageLoaded = flag; } /** * Block until the object is imageLoaded to be used. */ void waitForLoadingFSImage() { if (!imageLoaded) { writeLock(); try { while (!imageLoaded) { try { cond.await(5000, TimeUnit.MILLISECONDS); } catch (InterruptedException ignored) { } } } finally { writeUnlock(); } } } /** * Clear all loaded data */ void clear() { dir.reset(); dtSecretManager.reset(); blockIdManager.clear(); leaseManager.removeAllLeases(); snapshotManager.clearSnapshottableDirs(); cacheManager.clear(); setImageLoaded(false); blockManager.clear(); } @VisibleForTesting LeaseManager getLeaseManager() { return leaseManager; } boolean isHaEnabled() { return haEnabled; } /** * Check the supplied configuration for correctness. * @param conf Supplies the configuration to validate. * @throws IOException if the configuration could not be queried. * @throws IllegalArgumentException if the configuration is invalid. */ private static void checkConfiguration(Configuration conf) throws IOException { final Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf); final Collection<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf); final Collection<URI> requiredEditsDirs = FSNamesystem.getRequiredNamespaceEditsDirs(conf); final Collection<URI> sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf); for (URI u : requiredEditsDirs) { if (u.toString().compareTo( DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT) == 0) { continue; } // Each required directory must also be in editsDirs or in // sharedEditsDirs. if (!editsDirs.contains(u) && !sharedEditsDirs.contains(u)) { throw new IllegalArgumentException( "Required edits directory " + u.toString() + " not present in " + DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY + ". " + DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY + "=" + editsDirs.toString() + "; " + DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY + "=" + requiredEditsDirs.toString() + ". " + DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY + "=" + sharedEditsDirs.toString() + "."); } } if (namespaceDirs.size() == 1) { LOG.warn("Only one image storage directory (" + DFS_NAMENODE_NAME_DIR_KEY + ") configured. 
Beware of data loss" + " due to lack of redundant storage directories!"); } if (editsDirs.size() == 1) { LOG.warn("Only one namespace edits storage directory (" + DFS_NAMENODE_EDITS_DIR_KEY + ") configured. Beware of data loss" + " due to lack of redundant storage directories!"); } } /** * Instantiates an FSNamesystem loaded from the image and edits * directories specified in the passed Configuration. * * @param conf the Configuration which specifies the storage directories * from which to load * @return an FSNamesystem which contains the loaded namespace * @throws IOException if loading fails */ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { checkConfiguration(conf); FSImage fsImage = new FSImage(conf, FSNamesystem.getNamespaceDirs(conf), FSNamesystem.getNamespaceEditsDirs(conf)); FSNamesystem namesystem = new FSNamesystem(conf, fsImage, false); StartupOption startOpt = NameNode.getStartupOption(conf); if (startOpt == StartupOption.RECOVER) { namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER); } long loadStart = monotonicNow(); try { namesystem.loadFSImage(startOpt); } catch (IOException ioe) { LOG.warn("Encountered exception loading fsimage", ioe); fsImage.close(); throw ioe; } long timeTakenToLoadFSImage = monotonicNow() - loadStart; LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs"); NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics(); if (nnMetrics != null) { nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage); } return namesystem; } FSNamesystem(Configuration conf, FSImage fsImage) throws IOException { this(conf, fsImage, false); } /** * Create an FSNamesystem associated with the specified image. * * Note that this does not load any data off of disk -- if you would * like that behavior, use {@link #loadFromDisk(Configuration)} * * @param conf configuration * @param fsImage The FSImage to associate with * @param ignoreRetryCache Whether or not should ignore the retry cache setup * step. For Secondary NN this should be set to true. 
* @throws IOException on bad configuration */ FSNamesystem(Configuration conf, FSImage fsImage, boolean ignoreRetryCache) throws IOException { provider = DFSUtil.createKeyProviderCryptoExtension(conf); if (provider == null) { LOG.info("No KeyProvider found."); } else { LOG.info("Found KeyProvider: " + provider.toString()); } if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) { LOG.info("Enabling async auditlog"); enableAsyncAuditLog(); } boolean fair = conf.getBoolean("dfs.namenode.fslock.fair", true); LOG.info("fsLock is fair:" + fair); fsLock = new FSNamesystemLock(fair); cond = fsLock.writeLock().newCondition(); cpLock = new ReentrantLock(); this.fsImage = fsImage; try { resourceRecheckInterval = conf.getLong( DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT); this.blockManager = new BlockManager(this, conf); this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics(); this.blockIdManager = new BlockIdManager(blockManager); this.fsOwner = UserGroupInformation.getCurrentUser(); this.supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY, DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT); this.isPermissionEnabled = conf.getBoolean(DFS_PERMISSIONS_ENABLED_KEY, DFS_PERMISSIONS_ENABLED_DEFAULT); LOG.info("fsOwner = " + fsOwner); LOG.info("supergroup = " + supergroup); LOG.info("isPermissionEnabled = " + isPermissionEnabled); // block allocation has to be persisted in HA using a shared edits directory // so that the standby has up-to-date namespace information nameserviceId = DFSUtil.getNamenodeNameServiceId(conf); this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId); // Sanity check the HA-related config. if (nameserviceId != null) { LOG.info("Determined nameservice ID: " + nameserviceId); } LOG.info("HA Enabled: " + haEnabled); if (!haEnabled && HAUtil.usesSharedEditsDir(conf)) { LOG.warn("Configured NNs:\n" + DFSUtil.nnAddressesAsString(conf)); throw new IOException("Invalid configuration: a shared edits dir " + "must not be specified if HA is not enabled."); } // Get the checksum type from config String checksumTypeStr = conf.get(DFS_CHECKSUM_TYPE_KEY, DFS_CHECKSUM_TYPE_DEFAULT); DataChecksum.Type checksumType; try { checksumType = DataChecksum.Type.valueOf(checksumTypeStr); } catch (IllegalArgumentException iae) { throw new IOException("Invalid checksum type in " + DFS_CHECKSUM_TYPE_KEY + ": " + checksumTypeStr); } this.serverDefaults = new FsServerDefaults( conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT), conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT), conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT), (short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT), conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT), conf.getBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY, DFS_ENCRYPT_DATA_TRANSFER_DEFAULT), conf.getLong(FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT), checksumType); this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY, DFS_NAMENODE_MAX_OBJECTS_DEFAULT); this.minBlockSize = conf.getLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT); this.maxBlocksPerFile = conf.getLong(DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY, DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT); this.supportAppends = conf.getBoolean(DFS_SUPPORT_APPEND_KEY, DFS_SUPPORT_APPEND_DEFAULT); LOG.info("Append Enabled: " + supportAppends); 
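// A few statements below, the edit-log autoroll threshold is derived as
// (autoroll multiplier) * (checkpoint transaction count). As a rough,
// illustrative calculation only -- assuming the stock defaults of a 2.0
// multiplier and 1,000,000 checkpoint transactions -- the active NameNode
// would roll its own edit log after roughly 2,000,000 unrolled edits.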
this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf); this.standbyShouldCheckpoint = conf.getBoolean( DFS_HA_STANDBY_CHECKPOINTS_KEY, DFS_HA_STANDBY_CHECKPOINTS_DEFAULT); // # edit autoroll threshold is a multiple of the checkpoint threshold this.editLogRollerThreshold = (long) (conf.getFloat( DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT) * conf.getLong( DFS_NAMENODE_CHECKPOINT_TXNS_KEY, DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT)); this.editLogRollerInterval = conf.getInt( DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS_DEFAULT); this.lazyPersistFileScrubIntervalSec = conf.getInt( DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC, DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC_DEFAULT); if (this.lazyPersistFileScrubIntervalSec < 0) { throw new IllegalArgumentException( DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC + " must be zero (for disable) or greater than zero."); } // For testing purposes, allow the DT secret manager to be started regardless // of whether security is enabled. alwaysUseDelegationTokensForTests = conf.getBoolean( DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT); this.dtSecretManager = createDelegationTokenSecretManager(conf); this.dir = new FSDirectory(this, conf); this.snapshotManager = new SnapshotManager(dir); this.cacheManager = new CacheManager(this, conf, blockManager); this.safeMode = new SafeModeInfo(conf); this.topConf = new TopConf(conf); this.auditLoggers = initAuditLoggers(conf); this.isDefaultAuditLogger = auditLoggers.size() == 1 && auditLoggers.get(0) instanceof DefaultAuditLogger; this.retryCache = ignoreRetryCache ? null : initRetryCache(conf); Class<? extends INodeAttributeProvider> klass = conf.getClass( DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY, null, INodeAttributeProvider.class); if (klass != null) { inodeAttributeProvider = ReflectionUtils.newInstance(klass, conf); LOG.info("Using INode attribute provider: " + klass.getName()); } } catch(IOException e) { LOG.error(getClass().getSimpleName() + " initialization failed.", e); close(); throw e; } catch (RuntimeException re) { LOG.error(getClass().getSimpleName() + " initialization failed.", re); close(); throw re; } } @VisibleForTesting public List<AuditLogger> getAuditLoggers() { return auditLoggers; } @VisibleForTesting public RetryCache getRetryCache() { return retryCache; } void lockRetryCache() { if (retryCache != null) { retryCache.lock(); } } void unlockRetryCache() { if (retryCache != null) { retryCache.unlock(); } } /** Whether or not retry cache is enabled */ boolean hasRetryCache() { return retryCache != null; } void addCacheEntryWithPayload(byte[] clientId, int callId, Object payload) { if (retryCache != null) { retryCache.addCacheEntryWithPayload(clientId, callId, payload); } } void addCacheEntry(byte[] clientId, int callId) { if (retryCache != null) { retryCache.addCacheEntry(clientId, callId); } } @VisibleForTesting public KeyProviderCryptoExtension getProvider() { return provider; } @VisibleForTesting static RetryCache initRetryCache(Configuration conf) { boolean enable = conf.getBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, DFS_NAMENODE_ENABLE_RETRY_CACHE_DEFAULT); LOG.info("Retry cache on namenode is " + (enable ? 
"enabled" : "disabled")); if (enable) { float heapPercent = conf.getFloat( DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_KEY, DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT); long entryExpiryMillis = conf.getLong( DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_KEY, DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_DEFAULT); LOG.info("Retry cache will use " + heapPercent + " of total heap and retry cache entry expiry time is " + entryExpiryMillis + " millis"); long entryExpiryNanos = entryExpiryMillis * 1000 * 1000; return new RetryCache("NameNodeRetryCache", heapPercent, entryExpiryNanos); } return null; } private List<AuditLogger> initAuditLoggers(Configuration conf) { // Initialize the custom access loggers if configured. Collection<String> alClasses = conf.getStringCollection(DFS_NAMENODE_AUDIT_LOGGERS_KEY); List<AuditLogger> auditLoggers = Lists.newArrayList(); if (alClasses != null && !alClasses.isEmpty()) { for (String className : alClasses) { try { AuditLogger logger; if (DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME.equals(className)) { logger = new DefaultAuditLogger(); } else { logger = (AuditLogger) Class.forName(className).newInstance(); } logger.initialize(conf); auditLoggers.add(logger); } catch (RuntimeException re) { throw re; } catch (Exception e) { throw new RuntimeException(e); } } } // Make sure there is at least one logger installed. if (auditLoggers.isEmpty()) { auditLoggers.add(new DefaultAuditLogger()); } // Add audit logger to calculate top users if (topConf.isEnabled) { topMetrics = new TopMetrics(conf, topConf.nntopReportingPeriodsMs); auditLoggers.add(new TopAuditLogger(topMetrics)); } return Collections.unmodifiableList(auditLoggers); } private void loadFSImage(StartupOption startOpt) throws IOException { final FSImage fsImage = getFSImage(); // format before starting up if requested if (startOpt == StartupOption.FORMAT) { fsImage.format(this, fsImage.getStorage().determineClusterId());// reuse current id startOpt = StartupOption.REGULAR; } boolean success = false; writeLock(); try { // We shouldn't be calling saveNamespace if we've come up in standby state. MetaRecoveryContext recovery = startOpt.createRecoveryContext(); final boolean staleImage = fsImage.recoverTransitionRead(startOpt, this, recovery); if (RollingUpgradeStartupOption.ROLLBACK.matches(startOpt) || RollingUpgradeStartupOption.DOWNGRADE.matches(startOpt)) { rollingUpgradeInfo = null; } final boolean needToSave = staleImage && !haEnabled && !isRollingUpgrade(); LOG.info("Need to save fs image? " + needToSave + " (staleImage=" + staleImage + ", haEnabled=" + haEnabled + ", isRollingUpgrade=" + isRollingUpgrade() + ")"); if (needToSave) { fsImage.saveNamespace(this); } else { // No need to save, so mark the phase done. StartupProgress prog = NameNode.getStartupProgress(); prog.beginPhase(Phase.SAVING_CHECKPOINT); prog.endPhase(Phase.SAVING_CHECKPOINT); } // This will start a new log segment and write to the seen_txid file, so // we shouldn't do it when coming up in standby state if (!haEnabled || (haEnabled && startOpt == StartupOption.UPGRADE) || (haEnabled && startOpt == StartupOption.UPGRADEONLY)) { fsImage.openEditLogForWrite(getEffectiveLayoutVersion()); } success = true; } finally { if (!success) { fsImage.close(); } writeUnlock(); } imageLoadComplete(); } private void startSecretManager() { if (dtSecretManager != null) { try { dtSecretManager.startThreads(); } catch (IOException e) { // Inability to start secret manager // can't be recovered from. 
throw new RuntimeException(e); } } } private void startSecretManagerIfNecessary() { boolean shouldRun = shouldUseDelegationTokens() && !isInSafeMode() && getEditLog().isOpenForWrite(); boolean running = dtSecretManager.isRunning(); if (shouldRun && !running) { startSecretManager(); } } private void stopSecretManager() { if (dtSecretManager != null) { dtSecretManager.stopThreads(); } } /** * Start services common to both active and standby states */ void startCommonServices(Configuration conf, HAContext haContext) throws IOException { this.registerMBean(); // register the MBean for the FSNamesystemState writeLock(); this.haContext = haContext; try { nnResourceChecker = new NameNodeResourceChecker(conf); checkAvailableResources(); assert safeMode != null && !isPopulatingReplQueues(); StartupProgress prog = NameNode.getStartupProgress(); prog.beginPhase(Phase.SAFEMODE); long completeBlocksTotal = getCompleteBlocksTotal(); prog.setTotal(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS, completeBlocksTotal); setBlockTotal(completeBlocksTotal); blockManager.activate(conf); } finally { writeUnlock(); } registerMXBean(); DefaultMetricsSystem.instance().register(this); if (inodeAttributeProvider != null) { inodeAttributeProvider.start(); dir.setINodeAttributeProvider(inodeAttributeProvider); } snapshotManager.registerMXBean(); } /** * Stop services common to both active and standby states */ void stopCommonServices() { writeLock(); if (inodeAttributeProvider != null) { dir.setINodeAttributeProvider(null); inodeAttributeProvider.stop(); } try { if (blockManager != null) blockManager.close(); } finally { writeUnlock(); } RetryCache.clear(retryCache); } /** * Start services required in active state * @throws IOException */ void startActiveServices() throws IOException { startingActiveService = true; LOG.info("Starting services required for active state"); writeLock(); try { FSEditLog editLog = getFSImage().getEditLog(); if (!editLog.isOpenForWrite()) { // During startup, we're already open for write during initialization. editLog.initJournalsForWrite(); // May need to recover editLog.recoverUnclosedStreams(); LOG.info("Catching up to latest edits from old active before " + "taking over writer role in edits logs"); editLogTailer.catchupDuringFailover(); blockManager.setPostponeBlocksFromFuture(false); blockManager.getDatanodeManager().markAllDatanodesStale(); blockManager.clearQueues(); blockManager.processAllPendingDNMessages(); // Only need to re-process the queue, If not in SafeMode. if (!isInSafeMode()) { LOG.info("Reprocessing replication and invalidation queues"); initializeReplQueues(); } if (LOG.isDebugEnabled()) { LOG.debug("NameNode metadata after re-processing " + "replication and invalidation queues during failover:\n" + metaSaveAsString()); } long nextTxId = getFSImage().getLastAppliedTxId() + 1; LOG.info("Will take over writing edit logs at txnid " + nextTxId); editLog.setNextTxId(nextTxId); getFSImage().editLog.openForWrite(getEffectiveLayoutVersion()); } // Enable quota checks. dir.enableQuotaChecks(); if (haEnabled) { // Renew all of the leases before becoming active. // This is because, while we were in standby mode, // the leases weren't getting renewed on this NN. // Give them all a fresh start here. leaseManager.renewAllLeases(); } leaseManager.startMonitor(); startSecretManagerIfNecessary(); //ResourceMonitor required only at ActiveNN. 
See HDFS-2914 this.nnrmthread = new Daemon(new NameNodeResourceMonitor()); nnrmthread.start(); nnEditLogRoller = new Daemon(new NameNodeEditLogRoller( editLogRollerThreshold, editLogRollerInterval)); nnEditLogRoller.start(); if (lazyPersistFileScrubIntervalSec > 0) { lazyPersistFileScrubber = new Daemon(new LazyPersistFileScrubber( lazyPersistFileScrubIntervalSec)); lazyPersistFileScrubber.start(); } else { LOG.warn("Lazy persist file scrubber is disabled," + " configured scrub interval is zero."); } cacheManager.startMonitorThread(); blockManager.getDatanodeManager().setShouldSendCachingCommands(true); } finally { startingActiveService = false; checkSafeMode(); writeUnlock(); } } /** * Initialize replication queues. */ private void initializeReplQueues() { LOG.info("initializing replication queues"); blockManager.processMisReplicatedBlocks(); initializedReplQueues = true; } private boolean inActiveState() { return haContext != null && haContext.getState().getServiceState() == HAServiceState.ACTIVE; } /** * @return Whether the namenode is transitioning to active state and is in the * middle of the {@link #startActiveServices()} */ public boolean inTransitionToActive() { return haEnabled && inActiveState() && startingActiveService; } private boolean shouldUseDelegationTokens() { return UserGroupInformation.isSecurityEnabled() || alwaysUseDelegationTokensForTests; } /** * Stop services required in active state */ void stopActiveServices() { LOG.info("Stopping services started for active state"); writeLock(); try { stopSecretManager(); leaseManager.stopMonitor(); if (nnrmthread != null) { ((NameNodeResourceMonitor) nnrmthread.getRunnable()).stopMonitor(); nnrmthread.interrupt(); } if (nnEditLogRoller != null) { ((NameNodeEditLogRoller)nnEditLogRoller.getRunnable()).stop(); nnEditLogRoller.interrupt(); } if (lazyPersistFileScrubber != null) { ((LazyPersistFileScrubber) lazyPersistFileScrubber.getRunnable()).stop(); lazyPersistFileScrubber.interrupt(); } if (dir != null && getFSImage() != null) { if (getFSImage().editLog != null) { getFSImage().editLog.close(); } // Update the fsimage with the last txid that we wrote // so that the tailer starts from the right spot. getFSImage().updateLastAppliedTxIdFromWritten(); } if (cacheManager != null) { cacheManager.stopMonitorThread(); cacheManager.clearDirectiveStats(); } if (blockManager != null) { blockManager.getDatanodeManager().clearPendingCachingCommands(); blockManager.getDatanodeManager().setShouldSendCachingCommands(false); // Don't want to keep replication queues when not in Active. blockManager.clearQueues(); } initializedReplQueues = false; } finally { writeUnlock(); } } /** * Start services required in standby state * * @throws IOException */ void startStandbyServices(final Configuration conf) throws IOException { LOG.info("Starting services required for standby state"); if (!getFSImage().editLog.isOpenForRead()) { // During startup, we're already open for read. getFSImage().editLog.initSharedJournalsForRead(); } blockManager.setPostponeBlocksFromFuture(true); // Disable quota checks while in standby. dir.disableQuotaChecks(); editLogTailer = new EditLogTailer(this, conf); editLogTailer.start(); if (standbyShouldCheckpoint) { standbyCheckpointer = new StandbyCheckpointer(conf, this); standbyCheckpointer.start(); } } /** * Called when the NN is in Standby state and the editlog tailer tails the * OP_ROLLING_UPGRADE_START. 
*/ void triggerRollbackCheckpoint() { setNeedRollbackFsImage(true); if (standbyCheckpointer != null) { standbyCheckpointer.triggerRollbackCheckpoint(); } } /** * Called while the NN is in Standby state, but just about to be * asked to enter Active state. This cancels any checkpoints * currently being taken. */ void prepareToStopStandbyServices() throws ServiceFailedException { if (standbyCheckpointer != null) { standbyCheckpointer.cancelAndPreventCheckpoints( "About to leave standby state"); } } /** Stop services required in standby state */ void stopStandbyServices() throws IOException { LOG.info("Stopping services started for standby state"); if (standbyCheckpointer != null) { standbyCheckpointer.stop(); } if (editLogTailer != null) { editLogTailer.stop(); } if (dir != null && getFSImage() != null && getFSImage().editLog != null) { getFSImage().editLog.close(); } } @Override public void checkOperation(OperationCategory op) throws StandbyException { if (haContext != null) { // null in some unit tests haContext.checkOperation(op); } } /** * @throws RetriableException * If 1) The NameNode is in SafeMode, 2) HA is enabled, and 3) * NameNode is in active state * @throws SafeModeException * Otherwise if NameNode is in SafeMode. */ void checkNameNodeSafeMode(String errorMsg) throws RetriableException, SafeModeException { if (isInSafeMode()) { SafeModeException se = newSafemodeException(errorMsg); if (haEnabled && haContext != null && haContext.getState().getServiceState() == HAServiceState.ACTIVE && shouldRetrySafeMode(this.safeMode)) { throw new RetriableException(se); } else { throw se; } } } private SafeModeException newSafemodeException(String errorMsg) { return new SafeModeException(errorMsg + ". Name node is in safe " + "mode.\n" + safeMode.getTurnOffTip()); } boolean isPermissionEnabled() { return isPermissionEnabled; } /** * We already know that the safemode is on. We will throw a RetriableException * if the safemode is not manual or caused by low resource. */ private boolean shouldRetrySafeMode(SafeModeInfo safeMode) { if (safeMode == null) { return false; } else { return !safeMode.isManual() && !safeMode.areResourcesLow(); } } public static Collection<URI> getNamespaceDirs(Configuration conf) { return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY); } /** * Get all edits dirs which are required. If any shared edits dirs are * configured, these are also included in the set of required dirs. * * @param conf the HDFS configuration. * @return all required dirs. */ public static Collection<URI> getRequiredNamespaceEditsDirs(Configuration conf) { Set<URI> ret = new HashSet<URI>(); ret.addAll(getStorageDirs(conf, DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY)); ret.addAll(getSharedEditsDirs(conf)); return ret; } private static Collection<URI> getStorageDirs(Configuration conf, String propertyName) { Collection<String> dirNames = conf.getTrimmedStringCollection(propertyName); StartupOption startOpt = NameNode.getStartupOption(conf); if(startOpt == StartupOption.IMPORT) { // In case of IMPORT this will get rid of default directories // but will retain directories specified in hdfs-site.xml // When importing image from a checkpoint, the name-node can // start with empty set of storage directories. Configuration cE = new HdfsConfiguration(false); cE.addResource("core-default.xml"); cE.addResource("core-site.xml"); cE.addResource("hdfs-default.xml"); Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName); dirNames.removeAll(dirNames2); if(dirNames.isEmpty()) LOG.warn("!!! WARNING !!!" 
+ "\n\tThe NameNode currently runs without persistent storage." + "\n\tAny changes to the file system meta-data may be lost." + "\n\tRecommended actions:" + "\n\t\t- shutdown and restart NameNode with configured \"" + propertyName + "\" in hdfs-site.xml;" + "\n\t\t- use Backup Node as a persistent and up-to-date storage " + "of the file system meta-data."); } else if (dirNames.isEmpty()) { dirNames = Collections.singletonList( DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT); } return Util.stringCollectionAsURIs(dirNames); } /** * Return an ordered list of edits directories to write to. * The list is ordered such that all shared edits directories * are ordered before non-shared directories, and any duplicates * are removed. The order they are specified in the configuration * is retained. * @return Collection of shared edits directories. * @throws IOException if multiple shared edits directories are configured */ public static List<URI> getNamespaceEditsDirs(Configuration conf) throws IOException { return getNamespaceEditsDirs(conf, true); } public static List<URI> getNamespaceEditsDirs(Configuration conf, boolean includeShared) throws IOException { // Use a LinkedHashSet so that order is maintained while we de-dup // the entries. LinkedHashSet<URI> editsDirs = new LinkedHashSet<URI>(); if (includeShared) { List<URI> sharedDirs = getSharedEditsDirs(conf); // Fail until multiple shared edits directories are supported (HDFS-2782) if (sharedDirs.size() > 1) { throw new IOException( "Multiple shared edits directories are not yet supported"); } // First add the shared edits dirs. It's critical that the shared dirs // are added first, since JournalSet syncs them in the order they are listed, // and we need to make sure all edits are in place in the shared storage // before they are replicated locally. See HDFS-2874. for (URI dir : sharedDirs) { if (!editsDirs.add(dir)) { LOG.warn("Edits URI " + dir + " listed multiple times in " + DFS_NAMENODE_SHARED_EDITS_DIR_KEY + ". Ignoring duplicates."); } } } // Now add the non-shared dirs. for (URI dir : getStorageDirs(conf, DFS_NAMENODE_EDITS_DIR_KEY)) { if (!editsDirs.add(dir)) { LOG.warn("Edits URI " + dir + " listed multiple times in " + DFS_NAMENODE_SHARED_EDITS_DIR_KEY + " and " + DFS_NAMENODE_EDITS_DIR_KEY + ". Ignoring duplicates."); } } if (editsDirs.isEmpty()) { // If this is the case, no edit dirs have been explicitly configured. // Image dirs are to be used for edits too. return Lists.newArrayList(getNamespaceDirs(conf)); } else { return Lists.newArrayList(editsDirs); } } /** * Returns edit directories that are shared between primary and secondary. 
* @param conf configuration * @return collection of edit directories from {@code conf} */ public static List<URI> getSharedEditsDirs(Configuration conf) { // don't use getStorageDirs here, because we want an empty default // rather than the dir in /tmp Collection<String> dirNames = conf.getTrimmedStringCollection( DFS_NAMENODE_SHARED_EDITS_DIR_KEY); return Util.stringCollectionAsURIs(dirNames); } @Override public void readLock() { this.fsLock.readLock().lock(); } @Override public void readUnlock() { this.fsLock.readLock().unlock(); } @Override public void writeLock() { this.fsLock.writeLock().lock(); } @Override public void writeLockInterruptibly() throws InterruptedException { this.fsLock.writeLock().lockInterruptibly(); } @Override public void writeUnlock() { this.fsLock.writeLock().unlock(); } @Override public boolean hasWriteLock() { return this.fsLock.isWriteLockedByCurrentThread(); } @Override public boolean hasReadLock() { return this.fsLock.getReadHoldCount() > 0 || hasWriteLock(); } public int getReadHoldCount() { return this.fsLock.getReadHoldCount(); } public int getWriteHoldCount() { return this.fsLock.getWriteHoldCount(); } /** Lock the checkpoint lock */ public void cpLock() { this.cpLock.lock(); } /** Lock the checkpoint lock interrupibly */ public void cpLockInterruptibly() throws InterruptedException { this.cpLock.lockInterruptibly(); } /** Unlock the checkpoint lock */ public void cpUnlock() { this.cpLock.unlock(); } NamespaceInfo getNamespaceInfo() { readLock(); try { return unprotectedGetNamespaceInfo(); } finally { readUnlock(); } } /** * Version of @see #getNamespaceInfo() that is not protected by a lock. */ NamespaceInfo unprotectedGetNamespaceInfo() { return new NamespaceInfo(getFSImage().getStorage().getNamespaceID(), getClusterId(), getBlockPoolId(), getFSImage().getStorage().getCTime()); } /** * Close down this file system manager. * Causes heartbeat and lease daemons to stop; waits briefly for * them to finish, but a short timeout returns control back to caller. */ void close() { fsRunning = false; try { stopCommonServices(); if (smmthread != null) smmthread.interrupt(); } finally { // using finally to ensure we also wait for lease daemon try { stopActiveServices(); stopStandbyServices(); } catch (IOException ie) { } finally { IOUtils.cleanup(LOG, dir); IOUtils.cleanup(LOG, fsImage); } } } @Override public boolean isRunning() { return fsRunning; } @Override public boolean isInStandbyState() { if (haContext == null || haContext.getState() == null) { // We're still starting up. In this case, if HA is // on for the cluster, we always start in standby. Otherwise // start in active. 
return haEnabled; } return HAServiceState.STANDBY == haContext.getState().getServiceState(); } /** * Dump all metadata into specified file */ void metaSave(String filename) throws IOException { checkSuperuserPrivilege(); checkOperation(OperationCategory.UNCHECKED); writeLock(); try { checkOperation(OperationCategory.UNCHECKED); File file = new File(System.getProperty("hadoop.log.dir"), filename); PrintWriter out = new PrintWriter(new BufferedWriter( new OutputStreamWriter(new FileOutputStream(file), Charsets.UTF_8))); metaSave(out); out.flush(); out.close(); } finally { writeUnlock(); } } private void metaSave(PrintWriter out) { assert hasWriteLock(); long totalInodes = this.dir.totalInodes(); long totalBlocks = this.getBlocksTotal(); out.println(totalInodes + " files and directories, " + totalBlocks + " blocks = " + (totalInodes + totalBlocks) + " total"); blockManager.metaSave(out); } private String metaSaveAsString() { StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw); metaSave(pw); pw.flush(); return sw.toString(); } FsServerDefaults getServerDefaults() throws StandbyException { checkOperation(OperationCategory.READ); return serverDefaults; } ///////////////////////////////////////////////////////// // // These methods are called by HadoopFS clients // ///////////////////////////////////////////////////////// /** * Set permissions for an existing file. * @throws IOException */ void setPermission(String src, FsPermission permission) throws IOException { HdfsFileStatus auditStat; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set permission for " + src); auditStat = FSDirAttrOp.setPermission(dir, src, permission); } catch (AccessControlException e) { logAuditEvent(false, "setPermission", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "setPermission", src, null, auditStat); } /** * Set owner for an existing file. * @throws IOException */ void setOwner(String src, String username, String group) throws IOException { HdfsFileStatus auditStat; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set owner for " + src); auditStat = FSDirAttrOp.setOwner(dir, src, username, group); } catch (AccessControlException e) { logAuditEvent(false, "setOwner", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "setOwner", src, null, auditStat); } /** * Get block locations within the specified range. 
* @see ClientProtocol#getBlockLocations(String, long, long) */ LocatedBlocks getBlockLocations(String clientMachine, String srcArg, long offset, long length) throws IOException { checkOperation(OperationCategory.READ); GetBlockLocationsResult res = null; FSPermissionChecker pc = getPermissionChecker(); readLock(); try { checkOperation(OperationCategory.READ); res = FSDirStatAndListingOp.getBlockLocations( dir, pc, srcArg, offset, length, true); if (isInSafeMode()) { for (LocatedBlock b : res.blocks.getLocatedBlocks()) { // if safemode & no block locations yet then throw safemodeException if ((b.getLocations() == null) || (b.getLocations().length == 0)) { SafeModeException se = newSafemodeException( "Zero blocklocations for " + srcArg); if (haEnabled && haContext != null && haContext.getState().getServiceState() == HAServiceState.ACTIVE) { throw new RetriableException(se); } else { throw se; } } } } } catch (AccessControlException e) { logAuditEvent(false, "open", srcArg); throw e; } finally { readUnlock(); } logAuditEvent(true, "open", srcArg); if (!isInSafeMode() && res.updateAccessTime()) { byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath( srcArg); String src = srcArg; writeLock(); final long now = now(); try { checkOperation(OperationCategory.WRITE); /** * Resolve the path again and update the atime only when the file * exists. * * XXX: Races can still occur even after resolving the path again. * For example: * * <ul> * <li>Get the block location for "/a/b"</li> * <li>Rename "/a/b" to "/c/b"</li> * <li>The second resolution still points to "/a/b", which is * wrong.</li> * </ul> * * The behavior is incorrect but consistent with the one before * HDFS-7463. A better fix is to change the edit log of SetTime to * use inode id instead of a path. */ src = dir.resolvePath(pc, srcArg, pathComponents); final INodesInPath iip = dir.getINodesInPath(src, true); INode inode = iip.getLastINode(); boolean updateAccessTime = inode != null && now > inode.getAccessTime() + dir.getAccessTimePrecision(); if (!isInSafeMode() && updateAccessTime) { boolean changed = FSDirAttrOp.setTimes(dir, inode, -1, now, false, iip.getLatestSnapshotId()); if (changed) { getEditLog().logTimes(src, -1, now); } } } catch (Throwable e) { LOG.warn("Failed to update the access time of " + src, e); } finally { writeUnlock(); } } LocatedBlocks blocks = res.blocks; if (blocks != null) { blockManager.getDatanodeManager().sortLocatedBlocks( clientMachine, blocks.getLocatedBlocks()); // lastBlock is not part of getLocatedBlocks(), might need to sort it too LocatedBlock lastBlock = blocks.getLastLocatedBlock(); if (lastBlock != null) { ArrayList<LocatedBlock> lastBlockList = Lists.newArrayList(lastBlock); blockManager.getDatanodeManager().sortLocatedBlocks( clientMachine, lastBlockList); } } return blocks; } /** * Moves all the blocks from {@code srcs} and appends them to {@code target} * To avoid rollbacks we will verify validity of ALL of the args * before we start actual move. 
* * This does not support ".inodes" relative path * @param target target to concat into * @param srcs file that will be concatenated * @throws IOException on error */ void concat(String target, String [] srcs, boolean logRetryCache) throws IOException { waitForLoadingFSImage(); HdfsFileStatus stat = null; boolean success = false; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot concat " + target); stat = FSDirConcatOp.concat(dir, target, srcs, logRetryCache); success = true; } finally { writeUnlock(); if (success) { getEditLog().logSync(); } logAuditEvent(success, "concat", Arrays.toString(srcs), target, stat); } } /** * stores the modification and access time for this inode. * The access time is precise up to an hour. The transaction, if needed, is * written to the edits log but is not flushed. */ void setTimes(String src, long mtime, long atime) throws IOException { HdfsFileStatus auditStat; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set times " + src); auditStat = FSDirAttrOp.setTimes(dir, src, mtime, atime); } catch (AccessControlException e) { logAuditEvent(false, "setTimes", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "setTimes", src, null, auditStat); } /** * Create a symbolic link. */ @SuppressWarnings("deprecation") void createSymlink(String target, String link, PermissionStatus dirPerms, boolean createParent, boolean logRetryCache) throws IOException { if (!FileSystem.areSymlinksEnabled()) { throw new UnsupportedOperationException("Symlinks not supported"); } HdfsFileStatus auditStat = null; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot create symlink " + link); auditStat = FSDirSymlinkOp.createSymlinkInt(this, target, link, dirPerms, createParent, logRetryCache); } catch (AccessControlException e) { logAuditEvent(false, "createSymlink", link, target, null); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "createSymlink", link, target, auditStat); } /** * Set replication for an existing file. * * The NameNode sets new replication and schedules either replication of * under-replicated data blocks or removal of the excessive block copies * if the blocks are over-replicated. * * @see ClientProtocol#setReplication(String, short) * @param src file name * @param replication new replication * @return true if successful; * false if file does not exist or is a directory */ boolean setReplication(final String src, final short replication) throws IOException { boolean success = false; waitForLoadingFSImage(); checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set replication for " + src); success = FSDirAttrOp.setReplication(dir, blockManager, src, replication); } catch (AccessControlException e) { logAuditEvent(false, "setReplication", src); throw e; } finally { writeUnlock(); } if (success) { getEditLog().logSync(); logAuditEvent(true, "setReplication", src); } return success; } /** * Truncate file to a lower length. * Truncate cannot be reverted / recovered from as it causes data loss. * Truncation at block boundary is atomic, otherwise it requires * block recovery to truncate the last block of the file. * * @return true if client does not need to wait for block recovery, * false if client needs to wait for block recovery. 
*/ boolean truncate(String src, long newLength, String clientName, String clientMachine, long mtime) throws IOException, UnresolvedLinkException { requireEffectiveLayoutVersionForFeature(Feature.TRUNCATE); final FSDirTruncateOp.TruncateResult r; try { NameNode.stateChangeLog.debug( "DIR* NameSystem.truncate: src={} newLength={}", src, newLength); if (newLength < 0) { throw new HadoopIllegalArgumentException( "Cannot truncate to a negative file size: " + newLength + "."); } final FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); writeLock(); BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot truncate for " + src); r = FSDirTruncateOp.truncate(this, src, newLength, clientName, clientMachine, mtime, toRemoveBlocks, pc); } finally { writeUnlock(); } getEditLog().logSync(); if (!toRemoveBlocks.getToDeleteList().isEmpty()) { removeBlocks(toRemoveBlocks); toRemoveBlocks.clear(); } logAuditEvent(true, "truncate", src, null, r.getFileStatus()); } catch (AccessControlException e) { logAuditEvent(false, "truncate", src); throw e; } return r.getResult(); } /** * Set the storage policy for a file or a directory. * * @param src file/directory path * @param policyName storage policy name */ void setStoragePolicy(String src, String policyName) throws IOException { HdfsFileStatus auditStat; waitForLoadingFSImage(); checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set storage policy for " + src); auditStat = FSDirAttrOp.setStoragePolicy(dir, blockManager, src, policyName); } catch (AccessControlException e) { logAuditEvent(false, "setStoragePolicy", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "setStoragePolicy", src, null, auditStat); } /** * Get the storage policy for a file or a directory. * * @param src * file/directory path * @return storage policy object */ BlockStoragePolicy getStoragePolicy(String src) throws IOException { checkOperation(OperationCategory.READ); waitForLoadingFSImage(); readLock(); try { checkOperation(OperationCategory.READ); return FSDirAttrOp.getStoragePolicy(dir, blockManager, src); } finally { readUnlock(); } } /** * @return All the existing block storage policies */ BlockStoragePolicy[] getStoragePolicies() throws IOException { checkOperation(OperationCategory.READ); waitForLoadingFSImage(); readLock(); try { checkOperation(OperationCategory.READ); return FSDirAttrOp.getStoragePolicies(blockManager); } finally { readUnlock(); } } long getPreferredBlockSize(String src) throws IOException { checkOperation(OperationCategory.READ); readLock(); try { checkOperation(OperationCategory.READ); return FSDirAttrOp.getPreferredBlockSize(dir, src); } finally { readUnlock(); } } /** * If the file is within an encryption zone, select the appropriate * CryptoProtocolVersion from the list provided by the client. Since the * client may be newer, we need to handle unknown versions. 
* * @param zone EncryptionZone of the file * @param supportedVersions List of supported protocol versions * @return chosen protocol version * @throws IOException */ CryptoProtocolVersion chooseProtocolVersion( EncryptionZone zone, CryptoProtocolVersion[] supportedVersions) throws UnknownCryptoProtocolVersionException, UnresolvedLinkException, SnapshotAccessControlException { Preconditions.checkNotNull(zone); Preconditions.checkNotNull(supportedVersions); // Right now, we only support a single protocol version, // so simply look for it in the list of provided options final CryptoProtocolVersion required = zone.getVersion(); for (CryptoProtocolVersion c : supportedVersions) { if (c.equals(CryptoProtocolVersion.UNKNOWN)) { if (LOG.isDebugEnabled()) { LOG.debug("Ignoring unknown CryptoProtocolVersion provided by " + "client: " + c.getUnknownValue()); } continue; } if (c.equals(required)) { return c; } } throw new UnknownCryptoProtocolVersionException( "No crypto protocol versions provided by the client are supported." + " Client provided: " + Arrays.toString(supportedVersions) + " NameNode supports: " + Arrays.toString(CryptoProtocolVersion .values())); } /** * Invoke KeyProvider APIs to generate an encrypted data encryption key for an * encryption zone. Should not be called with any locks held. * * @param ezKeyName key name of an encryption zone * @return New EDEK, or null if ezKeyName is null * @throws IOException */ private EncryptedKeyVersion generateEncryptedDataEncryptionKey(String ezKeyName) throws IOException { if (ezKeyName == null) { return null; } EncryptedKeyVersion edek = null; try { edek = provider.generateEncryptedKey(ezKeyName); } catch (GeneralSecurityException e) { throw new IOException(e); } Preconditions.checkNotNull(edek); return edek; } /** * Create a new file entry in the namespace. * * For description of parameters and exceptions thrown see * {@link ClientProtocol#create}, except it returns valid file status upon * success */ HdfsFileStatus startFile(String src, PermissionStatus permissions, String holder, String clientMachine, EnumSet<CreateFlag> flag, boolean createParent, short replication, long blockSize, CryptoProtocolVersion[] supportedVersions, boolean logRetryCache) throws IOException { HdfsFileStatus status; try { status = startFileInt(src, permissions, holder, clientMachine, flag, createParent, replication, blockSize, supportedVersions, logRetryCache); } catch (AccessControlException e) { logAuditEvent(false, "create", src); throw e; } logAuditEvent(true, "create", src, null, status); return status; } private HdfsFileStatus startFileInt(final String src, PermissionStatus permissions, String holder, String clientMachine, EnumSet<CreateFlag> flag, boolean createParent, short replication, long blockSize, CryptoProtocolVersion[] supportedVersions, boolean logRetryCache) throws IOException { if (NameNode.stateChangeLog.isDebugEnabled()) { StringBuilder builder = new StringBuilder(); builder.append("DIR* NameSystem.startFile: src=").append(src) .append(", holder=").append(holder) .append(", clientMachine=").append(clientMachine) .append(", createParent=").append(createParent) .append(", replication=").append(replication) .append(", createFlag=").append(flag.toString()) .append(", blockSize=").append(blockSize) .append(", supportedVersions=") .append(supportedVersions == null ? 
null : Arrays.toString (supportedVersions)); NameNode.stateChangeLog.debug(builder.toString()); } if (!DFSUtil.isValidName(src)) { throw new InvalidPathException(src); } blockManager.verifyReplication(src, replication, clientMachine); if (blockSize < minBlockSize) { throw new IOException("Specified block size is less than configured" + " minimum value (" + DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY + "): " + blockSize + " < " + minBlockSize); } FSPermissionChecker pc = getPermissionChecker(); waitForLoadingFSImage(); /** * If the file is in an encryption zone, we optimistically create an * EDEK for the file by calling out to the configured KeyProvider. * Since this typically involves doing an RPC, we take the readLock * initially, then drop it to do the RPC. * * Since the path can flip-flop between being in an encryption zone and not * in the meantime, we need to recheck the preconditions when we retake the * lock to do the create. If the preconditions are not met, we throw a * special RetryStartFileException to ask the DFSClient to try the create * again later. */ FSDirWriteFileOp.EncryptionKeyInfo ezInfo = null; if (provider != null) { readLock(); try { checkOperation(OperationCategory.READ); ezInfo = FSDirWriteFileOp .getEncryptionKeyInfo(this, pc, src, supportedVersions); } finally { readUnlock(); } // Generate EDEK if necessary while not holding the lock if (ezInfo != null) { ezInfo.edek = generateEncryptedDataEncryptionKey(ezInfo.ezKeyName); } EncryptionFaultInjector.getInstance().startFileAfterGenerateKey(); } boolean skipSync = false; HdfsFileStatus stat = null; // Proceed with the create, using the computed cipher suite and // generated EDEK BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo(); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot create file" + src); dir.writeLock(); try { stat = FSDirWriteFileOp.startFile(this, pc, src, permissions, holder, clientMachine, flag, createParent, replication, blockSize, ezInfo, toRemoveBlocks, logRetryCache); } finally { dir.writeUnlock(); } } catch (IOException e) { skipSync = e instanceof StandbyException; throw e; } finally { writeUnlock(); // There might be transactions logged while trying to recover the lease. // They need to be sync'ed even when an exception was thrown. if (!skipSync) { getEditLog().logSync(); removeBlocks(toRemoveBlocks); toRemoveBlocks.clear(); } } return stat; } /** * Recover lease; * Immediately revoke the lease of the current lease holder and start lease * recovery so that the file can be forced to be closed. * * @param src the path of the file to start lease recovery * @param holder the lease holder's name * @param clientMachine the client machine's name * @return true if the file is already closed or * if the lease can be released and the file can be closed. 
* @throws IOException */ boolean recoverLease(String src, String holder, String clientMachine) throws IOException { if (!DFSUtil.isValidName(src)) { throw new IOException("Invalid file name: " + src); } boolean skipSync = false; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot recover the lease of " + src); src = dir.resolvePath(pc, src, pathComponents); final INodesInPath iip = dir.getINodesInPath4Write(src); final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src); if (!inode.isUnderConstruction()) { return true; } if (isPermissionEnabled) { dir.checkPathAccess(pc, iip, FsAction.WRITE); } return recoverLeaseInternal(RecoverLeaseOp.RECOVER_LEASE, iip, src, holder, clientMachine, true); } catch (StandbyException se) { skipSync = true; throw se; } finally { writeUnlock(); // There might be transactions logged while trying to recover the lease. // They need to be sync'ed even when an exception was thrown. if (!skipSync) { getEditLog().logSync(); } } } enum RecoverLeaseOp { CREATE_FILE, APPEND_FILE, TRUNCATE_FILE, RECOVER_LEASE; private String getExceptionMessage(String src, String holder, String clientMachine, String reason) { return "Failed to " + this + " " + src + " for " + holder + " on " + clientMachine + " because " + reason; } } boolean recoverLeaseInternal(RecoverLeaseOp op, INodesInPath iip, String src, String holder, String clientMachine, boolean force) throws IOException { assert hasWriteLock(); INodeFile file = iip.getLastINode().asFile(); if (file.isUnderConstruction()) { // // If the file is under construction , then it must be in our // leases. Find the appropriate lease record. // Lease lease = leaseManager.getLease(holder); if (!force && lease != null) { Lease leaseFile = leaseManager.getLease(file); if (leaseFile != null && leaseFile.equals(lease)) { // We found the lease for this file but the original // holder is trying to obtain it again. throw new AlreadyBeingCreatedException( op.getExceptionMessage(src, holder, clientMachine, holder + " is already the current lease holder.")); } } // // Find the original holder. // FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature(); String clientName = uc.getClientName(); lease = leaseManager.getLease(clientName); if (lease == null) { throw new AlreadyBeingCreatedException( op.getExceptionMessage(src, holder, clientMachine, "the file is under construction but no leases found.")); } if (force) { // close now: no need to wait for soft lease expiration and // close only the file src LOG.info("recoverLease: " + lease + ", src=" + src + " from client " + clientName); return internalReleaseLease(lease, src, iip, holder); } else { assert lease.getHolder().equals(clientName) : "Current lease holder " + lease.getHolder() + " does not match file creator " + clientName; // // If the original holder has not renewed in the last SOFTLIMIT // period, then start lease recovery. // if (lease.expiredSoftLimit()) { LOG.info("startFile: recover " + lease + ", src=" + src + " client " + clientName); if (internalReleaseLease(lease, src, iip, null)) { return true; } else { throw new RecoveryInProgressException( op.getExceptionMessage(src, holder, clientMachine, "lease recovery is in progress. 
Try again later.")); } } else { final BlockInfo lastBlock = file.getLastBlock(); if (lastBlock != null && lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) { throw new RecoveryInProgressException( op.getExceptionMessage(src, holder, clientMachine, "another recovery is in progress by " + clientName + " on " + uc.getClientMachine())); } else { throw new AlreadyBeingCreatedException( op.getExceptionMessage(src, holder, clientMachine, "this file lease is currently owned by " + clientName + " on " + uc.getClientMachine())); } } } } else { return true; } } /** * Append to an existing file in the namespace. */ LastBlockWithStatus appendFile(String srcArg, String holder, String clientMachine, EnumSet<CreateFlag> flag, boolean logRetryCache) throws IOException { boolean newBlock = flag.contains(CreateFlag.NEW_BLOCK); if (newBlock) { requireEffectiveLayoutVersionForFeature(Feature.APPEND_NEW_BLOCK); } if (!supportAppends) { throw new UnsupportedOperationException( "Append is not enabled on this NameNode. Use the " + DFS_SUPPORT_APPEND_KEY + " configuration option to enable it."); } NameNode.stateChangeLog.debug( "DIR* NameSystem.appendFile: src={}, holder={}, clientMachine={}", srcArg, holder, clientMachine); try { boolean skipSync = false; LastBlockWithStatus lbs = null; final FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot append to file" + srcArg); lbs = FSDirAppendOp.appendFile(this, srcArg, pc, holder, clientMachine, newBlock, logRetryCache); } catch (StandbyException se) { skipSync = true; throw se; } finally { writeUnlock(); // There might be transactions logged while trying to recover the lease // They need to be sync'ed even when an exception was thrown. if (!skipSync) { getEditLog().logSync(); } } logAuditEvent(true, "append", srcArg); return lbs; } catch (AccessControlException e) { logAuditEvent(false, "append", srcArg); throw e; } } ExtendedBlock getExtendedBlock(Block blk) { return new ExtendedBlock(blockPoolId, blk); } void setBlockPoolId(String bpid) { blockPoolId = bpid; blockManager.setBlockPoolId(blockPoolId); } /** * The client would like to obtain an additional block for the indicated * filename (which is being written-to). Return an array that consists * of the block, plus a set of machines. The first on this list should * be where the client writes data. Subsequent items in the list must * be provided in the connection to the first datanode. * * Make sure the previous blocks have been reported by datanodes and * are replicated. Will return an empty 2-elt array if we want the * client to "try again later". */ LocatedBlock getAdditionalBlock( String src, long fileId, String clientName, ExtendedBlock previous, DatanodeInfo[] excludedNodes, String[] favoredNodes) throws IOException { NameNode.stateChangeLog.debug("BLOCK* getAdditionalBlock: {} inodeId {}" + " for {}", src, fileId, clientName); waitForLoadingFSImage(); LocatedBlock[] onRetryBlock = new LocatedBlock[1]; FSDirWriteFileOp.ValidateAddBlockResult r; FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.READ); readLock(); try { checkOperation(OperationCategory.READ); r = FSDirWriteFileOp.validateAddBlock(this, pc, src, fileId, clientName, previous, onRetryBlock); } finally { readUnlock(); } if (r == null) { assert onRetryBlock[0] != null : "Retry block is null"; // This is a retry. Just return the last block. 
return onRetryBlock[0]; } DatanodeStorageInfo[] targets = FSDirWriteFileOp.chooseTargetForNewBlock( blockManager, src, excludedNodes, favoredNodes, r); checkOperation(OperationCategory.WRITE); writeLock(); LocatedBlock lb; try { checkOperation(OperationCategory.WRITE); lb = FSDirWriteFileOp.storeAllocatedBlock( this, src, fileId, clientName, previous, targets); } finally { writeUnlock(); } getEditLog().logSync(); return lb; } /** @see ClientProtocol#getAdditionalDatanode */ LocatedBlock getAdditionalDatanode(String src, long fileId, final ExtendedBlock blk, final DatanodeInfo[] existings, final String[] storageIDs, final Set<Node> excludes, final int numAdditionalNodes, final String clientName ) throws IOException { //check if the feature is enabled dtpReplaceDatanodeOnFailure.checkEnabled(); Node clientnode = null; String clientMachine; final long preferredblocksize; final byte storagePolicyID; final List<DatanodeStorageInfo> chosen; checkOperation(OperationCategory.READ); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); FSPermissionChecker pc = getPermissionChecker(); readLock(); try { checkOperation(OperationCategory.READ); //check safe mode checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk); src = dir.resolvePath(pc, src, pathComponents); //check lease final INode inode; if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) { // Older clients may not have given us an inode ID to work with. // In this case, we have to try to resolve the path and hope it // hasn't changed or been deleted since the file was opened for write. inode = dir.getINode(src); } else { inode = dir.getInode(fileId); if (inode != null) src = inode.getFullPathName(); } final INodeFile file = checkLease(src, clientName, inode, fileId); clientMachine = file.getFileUnderConstructionFeature().getClientMachine(); clientnode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine); preferredblocksize = file.getPreferredBlockSize(); storagePolicyID = file.getStoragePolicyID(); //find datanode storages final DatanodeManager dm = blockManager.getDatanodeManager(); chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs)); } finally { readUnlock(); } if (clientnode == null) { clientnode = FSDirWriteFileOp.getClientNode(blockManager, clientMachine); } // choose new datanodes. 
final DatanodeStorageInfo[] targets = blockManager.chooseTarget4AdditionalDatanode( src, numAdditionalNodes, clientnode, chosen, excludes, preferredblocksize, storagePolicyID); final LocatedBlock lb = BlockManager.newLocatedBlock( blk, targets, -1, false); blockManager.setBlockToken(lb, BlockTokenIdentifier.AccessMode.COPY); return lb; } /** * The client would like to let go of the given block */ void abandonBlock(ExtendedBlock b, long fileId, String src, String holder) throws IOException { NameNode.stateChangeLog.debug( "BLOCK* NameSystem.abandonBlock: {} of file {}", b, src); waitForLoadingFSImage(); checkOperation(OperationCategory.WRITE); FSPermissionChecker pc = getPermissionChecker(); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot abandon block " + b + " for file" + src); FSDirWriteFileOp.abandonBlock(dir, pc, b, fileId, src, holder); NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: {} is" + " removed from pendingCreates", b); } finally { writeUnlock(); } getEditLog().logSync(); } INodeFile checkLease( String src, String holder, INode inode, long fileId) throws LeaseExpiredException, FileNotFoundException { assert hasReadLock(); final String ident = src + " (inode " + fileId + ")"; if (inode == null) { Lease lease = leaseManager.getLease(holder); throw new FileNotFoundException( "No lease on " + ident + ": File does not exist. " + (lease != null ? lease.toString() : "Holder " + holder + " does not have any open files.")); } if (!inode.isFile()) { Lease lease = leaseManager.getLease(holder); throw new LeaseExpiredException( "No lease on " + ident + ": INode is not a regular file. " + (lease != null ? lease.toString() : "Holder " + holder + " does not have any open files.")); } final INodeFile file = inode.asFile(); if (!file.isUnderConstruction()) { Lease lease = leaseManager.getLease(holder); throw new LeaseExpiredException( "No lease on " + ident + ": File is not open for writing. " + (lease != null ? lease.toString() : "Holder " + holder + " does not have any open files.")); } // No further modification is allowed on a deleted file. // A file is considered deleted, if it is not in the inodeMap or is marked // as deleted in the snapshot feature. if (isFileDeleted(file)) { throw new FileNotFoundException(src); } String clientName = file.getFileUnderConstructionFeature().getClientName(); if (holder != null && !clientName.equals(holder)) { throw new LeaseExpiredException("Lease mismatch on " + ident + " owned by " + clientName + " but is accessed by " + holder); } return file; } /** * Complete in-progress write to the given file. * @return true if successful, false if the client should continue to retry * (e.g if not all blocks have reached minimum replication yet) * @throws IOException on error (eg lease mismatch, file not open, file deleted) */ boolean completeFile(final String src, String holder, ExtendedBlock last, long fileId) throws IOException { boolean success = false; checkOperation(OperationCategory.WRITE); waitForLoadingFSImage(); FSPermissionChecker pc = getPermissionChecker(); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot complete file " + src); success = FSDirWriteFileOp.completeFile(this, pc, src, holder, last, fileId); } finally { writeUnlock(); } getEditLog().logSync(); return success; } /** * Create new block with a unique block id and a new generation stamp. 
*/ Block createNewBlock() throws IOException { assert hasWriteLock(); Block b = new Block(nextBlockId(), 0, 0); // Increment the generation stamp for every new block. b.setGenerationStamp(nextGenerationStamp(false)); return b; } /** * Check that the indicated file's blocks are present and * replicated. If not, return false. If checkall is true, then check * all blocks, otherwise check only penultimate block. */ boolean checkFileProgress(String src, INodeFile v, boolean checkall) { assert hasReadLock(); if (checkall) { return blockManager.checkBlocksProperlyReplicated(src, v .getBlocks()); } else { // check the penultimate block of this file BlockInfo b = v.getPenultimateBlock(); return b == null || blockManager.checkBlocksProperlyReplicated( src, new BlockInfo[] { b }); } } /** * Change the indicated filename. * @deprecated Use {@link #renameTo(String, String, boolean, * Options.Rename...)} instead. */ @Deprecated boolean renameTo(String src, String dst, boolean logRetryCache) throws IOException { waitForLoadingFSImage(); FSDirRenameOp.RenameOldResult ret = null; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot rename " + src); ret = FSDirRenameOp.renameToInt(dir, src, dst, logRetryCache); } catch (AccessControlException e) { logAuditEvent(false, "rename", src, dst, null); throw e; } finally { writeUnlock(); } boolean success = ret != null && ret.success; if (success) { getEditLog().logSync(); } logAuditEvent(success, "rename", src, dst, ret == null ? null : ret.auditStat); return success; } void renameTo(final String src, final String dst, boolean logRetryCache, Options.Rename... options) throws IOException { waitForLoadingFSImage(); Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> res = null; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot rename " + src); res = FSDirRenameOp.renameToInt(dir, src, dst, logRetryCache, options); } catch (AccessControlException e) { logAuditEvent(false, "rename (options=" + Arrays.toString(options) + ")", src, dst, null); throw e; } finally { writeUnlock(); } getEditLog().logSync(); BlocksMapUpdateInfo collectedBlocks = res.getKey(); HdfsFileStatus auditStat = res.getValue(); if (!collectedBlocks.getToDeleteList().isEmpty()) { removeBlocks(collectedBlocks); collectedBlocks.clear(); } logAuditEvent(true, "rename (options=" + Arrays.toString(options) + ")", src, dst, auditStat); } /** * Remove the indicated file from namespace. 
* * @see ClientProtocol#delete(String, boolean) for detailed description and * description of exceptions */ boolean delete(String src, boolean recursive, boolean logRetryCache) throws IOException { waitForLoadingFSImage(); BlocksMapUpdateInfo toRemovedBlocks = null; writeLock(); boolean ret = false; try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot delete " + src); toRemovedBlocks = FSDirDeleteOp.delete( this, src, recursive, logRetryCache); ret = toRemovedBlocks != null; } catch (AccessControlException e) { logAuditEvent(false, "delete", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); if (toRemovedBlocks != null) { removeBlocks(toRemovedBlocks); // Incremental deletion of blocks } logAuditEvent(true, "delete", src); return ret; } FSPermissionChecker getPermissionChecker() throws AccessControlException { return dir.getPermissionChecker(); } /** * From the given list, incrementally remove the blocks from blockManager * Writelock is dropped and reacquired every BLOCK_DELETION_INCREMENT to * ensure that other waiters on the lock can get in. See HDFS-2938 * * @param blocks * An instance of {@link BlocksMapUpdateInfo} which contains a list * of blocks that need to be removed from blocksMap */ void removeBlocks(BlocksMapUpdateInfo blocks) { List<BlockInfo> toDeleteList = blocks.getToDeleteList(); Iterator<BlockInfo> iter = toDeleteList.iterator(); while (iter.hasNext()) { writeLock(); try { for (int i = 0; i < BLOCK_DELETION_INCREMENT && iter.hasNext(); i++) { blockManager.removeBlock(iter.next()); } } finally { writeUnlock(); } } } /** * Remove leases and inodes related to a given path * @param removedUCFiles INodes whose leases need to be released * @param removedINodes Containing the list of inodes to be removed from * inodesMap * @param acquireINodeMapLock Whether to acquire the lock for inode removal */ void removeLeasesAndINodes(List<Long> removedUCFiles, List<INode> removedINodes, final boolean acquireINodeMapLock) { assert hasWriteLock(); leaseManager.removeLeases(removedUCFiles); // remove inodes from inodesMap if (removedINodes != null) { if (acquireINodeMapLock) { dir.writeLock(); } try { dir.removeFromInodeMap(removedINodes); } finally { if (acquireINodeMapLock) { dir.writeUnlock(); } } removedINodes.clear(); } } /** * Removes the blocks from blocksmap and updates the safemode blocks total * * @param blocks * An instance of {@link BlocksMapUpdateInfo} which contains a list * of blocks that need to be removed from blocksMap */ void removeBlocksAndUpdateSafemodeTotal(BlocksMapUpdateInfo blocks) { assert hasWriteLock(); // In the case that we are a Standby tailing edits from the // active while in safe-mode, we need to track the total number // of blocks and safe blocks in the system. boolean trackBlockCounts = isSafeModeTrackingBlocks(); int numRemovedComplete = 0, numRemovedSafe = 0; for (BlockInfo b : blocks.getToDeleteList()) { if (trackBlockCounts) { if (b.isComplete()) { numRemovedComplete++; if (blockManager.checkMinReplication(b)) { numRemovedSafe++; } } } blockManager.removeBlock(b); } if (trackBlockCounts) { if (LOG.isDebugEnabled()) { LOG.debug("Adjusting safe-mode totals for deletion." + "decreasing safeBlocks by " + numRemovedSafe + ", totalBlocks by " + numRemovedComplete); } adjustSafeModeBlockTotals(-numRemovedSafe, -numRemovedComplete); } } /** * @see SafeModeInfo#shouldIncrementallyTrackBlocks */ private boolean isSafeModeTrackingBlocks() { if (!haEnabled) { // Never track blocks incrementally in non-HA code. 
return false; } SafeModeInfo sm = this.safeMode; return sm != null && sm.shouldIncrementallyTrackBlocks(); } /** * Get the file info for a specific file. * * @param src The string representation of the path to the file * @param resolveLink whether to throw UnresolvedLinkException * if src refers to a symlink * * @throws AccessControlException if access is denied * @throws UnresolvedLinkException if a symlink is encountered. * * @return object containing information regarding the file * or null if file not found * @throws StandbyException */ HdfsFileStatus getFileInfo(final String src, boolean resolveLink) throws IOException { checkOperation(OperationCategory.READ); HdfsFileStatus stat = null; readLock(); try { checkOperation(OperationCategory.READ); stat = FSDirStatAndListingOp.getFileInfo(dir, src, resolveLink); } catch (AccessControlException e) { logAuditEvent(false, "getfileinfo", src); throw e; } finally { readUnlock(); } logAuditEvent(true, "getfileinfo", src); return stat; } /** * Returns true if the file is closed */ boolean isFileClosed(final String src) throws IOException { checkOperation(OperationCategory.READ); readLock(); try { checkOperation(OperationCategory.READ); return FSDirStatAndListingOp.isFileClosed(dir, src); } catch (AccessControlException e) { logAuditEvent(false, "isFileClosed", src); throw e; } finally { readUnlock(); } } /** * Create all the necessary directories */ boolean mkdirs(String src, PermissionStatus permissions, boolean createParent) throws IOException { HdfsFileStatus auditStat = null; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot create directory " + src); auditStat = FSDirMkdirOp.mkdirs(this, src, permissions, createParent); } catch (AccessControlException e) { logAuditEvent(false, "mkdirs", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "mkdirs", src, null, auditStat); return true; } /** * Get the content summary for a specific file/dir. * * @param src The string representation of the path to the file * * @throws AccessControlException if access is denied * @throws UnresolvedLinkException if a symlink is encountered. * @throws FileNotFoundException if no file exists * @throws StandbyException * @throws IOException for issues with writing to the audit log * * @return object containing information regarding the file * or null if file not found */ ContentSummary getContentSummary(final String src) throws IOException { readLock(); boolean success = true; try { return FSDirStatAndListingOp.getContentSummary(dir, src); } catch (AccessControlException ace) { success = false; throw ace; } finally { readUnlock(); logAuditEvent(success, "contentSummary", src); } } /** * Set the namespace quota and storage space quota for a directory. * See {@link ClientProtocol#setQuota(String, long, long, StorageType)} for the * contract. * * Note: This does not support ".inodes" relative path. 
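   * <p>
   * Editorial illustration (not part of the original javadoc): this is the
   * server-side path behind {@code hdfs dfsadmin -setQuota} and
   * {@code -setSpaceQuota}. Passing {@code HdfsConstants.QUOTA_DONT_SET}
   * leaves the corresponding quota unchanged, while
   * {@code HdfsConstants.QUOTA_RESET} removes it, e.g.
   * <pre>
   * // hypothetical admin-side call: set only the namespace quota
   * dfs.setQuota(new Path("/projects/a"), 100000L, HdfsConstants.QUOTA_DONT_SET);
   * </pre>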
*/ void setQuota(String src, long nsQuota, long ssQuota, StorageType type) throws IOException { if (type != null) { requireEffectiveLayoutVersionForFeature(Feature.QUOTA_BY_STORAGE_TYPE); } checkOperation(OperationCategory.WRITE); writeLock(); boolean success = false; try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set quota on " + src); FSDirAttrOp.setQuota(dir, src, nsQuota, ssQuota, type); success = true; } finally { writeUnlock(); if (success) { getEditLog().logSync(); } logAuditEvent(success, "setQuota", src); } } /** Persist all metadata about this file. * @param src The string representation of the path * @param fileId The inode ID that we're fsyncing. Older clients will pass * INodeId.GRANDFATHER_INODE_ID here. * @param clientName The string representation of the client * @param lastBlockLength The length of the last block * under construction reported from client. * @throws IOException if path does not exist */ void fsync(String src, long fileId, String clientName, long lastBlockLength) throws IOException { NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName); checkOperation(OperationCategory.WRITE); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); FSPermissionChecker pc = getPermissionChecker(); waitForLoadingFSImage(); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot fsync file " + src); src = dir.resolvePath(pc, src, pathComponents); final INode inode; if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) { // Older clients may not have given us an inode ID to work with. // In this case, we have to try to resolve the path and hope it // hasn't changed or been deleted since the file was opened for write. inode = dir.getINode(src); } else { inode = dir.getInode(fileId); if (inode != null) src = inode.getFullPathName(); } final INodeFile pendingFile = checkLease(src, clientName, inode, fileId); if (lastBlockLength > 0) { pendingFile.getFileUnderConstructionFeature().updateLengthOfLastBlock( pendingFile, lastBlockLength); } FSDirWriteFileOp.persistBlocks(dir, src, pendingFile, false); } finally { writeUnlock(); } getEditLog().logSync(); } /** * Move a file that is being written to be immutable. * @param src The filename * @param lease The lease for the client creating the file * @param recoveryLeaseHolder reassign lease to this holder if the last block * needs recovery; keep current holder if null. * @throws AlreadyBeingCreatedException if file is waiting to achieve minimal * replication;<br> * RecoveryInProgressException if lease recovery is in progress.<br> * IOException in case of an error. * @return true if file has been successfully finalized and closed or * false if block recovery has been initiated. Since the lease owner * has been changed and logged, caller should call logSync(). 
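   * <p>
   * Editorial note (illustrative): seen from {@code recoverLease}, a
   * {@code true} return lets the caller report the file as closed
   * immediately, while {@code false} means recovery of the last block has
   * been initiated and the client is expected to retry
   * {@code recoverLease} later.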
*/ boolean internalReleaseLease(Lease lease, String src, INodesInPath iip, String recoveryLeaseHolder) throws IOException { LOG.info("Recovering " + lease + ", src=" + src); assert !isInSafeMode(); assert hasWriteLock(); final INodeFile pendingFile = iip.getLastINode().asFile(); int nrBlocks = pendingFile.numBlocks(); BlockInfo[] blocks = pendingFile.getBlocks(); int nrCompleteBlocks; BlockInfo curBlock = null; for(nrCompleteBlocks = 0; nrCompleteBlocks < nrBlocks; nrCompleteBlocks++) { curBlock = blocks[nrCompleteBlocks]; if(!curBlock.isComplete()) break; assert blockManager.checkMinReplication(curBlock) : "A COMPLETE block is not minimally replicated in " + src; } // If there are no incomplete blocks associated with this file, // then reap lease immediately and close the file. if(nrCompleteBlocks == nrBlocks) { finalizeINodeFileUnderConstruction(src, pendingFile, iip.getLatestSnapshotId()); NameNode.stateChangeLog.warn("BLOCK*" + " internalReleaseLease: All existing blocks are COMPLETE," + " lease removed, file closed."); return true; // closed! } // Only the last and the penultimate blocks may be in non COMPLETE state. // If the penultimate block is not COMPLETE, then it must be COMMITTED. if(nrCompleteBlocks < nrBlocks - 2 || nrCompleteBlocks == nrBlocks - 2 && curBlock != null && curBlock.getBlockUCState() != BlockUCState.COMMITTED) { final String message = "DIR* NameSystem.internalReleaseLease: " + "attempt to release a create lock on " + src + " but file is already closed."; NameNode.stateChangeLog.warn(message); throw new IOException(message); } // The last block is not COMPLETE, and // that the penultimate block if exists is either COMPLETE or COMMITTED final BlockInfo lastBlock = pendingFile.getLastBlock(); BlockUCState lastBlockState = lastBlock.getBlockUCState(); BlockInfo penultimateBlock = pendingFile.getPenultimateBlock(); // If penultimate block doesn't exist then its minReplication is met boolean penultimateBlockMinReplication = penultimateBlock == null ? true : blockManager.checkMinReplication(penultimateBlock); switch(lastBlockState) { case COMPLETE: assert false : "Already checked that the last block is incomplete"; break; case COMMITTED: // Close file if committed blocks are minimally replicated if(penultimateBlockMinReplication && blockManager.checkMinReplication(lastBlock)) { finalizeINodeFileUnderConstruction(src, pendingFile, iip.getLatestSnapshotId()); NameNode.stateChangeLog.warn("BLOCK*" + " internalReleaseLease: Committed blocks are minimally replicated," + " lease removed, file closed."); return true; // closed! } // Cannot close file right now, since some blocks // are not yet minimally replicated. // This may potentially cause infinite loop in lease recovery // if there are no valid replicas on data-nodes. String message = "DIR* NameSystem.internalReleaseLease: " + "Failed to release lease for file " + src + ". Committed blocks are waiting to be minimally replicated." 
+ " Try again later."; NameNode.stateChangeLog.warn(message); throw new AlreadyBeingCreatedException(message); case UNDER_CONSTRUCTION: case UNDER_RECOVERY: final BlockInfoContiguousUnderConstruction uc = (BlockInfoContiguousUnderConstruction)lastBlock; // determine if last block was intended to be truncated Block recoveryBlock = uc.getTruncateBlock(); boolean truncateRecovery = recoveryBlock != null; boolean copyOnTruncate = truncateRecovery && recoveryBlock.getBlockId() != uc.getBlockId(); assert !copyOnTruncate || recoveryBlock.getBlockId() < uc.getBlockId() && recoveryBlock.getGenerationStamp() < uc.getGenerationStamp() && recoveryBlock.getNumBytes() > uc.getNumBytes() : "wrong recoveryBlock"; // setup the last block locations from the blockManager if not known if (uc.getNumExpectedLocations() == 0) { uc.setExpectedLocations(blockManager.getStorages(lastBlock)); } if (uc.getNumExpectedLocations() == 0 && uc.getNumBytes() == 0) { // There is no datanode reported to this block. // may be client have crashed before writing data to pipeline. // This blocks doesn't need any recovery. // We can remove this block and close the file. pendingFile.removeLastBlock(lastBlock); finalizeINodeFileUnderConstruction(src, pendingFile, iip.getLatestSnapshotId()); NameNode.stateChangeLog.warn("BLOCK* internalReleaseLease: " + "Removed empty last block and closed file."); return true; } // start recovery of the last block for this file long blockRecoveryId = nextGenerationStamp(blockIdManager.isLegacyBlock(uc)); lease = reassignLease(lease, src, recoveryLeaseHolder, pendingFile); if(copyOnTruncate) { uc.setGenerationStamp(blockRecoveryId); } else if(truncateRecovery) { recoveryBlock.setGenerationStamp(blockRecoveryId); } uc.initializeBlockRecovery(blockRecoveryId); leaseManager.renewLease(lease); // Cannot close file right now, since the last block requires recovery. // This may potentially cause infinite loop in lease recovery // if there are no valid replicas on data-nodes. NameNode.stateChangeLog.warn( "DIR* NameSystem.internalReleaseLease: " + "File " + src + " has not been closed." + " Lease recovery is in progress. " + "RecoveryId = " + blockRecoveryId + " for block " + lastBlock); break; } return false; } private Lease reassignLease(Lease lease, String src, String newHolder, INodeFile pendingFile) { assert hasWriteLock(); if(newHolder == null) return lease; // The following transaction is not synced. Make sure it's sync'ed later. 
logReassignLease(lease.getHolder(), src, newHolder); return reassignLeaseInternal(lease, newHolder, pendingFile); } Lease reassignLeaseInternal(Lease lease, String newHolder, INodeFile pendingFile) { assert hasWriteLock(); pendingFile.getFileUnderConstructionFeature().setClientName(newHolder); return leaseManager.reassignLease(lease, pendingFile, newHolder); } void commitOrCompleteLastBlock( final INodeFile fileINode, final INodesInPath iip, final Block commitBlock) throws IOException { assert hasWriteLock(); Preconditions.checkArgument(fileINode.isUnderConstruction()); if (!blockManager.commitOrCompleteLastBlock(fileINode, commitBlock)) { return; } // Adjust disk space consumption if required final long diff = fileINode.getPreferredBlockSize() - commitBlock.getNumBytes(); if (diff > 0) { try { dir.updateSpaceConsumed(iip, 0, -diff, fileINode.getFileReplication()); } catch (IOException e) { LOG.warn("Unexpected exception while updating disk space.", e); } } } void finalizeINodeFileUnderConstruction( String src, INodeFile pendingFile, int latestSnapshot) throws IOException { assert hasWriteLock(); FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature(); Preconditions.checkArgument(uc != null); leaseManager.removeLease(uc.getClientName(), pendingFile); pendingFile.recordModification(latestSnapshot); // The file is no longer pending. // Create permanent INode, update blocks. No need to replace the inode here // since we just remove the uc feature from pendingFile pendingFile.toCompleteFile(now()); waitForLoadingFSImage(); // close file and persist block allocations for this file closeFile(src, pendingFile); blockManager.checkReplication(pendingFile); } @VisibleForTesting BlockInfo getStoredBlock(Block block) { return blockManager.getStoredBlock(block); } @Override public boolean isInSnapshot(BlockInfoContiguousUnderConstruction blockUC) { assert hasReadLock(); final BlockCollection bc = blockUC.getBlockCollection(); if (bc == null || !(bc instanceof INodeFile) || !bc.isUnderConstruction()) { return false; } String fullName = bc.getName(); try { if (fullName != null && fullName.startsWith(Path.SEPARATOR) && dir.getINode(fullName) == bc) { // If file exists in normal path then no need to look in snapshot return false; } } catch (UnresolvedLinkException e) { LOG.error("Error while resolving the link : " + fullName, e); return false; } /* * 1. if bc is under construction and also with snapshot, and * bc is not in the current fsdirectory tree, bc must represent a snapshot * file. * 2. if fullName is not an absolute path, bc cannot be existent in the * current fsdirectory tree. * 3. if bc is not the current node associated with fullName, bc must be a * snapshot inode. 
*/ return true; } void commitBlockSynchronization(ExtendedBlock oldBlock, long newgenerationstamp, long newlength, boolean closeFile, boolean deleteblock, DatanodeID[] newtargets, String[] newtargetstorages) throws IOException { LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock + ", newgenerationstamp=" + newgenerationstamp + ", newlength=" + newlength + ", newtargets=" + Arrays.asList(newtargets) + ", closeFile=" + closeFile + ", deleteBlock=" + deleteblock + ")"); checkOperation(OperationCategory.WRITE); String src = ""; waitForLoadingFSImage(); writeLock(); boolean copyTruncate = false; BlockInfoContiguousUnderConstruction truncatedBlock = null; try { checkOperation(OperationCategory.WRITE); // If a DN tries to commit to the standby, the recovery will // fail, and the next retry will succeed on the new NN. checkNameNodeSafeMode( "Cannot commitBlockSynchronization while in safe mode"); final BlockInfo storedBlock = getStoredBlock( ExtendedBlock.getLocalBlock(oldBlock)); if (storedBlock == null) { if (deleteblock) { // This may be a retry attempt so ignore the failure // to locate the block. if (LOG.isDebugEnabled()) { LOG.debug("Block (=" + oldBlock + ") not found"); } return; } else { throw new IOException("Block (=" + oldBlock + ") not found"); } } final long oldGenerationStamp = storedBlock.getGenerationStamp(); final long oldNumBytes = storedBlock.getNumBytes(); // // The implementation of delete operation (see @deleteInternal method) // first removes the file paths from namespace, and delays the removal // of blocks to later time for better performance. When // commitBlockSynchronization (this method) is called in between, the // blockCollection of storedBlock could have been assigned to null by // the delete operation, throw IOException here instead of NPE; if the // file path is already removed from namespace by the delete operation, // throw FileNotFoundException here, so not to proceed to the end of // this method to add a CloseOp to the edit log for an already deleted // file (See HDFS-6825). 
// if (storedBlock.isDeleted()) { throw new IOException("The blockCollection of " + storedBlock + " is null, likely because the file owning this block was" + " deleted and the block removal is delayed"); } INodeFile iFile = ((INode)storedBlock.getBlockCollection()).asFile(); if (isFileDeleted(iFile)) { throw new FileNotFoundException("File not found: " + iFile.getFullPathName() + ", likely due to delayed block" + " removal"); } if ((!iFile.isUnderConstruction() || storedBlock.isComplete()) && iFile.getLastBlock().isComplete()) { if (LOG.isDebugEnabled()) { LOG.debug("Unexpected block (=" + oldBlock + ") since the file (=" + iFile.getLocalName() + ") is not under construction"); } return; } truncatedBlock = (BlockInfoContiguousUnderConstruction) iFile .getLastBlock(); long recoveryId = truncatedBlock.getBlockRecoveryId(); copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId(); if(recoveryId != newgenerationstamp) { throw new IOException("The recovery id " + newgenerationstamp + " does not match current recovery id " + recoveryId + " for block " + oldBlock); } if (deleteblock) { Block blockToDel = ExtendedBlock.getLocalBlock(oldBlock); boolean remove = iFile.removeLastBlock(blockToDel) != null; if (remove) { blockManager.removeBlock(storedBlock); } } else { // update last block if(!copyTruncate) { storedBlock.setGenerationStamp(newgenerationstamp); storedBlock.setNumBytes(newlength); } // find the DatanodeDescriptor objects ArrayList<DatanodeDescriptor> trimmedTargets = new ArrayList<DatanodeDescriptor>(newtargets.length); ArrayList<String> trimmedStorages = new ArrayList<String>(newtargets.length); if (newtargets.length > 0) { for (int i = 0; i < newtargets.length; ++i) { // try to get targetNode DatanodeDescriptor targetNode = blockManager.getDatanodeManager().getDatanode(newtargets[i]); if (targetNode != null) { trimmedTargets.add(targetNode); trimmedStorages.add(newtargetstorages[i]); } else if (LOG.isDebugEnabled()) { LOG.debug("DatanodeDescriptor (=" + newtargets[i] + ") not found"); } } } if ((closeFile) && !trimmedTargets.isEmpty()) { // the file is getting closed. Insert block locations into blockManager. // Otherwise fsck will report these blocks as MISSING, especially if the // blocksReceived from Datanodes take a long time to arrive. 
for (int i = 0; i < trimmedTargets.size(); i++) { DatanodeStorageInfo storageInfo = trimmedTargets.get(i).getStorageInfo(trimmedStorages.get(i)); if (storageInfo != null) { if(copyTruncate) { storageInfo.addBlock(truncatedBlock); } else { storageInfo.addBlock(storedBlock); } } } } // add pipeline locations into the INodeUnderConstruction DatanodeStorageInfo[] trimmedStorageInfos = blockManager.getDatanodeManager().getDatanodeStorageInfos( trimmedTargets.toArray(new DatanodeID[trimmedTargets.size()]), trimmedStorages.toArray(new String[trimmedStorages.size()])); if(copyTruncate) { iFile.setLastBlock(truncatedBlock, trimmedStorageInfos); } else { iFile.setLastBlock(storedBlock, trimmedStorageInfos); if (closeFile) { blockManager.markBlockReplicasAsCorrupt(storedBlock, oldGenerationStamp, oldNumBytes, trimmedStorageInfos); } } } if (closeFile) { if(copyTruncate) { src = closeFileCommitBlocks(iFile, truncatedBlock); if(!iFile.isBlockInLatestSnapshot(storedBlock)) { blockManager.removeBlock(storedBlock); } } else { src = closeFileCommitBlocks(iFile, storedBlock); } } else { // If this commit does not want to close the file, persist blocks src = iFile.getFullPathName(); FSDirWriteFileOp.persistBlocks(dir, src, iFile, false); } } finally { writeUnlock(); } getEditLog().logSync(); if (closeFile) { LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock + ", file=" + src + (copyTruncate ? ", newBlock=" + truncatedBlock : ", newgenerationstamp=" + newgenerationstamp) + ", newlength=" + newlength + ", newtargets=" + Arrays.asList(newtargets) + ") successful"); } else { LOG.info("commitBlockSynchronization(" + oldBlock + ") successful"); } } /** * @param pendingFile open file that needs to be closed * @param storedBlock last block * @return Path of the file that was closed. 
* @throws IOException on error */ @VisibleForTesting String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock) throws IOException { final INodesInPath iip = INodesInPath.fromINode(pendingFile); final String src = iip.getPath(); // commit the last block and complete it if it has minimum replicas commitOrCompleteLastBlock(pendingFile, iip, storedBlock); //remove lease, close file finalizeINodeFileUnderConstruction(src, pendingFile, Snapshot.findLatestSnapshot(pendingFile, Snapshot.CURRENT_STATE_ID)); return src; } /** * Renew the lease(s) held by the given client */ void renewLease(String holder) throws IOException { checkOperation(OperationCategory.WRITE); readLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot renew lease for " + holder); leaseManager.renewLease(holder); } finally { readUnlock(); } } /** * Get a partial listing of the indicated directory * * @param src the directory name * @param startAfter the name to start after * @param needLocation if blockLocations need to be returned * @return a partial listing starting after startAfter * * @throws AccessControlException if access is denied * @throws UnresolvedLinkException if symbolic link is encountered * @throws IOException if other I/O error occurred */ DirectoryListing getListing(String src, byte[] startAfter, boolean needLocation) throws IOException { checkOperation(OperationCategory.READ); DirectoryListing dl = null; readLock(); try { checkOperation(NameNode.OperationCategory.READ); dl = getListingInt(dir, src, startAfter, needLocation); } catch (AccessControlException e) { logAuditEvent(false, "listStatus", src); throw e; } finally { readUnlock(); } logAuditEvent(true, "listStatus", src); return dl; } ///////////////////////////////////////////////////////// // // These methods are called by datanodes // ///////////////////////////////////////////////////////// /** * Register Datanode. * <p> * The purpose of registration is to identify whether the new datanode * serves a new data storage, and will report new data block copies, * which the namenode was not aware of; or the datanode is a replacement * node for the data storage that was previously served by a different * or the same (in terms of host:port) datanode. * The data storages are distinguished by their storageIDs. When a new * data storage is reported the namenode issues a new unique storageID. * <p> * Finally, the namenode returns its namespaceID as the registrationID * for the datanodes. * namespaceID is a persistent attribute of the name space. * The registrationID is checked every time the datanode is communicating * with the namenode. * Datanodes with inappropriate registrationID are rejected. * If the namenode stops, and then restarts it can restore its * namespaceID and will continue serving the datanodes that has previously * registered with the namenode without restarting the whole cluster. * * @see org.apache.hadoop.hdfs.server.datanode.DataNode */ void registerDatanode(DatanodeRegistration nodeReg) throws IOException { writeLock(); try { getBlockManager().getDatanodeManager().registerDatanode(nodeReg); checkSafeMode(); } finally { writeUnlock(); } } /** * Get registrationID for datanodes based on the namespaceID. * * @see #registerDatanode(DatanodeRegistration) * @return registration ID */ String getRegistrationID() { return Storage.getRegistrationID(getFSImage().getStorage()); } /** * The given node has reported in. 
This method should: * 1) Record the heartbeat, so the datanode isn't timed out * 2) Adjust usage stats for future block allocation * * If a substantial amount of time passed since the last datanode * heartbeat then request an immediate block report. * * @return an array of datanode commands * @throws IOException */ HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg, StorageReport[] reports, long cacheCapacity, long cacheUsed, int xceiverCount, int xmitsInProgress, int failedVolumes, VolumeFailureSummary volumeFailureSummary, boolean requestFullBlockReportLease) throws IOException { readLock(); try { //get datanode commands final int maxTransfer = blockManager.getMaxReplicationStreams() - xmitsInProgress; DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat( nodeReg, reports, blockPoolId, cacheCapacity, cacheUsed, xceiverCount, maxTransfer, failedVolumes, volumeFailureSummary); long blockReportLeaseId = 0; if (requestFullBlockReportLease) { blockReportLeaseId = blockManager.requestBlockReportLeaseId(nodeReg); } //create ha status final NNHAStatusHeartbeat haState = new NNHAStatusHeartbeat( haContext.getState().getServiceState(), getFSImage().getLastAppliedOrWrittenTxId()); return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo, blockReportLeaseId); } finally { readUnlock(); } } /** * Returns whether or not there were available resources at the last check of * resources. * * @return true if there were sufficient resources available, false otherwise. */ boolean nameNodeHasResourcesAvailable() { return hasResourcesAvailable; } /** * Perform resource checks and cache the results. */ void checkAvailableResources() { Preconditions.checkState(nnResourceChecker != null, "nnResourceChecker not initialized"); hasResourcesAvailable = nnResourceChecker.hasAvailableDiskSpace(); } /** * Close file. * @param path * @param file */ private void closeFile(String path, INodeFile file) { assert hasWriteLock(); waitForLoadingFSImage(); // file is closed getEditLog().logCloseFile(path, file); NameNode.stateChangeLog.debug("closeFile: {} with {} bloks is persisted" + " to the file system", path, file.getBlocks().length); } /** * Periodically calls hasAvailableResources of NameNodeResourceChecker, and if * there are found to be insufficient resources available, causes the NN to * enter safe mode. If resources are later found to have returned to * acceptable levels, this daemon will cause the NN to exit safe mode. */ class NameNodeResourceMonitor implements Runnable { boolean shouldNNRmRun = true; @Override public void run () { try { while (fsRunning && shouldNNRmRun) { checkAvailableResources(); if(!nameNodeHasResourcesAvailable()) { String lowResourcesMsg = "NameNode low on available disk space. 
"; if (!isInSafeMode()) { LOG.warn(lowResourcesMsg + "Entering safe mode."); } else { LOG.warn(lowResourcesMsg + "Already in safe mode."); } enterSafeMode(true); } try { Thread.sleep(resourceRecheckInterval); } catch (InterruptedException ie) { // Deliberately ignore } } } catch (Exception e) { FSNamesystem.LOG.error("Exception in NameNodeResourceMonitor: ", e); } } public void stopMonitor() { shouldNNRmRun = false; } } class NameNodeEditLogRoller implements Runnable { private boolean shouldRun = true; private final long rollThreshold; private final long sleepIntervalMs; public NameNodeEditLogRoller(long rollThreshold, int sleepIntervalMs) { this.rollThreshold = rollThreshold; this.sleepIntervalMs = sleepIntervalMs; } @Override public void run() { while (fsRunning && shouldRun) { try { FSEditLog editLog = getFSImage().getEditLog(); long numEdits = editLog.getLastWrittenTxId() - editLog.getCurSegmentTxId(); if (numEdits > rollThreshold) { FSNamesystem.LOG.info("NameNode rolling its own edit log because" + " number of edits in open segment exceeds threshold of " + rollThreshold); rollEditLog(); } } catch (Exception e) { FSNamesystem.LOG.error("Swallowing exception in " + NameNodeEditLogRoller.class.getSimpleName() + ":", e); } try { Thread.sleep(sleepIntervalMs); } catch (InterruptedException e) { FSNamesystem.LOG.info(NameNodeEditLogRoller.class.getSimpleName() + " was interrupted, exiting"); break; } } } public void stop() { shouldRun = false; } } /** * Daemon to periodically scan the namespace for lazyPersist files * with missing blocks and unlink them. */ class LazyPersistFileScrubber implements Runnable { private volatile boolean shouldRun = true; final int scrubIntervalSec; public LazyPersistFileScrubber(final int scrubIntervalSec) { this.scrubIntervalSec = scrubIntervalSec; } /** * Periodically go over the list of lazyPersist files with missing * blocks and unlink them from the namespace. 
*/ private void clearCorruptLazyPersistFiles() throws IOException { BlockStoragePolicy lpPolicy = blockManager.getStoragePolicy("LAZY_PERSIST"); List<BlockCollection> filesToDelete = new ArrayList<>(); boolean changed = false; writeLock(); try { final Iterator<BlockInfo> it = blockManager.getCorruptReplicaBlockIterator(); while (it.hasNext()) { Block b = it.next(); BlockInfo blockInfo = blockManager.getStoredBlock(b); if (blockInfo.getBlockCollection().getStoragePolicyID() == lpPolicy.getId()) { filesToDelete.add(blockInfo.getBlockCollection()); } } for (BlockCollection bc : filesToDelete) { LOG.warn("Removing lazyPersist file " + bc.getName() + " with no replicas."); BlocksMapUpdateInfo toRemoveBlocks = FSDirDeleteOp.deleteInternal( FSNamesystem.this, bc.getName(), INodesInPath.fromINode((INodeFile) bc), false); changed |= toRemoveBlocks != null; if (toRemoveBlocks != null) { removeBlocks(toRemoveBlocks); // Incremental deletion of blocks } } } finally { writeUnlock(); } if (changed) { getEditLog().logSync(); } } @Override public void run() { while (fsRunning && shouldRun) { try { if (!isInSafeMode()) { clearCorruptLazyPersistFiles(); } else { if (FSNamesystem.LOG.isDebugEnabled()) { FSNamesystem.LOG .debug("Namenode is in safemode, skipping scrubbing of corrupted lazy-persist files."); } } Thread.sleep(scrubIntervalSec * 1000); } catch (InterruptedException e) { FSNamesystem.LOG.info( "LazyPersistFileScrubber was interrupted, exiting"); break; } catch (Exception e) { FSNamesystem.LOG.error( "Ignoring exception in LazyPersistFileScrubber:", e); } } } public void stop() { shouldRun = false; } } public FSImage getFSImage() { return fsImage; } public FSEditLog getEditLog() { return getFSImage().getEditLog(); } @Metric({"MissingBlocks", "Number of missing blocks"}) public long getMissingBlocksCount() { // not locking return blockManager.getMissingBlocksCount(); } @Metric({"MissingReplOneBlocks", "Number of missing blocks " + "with replication factor 1"}) public long getMissingReplOneBlocksCount() { // not locking return blockManager.getMissingReplOneBlocksCount(); } @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"}) public int getExpiredHeartbeats() { return datanodeStatistics.getExpiredHeartbeats(); } @Metric({"TransactionsSinceLastCheckpoint", "Number of transactions since last checkpoint"}) public long getTransactionsSinceLastCheckpoint() { return getFSImage().getLastAppliedOrWrittenTxId() - getFSImage().getStorage().getMostRecentCheckpointTxId(); } @Metric({"TransactionsSinceLastLogRoll", "Number of transactions since last edit log roll"}) public long getTransactionsSinceLastLogRoll() { if (isInStandbyState() || !getEditLog().isSegmentOpen()) { return 0; } else { return getEditLog().getLastWrittenTxId() - getEditLog().getCurSegmentTxId() + 1; } } @Metric({"LastWrittenTransactionId", "Transaction ID written to the edit log"}) public long getLastWrittenTransactionId() { return getEditLog().getLastWrittenTxId(); } @Metric({"LastCheckpointTime", "Time in milliseconds since the epoch of the last checkpoint"}) public long getLastCheckpointTime() { return getFSImage().getStorage().getMostRecentCheckpointTime(); } /** @see ClientProtocol#getStats() */ long[] getStats() { final long[] stats = datanodeStatistics.getStats(); stats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] = getUnderReplicatedBlocks(); stats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] = getCorruptReplicaBlocks(); stats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] = getMissingBlocksCount(); 
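    /*
     * Editorial note (illustrative, not part of the original source): the
     * returned array is positional and is read with the
     * ClientProtocol.GET_STATS_*_IDX constants. A caller holding a
     * ClientProtocol proxy (hypothetical variable name "namenode") would do:
     *
     *   long[] stats = namenode.getStats();
     *   long missing = stats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];
     */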
stats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX] = getMissingReplOneBlocksCount(); return stats; } @Override // FSNamesystemMBean @Metric({"CapacityTotal", "Total raw capacity of data nodes in bytes"}) public long getCapacityTotal() { return datanodeStatistics.getCapacityTotal(); } @Metric({"CapacityTotalGB", "Total raw capacity of data nodes in GB"}) public float getCapacityTotalGB() { return DFSUtil.roundBytesToGB(getCapacityTotal()); } @Override // FSNamesystemMBean @Metric({"CapacityUsed", "Total used capacity across all data nodes in bytes"}) public long getCapacityUsed() { return datanodeStatistics.getCapacityUsed(); } @Metric({"CapacityUsedGB", "Total used capacity across all data nodes in GB"}) public float getCapacityUsedGB() { return DFSUtil.roundBytesToGB(getCapacityUsed()); } @Override // FSNamesystemMBean @Metric({"CapacityRemaining", "Remaining capacity in bytes"}) public long getCapacityRemaining() { return datanodeStatistics.getCapacityRemaining(); } @Metric({"CapacityRemainingGB", "Remaining capacity in GB"}) public float getCapacityRemainingGB() { return DFSUtil.roundBytesToGB(getCapacityRemaining()); } @Metric({"CapacityUsedNonDFS", "Total space used by data nodes for non DFS purposes in bytes"}) public long getCapacityUsedNonDFS() { return datanodeStatistics.getCapacityUsedNonDFS(); } /** * Total number of connections. */ @Override // FSNamesystemMBean @Metric public int getTotalLoad() { return datanodeStatistics.getXceiverCount(); } @Metric({ "SnapshottableDirectories", "Number of snapshottable directories" }) public int getNumSnapshottableDirs() { return this.snapshotManager.getNumSnapshottableDirs(); } @Metric({ "Snapshots", "The number of snapshots" }) public int getNumSnapshots() { return this.snapshotManager.getNumSnapshots(); } @Override public String getSnapshotStats() { Map<String, Object> info = new HashMap<String, Object>(); info.put("SnapshottableDirectories", this.getNumSnapshottableDirs()); info.put("Snapshots", this.getNumSnapshots()); return JSON.toString(info); } @Override // FSNamesystemMBean @Metric({ "NumEncryptionZones", "The number of encryption zones" }) public int getNumEncryptionZones() { return dir.ezManager.getNumEncryptionZones(); } int getNumberOfDatanodes(DatanodeReportType type) { readLock(); try { return getBlockManager().getDatanodeManager().getDatanodeListForReport( type).size(); } finally { readUnlock(); } } DatanodeInfo[] datanodeReport(final DatanodeReportType type ) throws AccessControlException, StandbyException { checkSuperuserPrivilege(); checkOperation(OperationCategory.UNCHECKED); readLock(); try { checkOperation(OperationCategory.UNCHECKED); final DatanodeManager dm = getBlockManager().getDatanodeManager(); final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type); DatanodeInfo[] arr = new DatanodeInfo[results.size()]; for (int i=0; i<arr.length; i++) { arr[i] = new DatanodeInfo(results.get(i)); } return arr; } finally { readUnlock(); } } DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type ) throws AccessControlException, StandbyException { checkSuperuserPrivilege(); checkOperation(OperationCategory.UNCHECKED); readLock(); try { checkOperation(OperationCategory.UNCHECKED); final DatanodeManager dm = getBlockManager().getDatanodeManager(); final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type); DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()]; for (int i = 0; i < reports.length; i++) { final DatanodeDescriptor d = 
datanodes.get(i); reports[i] = new DatanodeStorageReport(new DatanodeInfo(d), d.getStorageReports()); } return reports; } finally { readUnlock(); } } /** * Save namespace image. * This will save current namespace into fsimage file and empty edits file. * Requires superuser privilege and safe mode. * * @throws AccessControlException if superuser privilege is violated. * @throws IOException if */ void saveNamespace() throws AccessControlException, IOException { checkOperation(OperationCategory.UNCHECKED); checkSuperuserPrivilege(); cpLock(); // Block if a checkpointing is in progress on standby. readLock(); try { checkOperation(OperationCategory.UNCHECKED); if (!isInSafeMode()) { throw new IOException("Safe mode should be turned ON " + "in order to create namespace image."); } getFSImage().saveNamespace(this); } finally { readUnlock(); cpUnlock(); } LOG.info("New namespace image has been created"); } /** * Enables/Disables/Checks restoring failed storage replicas if the storage becomes available again. * Requires superuser privilege. * * @throws AccessControlException if superuser privilege is violated. */ boolean restoreFailedStorage(String arg) throws AccessControlException, StandbyException { checkSuperuserPrivilege(); checkOperation(OperationCategory.UNCHECKED); cpLock(); // Block if a checkpointing is in progress on standby. writeLock(); try { checkOperation(OperationCategory.UNCHECKED); // if it is disabled - enable it and vice versa. if(arg.equals("check")) return getFSImage().getStorage().getRestoreFailedStorage(); boolean val = arg.equals("true"); // false if not getFSImage().getStorage().setRestoreFailedStorage(val); return val; } finally { writeUnlock(); cpUnlock(); } } Date getStartTime() { return new Date(startTime); } void finalizeUpgrade() throws IOException { checkSuperuserPrivilege(); checkOperation(OperationCategory.UNCHECKED); cpLock(); // Block if a checkpointing is in progress on standby. writeLock(); try { checkOperation(OperationCategory.UNCHECKED); getFSImage().finalizeUpgrade(this.isHaEnabled() && inActiveState()); } finally { writeUnlock(); cpUnlock(); } } void refreshNodes() throws IOException { checkOperation(OperationCategory.UNCHECKED); checkSuperuserPrivilege(); getBlockManager().getDatanodeManager().refreshNodes(new HdfsConfiguration()); } void setBalancerBandwidth(long bandwidth) throws IOException { checkOperation(OperationCategory.UNCHECKED); checkSuperuserPrivilege(); getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth); } /** * SafeModeInfo contains information related to the safe mode. * <p> * An instance of {@link SafeModeInfo} is created when the name node * enters safe mode. * <p> * During name node startup {@link SafeModeInfo} counts the number of * <em>safe blocks</em>, those that have at least the minimal number of * replicas, and calculates the ratio of safe blocks to the total number * of blocks in the system, which is the size of blocks in * {@link FSNamesystem#blockManager}. When the ratio reaches the * {@link #threshold} it starts the SafeModeMonitor daemon in order * to monitor whether the safe mode {@link #extension} is passed. * Then it leaves safe mode and destroys itself. * <p> * If safe mode is turned on manually then the number of safe blocks is * not tracked because the name node is not intended to leave safe mode * automatically in the case. 
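   * <p>
   * Worked example (editorial illustration): with
   * {@code dfs.namenode.safemode.threshold-pct = 0.999} and 1,000,000 blocks
   * in the loaded image, {@code blockThreshold = (int) (blockTotal * threshold)}
   * = 999,000, so the threshold is not considered reached until at least
   * 999,000 blocks have the minimal number of replicas; only then does the
   * {@code extension} timer start counting down before safe mode is left.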
* * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction, boolean) */ public class SafeModeInfo { // configuration fields /** Safe mode threshold condition %.*/ private final double threshold; /** Safe mode minimum number of datanodes alive */ private final int datanodeThreshold; /** * Safe mode extension after the threshold. * Make it volatile so that getSafeModeTip can read the latest value * without taking a lock. */ private volatile int extension; /** Min replication required by safe mode. */ private final int safeReplication; /** threshold for populating needed replication queues */ private final double replQueueThreshold; // internal fields /** Time when threshold was reached. * <br> -1 safe mode is off * <br> 0 safe mode is on, and threshold is not reached yet * <br> >0 safe mode is on, but we are in extension period */ private long reached = -1; private long reachedTimestamp = -1; /** Total number of blocks. */ int blockTotal; /** Number of safe blocks. */ int blockSafe; /** Number of blocks needed to satisfy safe mode threshold condition */ private int blockThreshold; /** Number of blocks needed before populating replication queues */ private int blockReplQueueThreshold; /** time of the last status printout */ private long lastStatusReport = 0; /** * Was safemode entered automatically because available resources were low. * Make it volatile so that getSafeModeTip can read the latest value * without taking a lock. */ private volatile boolean resourcesLow = false; /** Should safemode adjust its block totals as blocks come in */ private boolean shouldIncrementallyTrackBlocks = false; /** counter for tracking startup progress of reported blocks */ private Counter awaitingReportedBlocksCounter; /** * Creates SafeModeInfo when the name node enters * automatic safe mode at startup. * * @param conf configuration */ private SafeModeInfo(Configuration conf) { this.threshold = conf.getFloat(DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT); if(threshold > 1.0) { LOG.warn("The threshold value should't be greater than 1, threshold: " + threshold); } this.datanodeThreshold = conf.getInt( DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT); this.extension = conf.getInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0); this.safeReplication = conf.getInt(DFS_NAMENODE_REPLICATION_MIN_KEY, DFS_NAMENODE_REPLICATION_MIN_DEFAULT); LOG.info(DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY + " = " + threshold); LOG.info(DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY + " = " + datanodeThreshold); LOG.info(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + " = " + extension); // default to safe mode threshold (i.e., don't populate queues before leaving safe mode) this.replQueueThreshold = conf.getFloat(DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY, (float) threshold); this.blockTotal = 0; this.blockSafe = 0; } /** * In the HA case, the StandbyNode can be in safemode while the namespace * is modified by the edit log tailer. In this case, the number of total * blocks changes as edits are processed (eg blocks are added and deleted). * However, we don't want to do the incremental tracking during the * startup-time loading process -- only once the initial total has been * set after the image has been loaded. */ private boolean shouldIncrementallyTrackBlocks() { return shouldIncrementallyTrackBlocks; } /** * Creates SafeModeInfo when safe mode is entered manually, or because * available resources are low. 
* * The {@link #threshold} is set to 1.5 so that it could never be reached. * {@link #blockTotal} is set to -1 to indicate that safe mode is manual. * * @see SafeModeInfo */ private SafeModeInfo(boolean resourcesLow) { this.threshold = 1.5f; // this threshold can never be reached this.datanodeThreshold = Integer.MAX_VALUE; this.extension = Integer.MAX_VALUE; this.safeReplication = Short.MAX_VALUE + 1; // more than maxReplication this.replQueueThreshold = 1.5f; // can never be reached this.blockTotal = -1; this.blockSafe = -1; this.resourcesLow = resourcesLow; enter(); reportStatus("STATE* Safe mode is ON.", true); } /** * Check if safe mode is on. * @return true if in safe mode */ private synchronized boolean isOn() { doConsistencyCheck(); return this.reached >= 0; } /** * Enter safe mode. */ private void enter() { this.reached = 0; this.reachedTimestamp = 0; } /** * Leave safe mode. * <p> * Check for invalid, under- & over-replicated blocks at the end of startup. */ private synchronized void leave() { // if not done yet, initialize replication queues. // In the standby, do not populate repl queues if (!isPopulatingReplQueues() && shouldPopulateReplQueues()) { initializeReplQueues(); } long timeInSafemode = now() - startTime; NameNode.stateChangeLog.info("STATE* Leaving safe mode after " + timeInSafemode/1000 + " secs"); NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode); //Log the following only once (when transitioning from ON -> OFF) if (reached >= 0) { NameNode.stateChangeLog.info("STATE* Safe mode is OFF"); } reached = -1; reachedTimestamp = -1; safeMode = null; final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology(); NameNode.stateChangeLog.info("STATE* Network topology has " + nt.getNumOfRacks() + " racks and " + nt.getNumOfLeaves() + " datanodes"); NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has " + blockManager.numOfUnderReplicatedBlocks() + " blocks"); startSecretManagerIfNecessary(); // If startup has not yet completed, end safemode phase. StartupProgress prog = NameNode.getStartupProgress(); if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) { prog.endStep(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS); prog.endPhase(Phase.SAFEMODE); } } /** * Check whether we have reached the threshold for * initializing replication queues. */ private synchronized boolean canInitializeReplQueues() { return shouldPopulateReplQueues() && blockSafe >= blockReplQueueThreshold; } /** * Safe mode can be turned off iff * the threshold is reached and * the extension time has passed. * @return true if safe mode can be left, false otherwise. */ private synchronized boolean canLeave() { if (reached == 0) { return false; } if (monotonicNow() - reached < extension) { reportStatus("STATE* Safe mode ON, in safe mode extension.", false); return false; } if (needEnter()) { reportStatus("STATE* Safe mode ON, thresholds not met.", false); return false; } return true; } /** * There is no need to enter safe mode * if DFS is empty or {@link #threshold} == 0 */ private boolean needEnter() { return (threshold != 0 && blockSafe < blockThreshold) || (datanodeThreshold != 0 && getNumLiveDataNodes() < datanodeThreshold) || (!nameNodeHasResourcesAvailable()); } /** * Check and trigger safe mode if needed.
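* <p> * The caller must hold the FSNamesystem write lock, since leaving safe mode from here may initialize the replication queues.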
*/ private void checkMode() { // Have to have write-lock since leaving safemode initializes // repl queues, which requires write lock assert hasWriteLock(); if (inTransitionToActive()) { return; } // if smmthread is already running, the block threshold must have been // reached before, there is no need to enter the safe mode again if (smmthread == null && needEnter()) { enter(); // check if we are ready to initialize replication queues if (canInitializeReplQueues() && !isPopulatingReplQueues() && !haEnabled) { initializeReplQueues(); } reportStatus("STATE* Safe mode ON.", false); return; } // the threshold is reached or was reached before if (!isOn() || // safe mode is off extension <= 0 || threshold <= 0) { // don't need to wait this.leave(); // leave safe mode return; } if (reached > 0) { // threshold has already been reached before reportStatus("STATE* Safe mode ON.", false); return; } // start monitor reached = monotonicNow(); reachedTimestamp = now(); if (smmthread == null) { smmthread = new Daemon(new SafeModeMonitor()); smmthread.start(); reportStatus("STATE* Safe mode extension entered.", true); } // check if we are ready to initialize replication queues if (canInitializeReplQueues() && !isPopulatingReplQueues() && !haEnabled) { initializeReplQueues(); } } /** * Set total number of blocks. */ private synchronized void setBlockTotal(int total) { this.blockTotal = total; this.blockThreshold = (int) (blockTotal * threshold); this.blockReplQueueThreshold = (int) (blockTotal * replQueueThreshold); if (haEnabled) { // After we initialize the block count, any further namespace // modifications done while in safe mode need to keep track // of the number of total blocks in the system. this.shouldIncrementallyTrackBlocks = true; } if(blockSafe < 0) this.blockSafe = 0; checkMode(); } /** * Increment number of safe blocks if current block has * reached minimal replication. * @param replication current replication */ private synchronized void incrementSafeBlockCount(short replication) { if (replication == safeReplication) { this.blockSafe++; // Report startup progress only if we haven't completed startup yet. StartupProgress prog = NameNode.getStartupProgress(); if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) { if (this.awaitingReportedBlocksCounter == null) { this.awaitingReportedBlocksCounter = prog.getCounter(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS); } this.awaitingReportedBlocksCounter.increment(); } checkMode(); } } /** * Decrement number of safe blocks if current block has * fallen below minimal replication. * @param replication current replication */ private synchronized void decrementSafeBlockCount(short replication) { if (replication == safeReplication-1) { this.blockSafe--; //blockSafe is set to -1 in manual / low resources safemode assert blockSafe >= 0 || isManual() || areResourcesLow(); checkMode(); } } /** * Check if safe mode was entered manually */ private boolean isManual() { return extension == Integer.MAX_VALUE; } /** * Set manual safe mode. */ private synchronized void setManual() { extension = Integer.MAX_VALUE; } /** * Check if safe mode was entered due to resources being low. */ private boolean areResourcesLow() { return resourcesLow; } /** * Set that resources are low for this instance of safe mode. */ private void setResourcesLow() { resourcesLow = true; } /** * A tip on how safe mode is to be turned off: manually or automatically. */ String getTurnOffTip() { if(!isOn()) { return "Safe mode is OFF."; } //Manual OR low-resource safemode. 
(Admin intervention required) String adminMsg = "It was turned on manually. "; if (areResourcesLow()) { adminMsg = "Resources are low on NN. Please add or free up more " + "resources then turn off safe mode manually. NOTE: If you turn off" + " safe mode before adding resources, " + "the NN will immediately return to safe mode. "; } if (isManual() || areResourcesLow()) { return adminMsg + "Use \"hdfs dfsadmin -safemode leave\" to turn safe mode off."; } boolean thresholdsMet = true; int numLive = getNumLiveDataNodes(); String msg = ""; if (blockSafe < blockThreshold) { msg += String.format( "The reported blocks %d needs additional %d" + " blocks to reach the threshold %.4f of total blocks %d.%n", blockSafe, (blockThreshold - blockSafe), threshold, blockTotal); thresholdsMet = false; } else { msg += String.format("The reported blocks %d has reached the threshold" + " %.4f of total blocks %d. ", blockSafe, threshold, blockTotal); } if (numLive < datanodeThreshold) { msg += String.format( "The number of live datanodes %d needs an additional %d live " + "datanodes to reach the minimum number %d.%n", numLive, (datanodeThreshold - numLive), datanodeThreshold); thresholdsMet = false; } else { msg += String.format("The number of live datanodes %d has reached " + "the minimum number %d. ", numLive, datanodeThreshold); } msg += (reached > 0) ? "In safe mode extension. " : ""; msg += "Safe mode will be turned off automatically "; if (!thresholdsMet) { msg += "once the thresholds have been reached."; } else if (reached + extension - monotonicNow() > 0) { msg += ("in " + (reached + extension - monotonicNow()) / 1000 + " seconds."); } else { msg += "soon."; } return msg; } /** * Print status every 20 seconds. */ private void reportStatus(String msg, boolean rightNow) { long curTime = now(); if(!rightNow && (curTime - lastStatusReport < 20 * 1000)) return; NameNode.stateChangeLog.info(msg + " \n" + getTurnOffTip()); lastStatusReport = curTime; } @Override public String toString() { String resText = "Current safe blocks = " + blockSafe + ". Target blocks = " + blockThreshold + " for threshold = %" + threshold + ". Minimal replication = " + safeReplication + "."; if (reached > 0) resText += " Threshold was reached " + new Date(reachedTimestamp) + "."; return resText; } /** * Checks consistency of the class state. * This is costly so only runs if asserts are enabled. 
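* <p> * (The guard below uses the {@code assert assertsOn = true} idiom, so the check is a no-op unless the JVM is started with {@code -ea}.)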
*/ private void doConsistencyCheck() { boolean assertsOn = false; assert assertsOn = true; // set to true if asserts are on if (!assertsOn) return; if (blockTotal == -1 && blockSafe == -1) { return; // manual safe mode } int activeBlocks = blockManager.getActiveBlockCount(); if ((blockTotal != activeBlocks) && !(blockSafe >= 0 && blockSafe <= blockTotal)) { throw new AssertionError( " SafeMode: Inconsistent filesystem state: " + "SafeMode data: blockTotal=" + blockTotal + " blockSafe=" + blockSafe + "; " + "BlockManager data: active=" + activeBlocks); } } private synchronized void adjustBlockTotals(int deltaSafe, int deltaTotal) { if (!shouldIncrementallyTrackBlocks) { return; } assert haEnabled; if (LOG.isDebugEnabled()) { LOG.debug("Adjusting block totals from " + blockSafe + "/" + blockTotal + " to " + (blockSafe + deltaSafe) + "/" + (blockTotal + deltaTotal)); } assert blockSafe + deltaSafe >= 0 : "Can't reduce blockSafe " + blockSafe + " by " + deltaSafe + ": would be negative"; assert blockTotal + deltaTotal >= 0 : "Can't reduce blockTotal " + blockTotal + " by " + deltaTotal + ": would be negative"; blockSafe += deltaSafe; setBlockTotal(blockTotal + deltaTotal); } } /** * Periodically check whether it is time to leave safe mode. * This thread starts when the threshold level is reached. * */ class SafeModeMonitor implements Runnable { /** interval in msec for checking safe mode: {@value} */ private static final long recheckInterval = 1000; /** */ @Override public void run() { while (fsRunning) { writeLock(); try { if (safeMode == null) { // Not in safe mode. break; } if (safeMode.canLeave()) { // Leave safe mode. safeMode.leave(); smmthread = null; break; } } finally { writeUnlock(); } try { Thread.sleep(recheckInterval); } catch (InterruptedException ie) { // Ignored } } if (!fsRunning) { LOG.info("NameNode is being shutdown, exit SafeModeMonitor thread"); } } } boolean setSafeMode(SafeModeAction action) throws IOException { if (action != SafeModeAction.SAFEMODE_GET) { checkSuperuserPrivilege(); switch(action) { case SAFEMODE_LEAVE: // leave safe mode leaveSafeMode(); break; case SAFEMODE_ENTER: // enter safe mode enterSafeMode(false); break; default: LOG.error("Unexpected safe mode action"); } } return isInSafeMode(); } @Override public void checkSafeMode() { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode != null) { safeMode.checkMode(); } } @Override public boolean isInSafeMode() { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode == null) return false; return safeMode.isOn(); } @Override public boolean isInStartupSafeMode() { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode == null) return false; // If the NN is in safemode, and not due to manual / low resources, we // assume it must be because of startup. 
If the NN had low resources during // startup, we assume it came out of startup safemode and it is now in low // resources safemode return !safeMode.isManual() && !safeMode.areResourcesLow() && safeMode.isOn(); } /** * Check if replication queues are to be populated * @return true when node is HAState.Active and not in the very first safemode */ @Override public boolean isPopulatingReplQueues() { if (!shouldPopulateReplQueues()) { return false; } return initializedReplQueues; } private boolean shouldPopulateReplQueues() { if(haContext == null || haContext.getState() == null) return false; return haContext.getState().shouldPopulateReplQueues(); } @Override public void incrementSafeBlockCount(int replication) { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode == null) return; safeMode.incrementSafeBlockCount((short)replication); } @Override public void decrementSafeBlockCount(BlockInfo b) { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode == null) // mostly true return; BlockInfo storedBlock = getStoredBlock(b); if (storedBlock.isComplete()) { safeMode.decrementSafeBlockCount((short)blockManager.countNodes(b).liveReplicas()); } } /** * Adjust the total number of blocks safe and expected during safe mode. * If safe mode is not currently on, this is a no-op. * @param deltaSafe the change in number of safe blocks * @param deltaTotal the change i nnumber of total blocks expected */ @Override public void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal) { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode == null) return; safeMode.adjustBlockTotals(deltaSafe, deltaTotal); } /** * Set the total number of blocks in the system. */ public void setBlockTotal(long completeBlocksTotal) { // safeMode is volatile, and may be set to null at any time SafeModeInfo safeMode = this.safeMode; if (safeMode == null) return; safeMode.setBlockTotal((int) completeBlocksTotal); } /** * Get the total number of blocks in the system. */ @Override // FSNamesystemMBean @Metric public long getBlocksTotal() { return blockManager.getTotalBlocks(); } /** * Get the number of files under construction in the system. */ @Metric({ "NumFilesUnderConstruction", "Number of files under construction" }) public long getNumFilesUnderConstruction() { return leaseManager.countPath(); } /** * Get the total number of active clients holding lease in the system. */ @Metric({ "NumActiveClients", "Number of active clients holding lease" }) public long getNumActiveClients() { return leaseManager.countLease(); } /** * Get the total number of COMPLETE blocks in the system. * For safe mode only complete blocks are counted. * This is invoked only during NN startup and checkpointing. */ public long getCompleteBlocksTotal() { // Calculate number of blocks under construction long numUCBlocks = 0; readLock(); try { numUCBlocks = leaseManager.getNumUnderConstructionBlocks(); return getBlocksTotal() - numUCBlocks; } finally { readUnlock(); } } /** * Enter safe mode. If resourcesLow is false, then we assume it is manual * @throws IOException */ void enterSafeMode(boolean resourcesLow) throws IOException { writeLock(); try { // Stop the secret manager, since rolling the master key would // try to write to the edit log stopSecretManager(); // Ensure that any concurrent operations have been fully synced // before entering safe mode. 
This ensures that the FSImage // is entirely stable on disk as soon as we're in safe mode. boolean isEditlogOpenForWrite = getEditLog().isOpenForWrite(); // Before the Editlog is in OpenForWrite mode, editLogStream will be null, so // logSyncAll can be called only when the Editlog is in OpenForWrite mode if (isEditlogOpenForWrite) { getEditLog().logSyncAll(); } if (!isInSafeMode()) { safeMode = new SafeModeInfo(resourcesLow); return; } if (resourcesLow) { safeMode.setResourcesLow(); } else { safeMode.setManual(); } if (isEditlogOpenForWrite) { getEditLog().logSyncAll(); } NameNode.stateChangeLog.info("STATE* Safe mode is ON" + safeMode.getTurnOffTip()); } finally { writeUnlock(); } } /** * Leave safe mode. */ void leaveSafeMode() { writeLock(); try { if (!isInSafeMode()) { NameNode.stateChangeLog.info("STATE* Safe mode is already OFF"); return; } safeMode.leave(); } finally { writeUnlock(); } } String getSafeModeTip() { // There is no need to take readLock. // Don't use isInSafeMode as this.safeMode might be set to null // after isInSafeMode returns. boolean inSafeMode; SafeModeInfo safeMode = this.safeMode; if (safeMode == null) { inSafeMode = false; } else { inSafeMode = safeMode.isOn(); } if (!inSafeMode) { return ""; } else { return safeMode.getTurnOffTip(); } } CheckpointSignature rollEditLog() throws IOException { checkSuperuserPrivilege(); checkOperation(OperationCategory.JOURNAL); writeLock(); try { checkOperation(OperationCategory.JOURNAL); checkNameNodeSafeMode("Log not rolled"); if (Server.isRpcInvocation()) { LOG.info("Roll Edit Log from " + Server.getRemoteAddress()); } return getFSImage().rollEditLog(getEffectiveLayoutVersion()); } finally { writeUnlock(); } } NamenodeCommand startCheckpoint(NamenodeRegistration backupNode, NamenodeRegistration activeNamenode) throws IOException { checkOperation(OperationCategory.CHECKPOINT); writeLock(); try { checkOperation(OperationCategory.CHECKPOINT); checkNameNodeSafeMode("Checkpoint not started"); LOG.info("Start checkpoint for " + backupNode.getAddress()); NamenodeCommand cmd = getFSImage().startCheckpoint(backupNode, activeNamenode, getEffectiveLayoutVersion()); getEditLog().logSync(); return cmd; } finally { writeUnlock(); } } public void processIncrementalBlockReport(final DatanodeID nodeID, final StorageReceivedDeletedBlocks srdb) throws IOException { writeLock(); try { blockManager.processIncrementalBlockReport(nodeID, srdb); } finally { writeUnlock(); } } void endCheckpoint(NamenodeRegistration registration, CheckpointSignature sig) throws IOException { checkOperation(OperationCategory.CHECKPOINT); readLock(); try { checkOperation(OperationCategory.CHECKPOINT); checkNameNodeSafeMode("Checkpoint not ended"); LOG.info("End checkpoint for " + registration.getAddress()); getFSImage().endCheckpoint(sig); } finally { readUnlock(); } } PermissionStatus createFsOwnerPermissions(FsPermission permission) { return new PermissionStatus(fsOwner.getShortUserName(), supergroup, permission); } @Override public void checkSuperuserPrivilege() throws AccessControlException { if (isPermissionEnabled) { FSPermissionChecker pc = getPermissionChecker(); pc.checkSuperuserPrivilege(); } } /** * Check to see if we have exceeded the limit on the number * of objects (inodes and blocks). */ void checkFsObjectLimit() throws IOException { if (maxFsObjects != 0 && maxFsObjects <= dir.totalInodes() + getBlocksTotal()) { throw new IOException("Exceeded the configured number of objects " + maxFsObjects + " in the filesystem."); } } /** * Get the maximum number of objects (inodes and blocks) allowed in the system.
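* A value of 0 means no limit is enforced; see {@link #checkFsObjectLimit()}.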
*/ @Override // FSNamesystemMBean public long getMaxObjects() { return maxFsObjects; } @Override // FSNamesystemMBean @Metric public long getFilesTotal() { // There is no need to take fSNamesystem's lock as // FSDirectory has its own lock. return this.dir.totalInodes(); } @Override // FSNamesystemMBean @Metric public long getPendingReplicationBlocks() { return blockManager.getPendingReplicationBlocksCount(); } @Override // FSNamesystemMBean @Metric public long getUnderReplicatedBlocks() { return blockManager.getUnderReplicatedBlocksCount(); } /** Returns number of blocks with corrupt replicas */ @Metric({"CorruptBlocks", "Number of blocks with corrupt replicas"}) public long getCorruptReplicaBlocks() { return blockManager.getCorruptReplicaBlocksCount(); } @Override // FSNamesystemMBean @Metric public long getScheduledReplicationBlocks() { return blockManager.getScheduledReplicationBlocksCount(); } @Override @Metric public long getPendingDeletionBlocks() { return blockManager.getPendingDeletionBlocksCount(); } @Override public long getBlockDeletionStartTime() { return startTime + blockManager.getStartupDelayBlockDeletionInMs(); } @Metric public long getExcessBlocks() { return blockManager.getExcessBlocksCount(); } // HA-only metric @Metric public long getPostponedMisreplicatedBlocks() { return blockManager.getPostponedMisreplicatedBlocksCount(); } // HA-only metric @Metric public int getPendingDataNodeMessageCount() { return blockManager.getPendingDataNodeMessageCount(); } // HA-only metric @Metric public String getHAState() { return haContext.getState().toString(); } // HA-only metric @Metric public long getMillisSinceLastLoadedEdits() { if (isInStandbyState() && editLogTailer != null) { return monotonicNow() - editLogTailer.getLastLoadTimeMs(); } else { return 0; } } @Metric public int getBlockCapacity() { return blockManager.getCapacity(); } @Override // FSNamesystemMBean public String getFSState() { return isInSafeMode() ? "safeMode" : "Operational"; } private ObjectName mbeanName; private ObjectName mxbeanName; /** * Register the FSNamesystem MBean using the name * "hadoop:service=NameNode,name=FSNamesystemState" */ private void registerMBean() { // We can only implement one MXBean interface, so we keep the old one. try { StandardMBean bean = new StandardMBean(this, FSNamesystemMBean.class); mbeanName = MBeans.register("NameNode", "FSNamesystemState", bean); } catch (NotCompliantMBeanException e) { throw new RuntimeException("Bad MBean setup", e); } LOG.info("Registered FSNamesystemState MBean"); } /** * shutdown FSNamesystem */ void shutdown() { if (snapshotManager != null) { snapshotManager.shutdown(); } if (mbeanName != null) { MBeans.unregister(mbeanName); mbeanName = null; } if (mxbeanName != null) { MBeans.unregister(mxbeanName); mxbeanName = null; } if (dir != null) { dir.shutdown(); } if (blockManager != null) { blockManager.shutdown(); } } @Override // FSNamesystemMBean public int getNumLiveDataNodes() { return getBlockManager().getDatanodeManager().getNumLiveDataNodes(); } @Override // FSNamesystemMBean public int getNumDeadDataNodes() { return getBlockManager().getDatanodeManager().getNumDeadDataNodes(); } @Override // FSNamesystemMBean public int getNumDecomLiveDataNodes() { final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true); int liveDecommissioned = 0; for (DatanodeDescriptor node : live) { liveDecommissioned += node.isDecommissioned() ? 
1 : 0; } return liveDecommissioned; } @Override // FSNamesystemMBean public int getNumDecomDeadDataNodes() { final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>(); getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, true); int deadDecommissioned = 0; for (DatanodeDescriptor node : dead) { deadDecommissioned += node.isDecommissioned() ? 1 : 0; } return deadDecommissioned; } @Override // FSNamesystemMBean public int getVolumeFailuresTotal() { List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true); int volumeFailuresTotal = 0; for (DatanodeDescriptor node: live) { volumeFailuresTotal += node.getVolumeFailures(); } return volumeFailuresTotal; } @Override // FSNamesystemMBean public long getEstimatedCapacityLostTotal() { List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); getBlockManager().getDatanodeManager().fetchDatanodes(live, null, true); long estimatedCapacityLostTotal = 0; for (DatanodeDescriptor node: live) { VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary(); if (volumeFailureSummary != null) { estimatedCapacityLostTotal += volumeFailureSummary.getEstimatedCapacityLostTotal(); } } return estimatedCapacityLostTotal; } @Override // FSNamesystemMBean public int getNumDecommissioningDataNodes() { return getBlockManager().getDatanodeManager().getDecommissioningNodes() .size(); } @Override // FSNamesystemMBean @Metric({"StaleDataNodes", "Number of datanodes marked stale due to delayed heartbeat"}) public int getNumStaleDataNodes() { return getBlockManager().getDatanodeManager().getNumStaleNodes(); } /** * Storages are marked as "content stale" after NN restart or fails over and * before NN receives the first Heartbeat followed by the first Blockreport. */ @Override // FSNamesystemMBean public int getNumStaleStorages() { return getBlockManager().getDatanodeManager().getNumStaleStorages(); } @Override // FSNamesystemMBean public String getTopUserOpCounts() { if (!topConf.isEnabled) { return null; } Date now = new Date(); final List<RollingWindowManager.TopWindow> topWindows = topMetrics.getTopWindows(); Map<String, Object> topMap = new TreeMap<String, Object>(); topMap.put("windows", topWindows); topMap.put("timestamp", DFSUtil.dateToIso8601String(now)); ObjectMapper mapper = new ObjectMapper(); try { return mapper.writeValueAsString(topMap); } catch (IOException e) { LOG.warn("Failed to fetch TopUser metrics", e); } return null; } /** * Increments, logs and then returns the stamp */ long nextGenerationStamp(boolean legacyBlock) throws IOException, SafeModeException { assert hasWriteLock(); checkNameNodeSafeMode("Cannot get next generation stamp"); long gs = blockIdManager.nextGenerationStamp(legacyBlock); if (legacyBlock) { getEditLog().logGenerationStampV1(gs); } else { getEditLog().logGenerationStampV2(gs); } // NB: callers sync the log return gs; } /** * Increments, logs and then returns the block ID */ private long nextBlockId() throws IOException { assert hasWriteLock(); checkNameNodeSafeMode("Cannot get next block ID"); final long blockId = blockIdManager.nextBlockId(); getEditLog().logAllocateBlockId(blockId); // NB: callers sync the log return blockId; } private boolean isFileDeleted(INodeFile file) { // Not in the inodeMap or in the snapshot but marked deleted. 
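// The inode map lookup below handles the common case cheaply; the parent walk that follows catches files removed by a recursive delete of an ancestor directory.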
if (dir.getInode(file.getId()) == null) { return true; } // look at the path hierarchy to see if one parent is deleted by recursive // deletion INode tmpChild = file; INodeDirectory tmpParent = file.getParent(); while (true) { if (tmpParent == null) { return true; } INode childINode = tmpParent.getChild(tmpChild.getLocalNameBytes(), Snapshot.CURRENT_STATE_ID); if (childINode == null || !childINode.equals(tmpChild)) { // a newly created INode with the same name as an already deleted one // would be a different INode than the deleted one return true; } if (tmpParent.isRoot()) { break; } tmpChild = tmpParent; tmpParent = tmpParent.getParent(); } if (file.isWithSnapshot() && file.getFileWithSnapshotFeature().isCurrentFileDeleted()) { return true; } return false; } private INodeFile checkUCBlock(ExtendedBlock block, String clientName) throws IOException { assert hasWriteLock(); checkNameNodeSafeMode("Cannot get a new generation stamp and an " + "access token for block " + block); // check stored block state BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(block)); if (storedBlock == null || storedBlock.getBlockUCState() != BlockUCState.UNDER_CONSTRUCTION) { throw new IOException(block + " does not exist or is not under Construction" + storedBlock); } // check file inode final INodeFile file = ((INode)storedBlock.getBlockCollection()).asFile(); if (file == null || !file.isUnderConstruction() || isFileDeleted(file)) { throw new IOException("The file " + storedBlock + " belonged to does not exist or it is not under construction."); } // check lease if (clientName == null || !clientName.equals(file.getFileUnderConstructionFeature() .getClientName())) { throw new LeaseExpiredException("Lease mismatch: " + block + " is accessed by a non lease holder " + clientName); } return file; } /** * Client is reporting some bad block locations. */ void reportBadBlocks(LocatedBlock[] blocks) throws IOException { checkOperation(OperationCategory.WRITE); NameNode.stateChangeLog.info("*DIR* reportBadBlocks"); writeLock(); try { checkOperation(OperationCategory.WRITE); for (int i = 0; i < blocks.length; i++) { ExtendedBlock blk = blocks[i].getBlock(); DatanodeInfo[] nodes = blocks[i].getLocations(); String[] storageIDs = blocks[i].getStorageIDs(); for (int j = 0; j < nodes.length; j++) { blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j], storageIDs == null ? null: storageIDs[j], "client machine reported it"); } } } finally { writeUnlock(); } } /** * Get a new generation stamp together with an access token for * a block under construction * * This method is called for recovering a failed pipeline or setting up * a pipeline to append to a block. 
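* <p> * (Typically the client drives this during pipeline recovery; the new generation stamp returned here is what lets the surviving, updated replicas be told apart from stale ones.)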
* * @param block a block * @param clientName the name of a client * @return a located block with a new generation stamp and an access token * @throws IOException if any error occurs */ LocatedBlock updateBlockForPipeline(ExtendedBlock block, String clientName) throws IOException { LocatedBlock locatedBlock; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); // check validity of parameters checkUCBlock(block, clientName); // get a new generation stamp and an access token block.setGenerationStamp(nextGenerationStamp(blockIdManager.isLegacyBlock(block.getLocalBlock()))); locatedBlock = new LocatedBlock(block, new DatanodeInfo[0]); blockManager.setBlockToken(locatedBlock, BlockTokenIdentifier.AccessMode.WRITE); } finally { writeUnlock(); } // Ensure we record the new generation stamp getEditLog().logSync(); return locatedBlock; } /** * Update a pipeline for a block under construction * * @param clientName the name of the client * @param oldBlock the old block * @param newBlock a new block with a new generation stamp and length * @param newNodes datanodes in the pipeline * @throws IOException if any error occurs */ void updatePipeline( String clientName, ExtendedBlock oldBlock, ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs, boolean logRetryCache) throws IOException { LOG.info("updatePipeline(" + oldBlock.getLocalBlock() + ", newGS=" + newBlock.getGenerationStamp() + ", newLength=" + newBlock.getNumBytes() + ", newNodes=" + Arrays.asList(newNodes) + ", client=" + clientName + ")"); waitForLoadingFSImage(); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Pipeline not updated"); assert newBlock.getBlockId()==oldBlock.getBlockId() : newBlock + " and " + oldBlock + " have different block identifiers"; updatePipelineInternal(clientName, oldBlock, newBlock, newNodes, newStorageIDs, logRetryCache); } finally { writeUnlock(); } getEditLog().logSync(); LOG.info("updatePipeline(" + oldBlock.getLocalBlock() + " => " + newBlock.getLocalBlock() + ") success"); } private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock, ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs, boolean logRetryCache) throws IOException { assert hasWriteLock(); // check the validity of the block and lease holder name final INodeFile pendingFile = checkUCBlock(oldBlock, clientName); final BlockInfoContiguousUnderConstruction blockinfo = (BlockInfoContiguousUnderConstruction)pendingFile.getLastBlock(); // check new GS & length: this is not expected if (newBlock.getGenerationStamp() <= blockinfo.getGenerationStamp() || newBlock.getNumBytes() < blockinfo.getNumBytes()) { String msg = "Update " + oldBlock + " (len = " + blockinfo.getNumBytes() + ") to an older state: " + newBlock + " (len = " + newBlock.getNumBytes() +")"; LOG.warn(msg); throw new IOException(msg); } // Update old block with the new generation stamp and new length blockinfo.setNumBytes(newBlock.getNumBytes()); blockinfo.setGenerationStampAndVerifyReplicas(newBlock.getGenerationStamp()); // find the DatanodeDescriptor objects final DatanodeStorageInfo[] storages = blockManager.getDatanodeManager() .getDatanodeStorageInfos(newNodes, newStorageIDs); blockinfo.setExpectedLocations(storages); String src = pendingFile.getFullPathName(); FSDirWriteFileOp.persistBlocks(dir, src, pendingFile, logRetryCache); } /** * Register a Backup name-node, verifying that it belongs * to the correct namespace, and adding it to the set of * active
journals if necessary. * * @param bnReg registration of the new BackupNode * @param nnReg registration of this NameNode * @throws IOException if the namespace IDs do not match */ void registerBackupNode(NamenodeRegistration bnReg, NamenodeRegistration nnReg) throws IOException { writeLock(); try { if(getFSImage().getStorage().getNamespaceID() != bnReg.getNamespaceID()) throw new IOException("Incompatible namespaceIDs: " + " Namenode namespaceID = " + getFSImage().getStorage().getNamespaceID() + "; " + bnReg.getRole() + " node namespaceID = " + bnReg.getNamespaceID()); if (bnReg.getRole() == NamenodeRole.BACKUP) { getFSImage().getEditLog().registerBackupNode( bnReg, nnReg); } } finally { writeUnlock(); } } /** * Release (unregister) backup node. * <p> * Find and remove the backup stream corresponding to the node. * @throws IOException */ void releaseBackupNode(NamenodeRegistration registration) throws IOException { checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); if(getFSImage().getStorage().getNamespaceID() != registration.getNamespaceID()) throw new IOException("Incompatible namespaceIDs: " + " Namenode namespaceID = " + getFSImage().getStorage().getNamespaceID() + "; " + registration.getRole() + " node namespaceID = " + registration.getNamespaceID()); getEditLog().releaseBackupStream(registration); } finally { writeUnlock(); } } static class CorruptFileBlockInfo { final String path; final Block block; public CorruptFileBlockInfo(String p, Block b) { path = p; block = b; } @Override public String toString() { return block.getBlockName() + "\t" + path; } } /** * @param path Restrict corrupt files to this portion of namespace. * @param cookieTab Support for continuation; cookieTab tells where * to start from * @return a list in which each entry describes a corrupt file/block * @throws IOException */ Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path, String[] cookieTab) throws IOException { checkSuperuserPrivilege(); checkOperation(OperationCategory.READ); int count = 0; ArrayList<CorruptFileBlockInfo> corruptFiles = new ArrayList<CorruptFileBlockInfo>(); if (cookieTab == null) { cookieTab = new String[] { null }; } // Do a quick check if there are any corrupt files without taking the lock if (blockManager.getMissingBlocksCount() == 0) { if (cookieTab[0] == null) { cookieTab[0] = String.valueOf(getIntCookie(cookieTab[0])); } if (LOG.isDebugEnabled()) { LOG.debug("there are no corrupt file blocks."); } return corruptFiles; } readLock(); try { checkOperation(OperationCategory.READ); if (!isPopulatingReplQueues()) { throw new IOException("Cannot run listCorruptFileBlocks because " + "replication queues have not been initialized."); } // print a limited # of corrupt files per call final Iterator<BlockInfo> blkIterator = blockManager.getCorruptReplicaBlockIterator(); int skip = getIntCookie(cookieTab[0]); for (int i = 0; i < skip && blkIterator.hasNext(); i++) { blkIterator.next(); } while (blkIterator.hasNext()) { BlockInfo blk = blkIterator.next(); final INode inode = (INode)blockManager.getBlockCollection(blk); skip++; if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) { String src = FSDirectory.getFullPathName(inode); if (src.startsWith(path)){ corruptFiles.add(new CorruptFileBlockInfo(src, blk)); count++; if (count >= DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED) break; } } } cookieTab[0] = String.valueOf(skip); if (LOG.isDebugEnabled()) { LOG.debug("list corrupt file blocks returned: " + count); } return 
corruptFiles; } finally { readUnlock(); } } /** * Convert string cookie to integer. */ private static int getIntCookie(String cookie){ int c; if(cookie == null){ c = 0; } else { try{ c = Integer.parseInt(cookie); }catch (NumberFormatException e) { c = 0; } } c = Math.max(0, c); return c; } /** * Create delegation token secret manager */ private DelegationTokenSecretManager createDelegationTokenSecretManager( Configuration conf) { return new DelegationTokenSecretManager(conf.getLong( DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY, DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT), conf.getLong(DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT), conf.getLong(DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT), DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL, conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY, DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT), this); } /** * Returns the DelegationTokenSecretManager instance in the namesystem. * @return delegation token secret manager object */ DelegationTokenSecretManager getDelegationTokenSecretManager() { return dtSecretManager; } /** * @param renewer Renewer information * @return delegation toek * @throws IOException on error */ Token<DelegationTokenIdentifier> getDelegationToken(Text renewer) throws IOException { Token<DelegationTokenIdentifier> token; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot issue delegation token"); if (!isAllowedDelegationTokenOp()) { throw new IOException( "Delegation Token can be issued only with kerberos or web authentication"); } if (dtSecretManager == null || !dtSecretManager.isRunning()) { LOG.warn("trying to get DT with no secret manager running"); return null; } UserGroupInformation ugi = getRemoteUser(); String user = ugi.getUserName(); Text owner = new Text(user); Text realUser = null; if (ugi.getRealUser() != null) { realUser = new Text(ugi.getRealUser().getUserName()); } DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, renewer, realUser); token = new Token<DelegationTokenIdentifier>( dtId, dtSecretManager); long expiryTime = dtSecretManager.getTokenExpiryTime(dtId); getEditLog().logGetDelegationToken(dtId, expiryTime); } finally { writeUnlock(); } getEditLog().logSync(); return token; } /** * * @param token token to renew * @return new expiryTime of the token * @throws InvalidToken if {@code token} is invalid * @throws IOException on other errors */ long renewDelegationToken(Token<DelegationTokenIdentifier> token) throws InvalidToken, IOException { long expiryTime; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot renew delegation token"); if (!isAllowedDelegationTokenOp()) { throw new IOException( "Delegation Token can be renewed only with kerberos or web authentication"); } String renewer = getRemoteUser().getShortUserName(); expiryTime = dtSecretManager.renewToken(token, renewer); DelegationTokenIdentifier id = new DelegationTokenIdentifier(); ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier()); DataInputStream in = new DataInputStream(buf); id.readFields(in); getEditLog().logRenewDelegationToken(id, expiryTime); } finally { writeUnlock(); } getEditLog().logSync(); return expiryTime; } /** * * @param token token to cancel * @throws IOException on error */ void 
cancelDelegationToken(Token<DelegationTokenIdentifier> token) throws IOException { checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot cancel delegation token"); String canceller = getRemoteUser().getUserName(); DelegationTokenIdentifier id = dtSecretManager .cancelToken(token, canceller); getEditLog().logCancelDelegationToken(id); } finally { writeUnlock(); } getEditLog().logSync(); } /** * @param out save state of the secret manager * @param sdPath String storage directory path */ void saveSecretManagerStateCompat(DataOutputStream out, String sdPath) throws IOException { dtSecretManager.saveSecretManagerStateCompat(out, sdPath); } SecretManagerState saveSecretManagerState() { return dtSecretManager.saveSecretManagerState(); } /** * @param in load the state of secret manager from input stream */ void loadSecretManagerStateCompat(DataInput in) throws IOException { dtSecretManager.loadSecretManagerStateCompat(in); } void loadSecretManagerState(SecretManagerSection s, List<SecretManagerSection.DelegationKey> keys, List<SecretManagerSection.PersistToken> tokens) throws IOException { dtSecretManager.loadSecretManagerState(new SecretManagerState(s, keys, tokens)); } /** * Log the updateMasterKey operation to edit logs * * @param key new delegation key. */ public void logUpdateMasterKey(DelegationKey key) { assert !isInSafeMode() : "this should never be called while in safemode, since we stop " + "the DT manager before entering safemode!"; // No need to hold FSN lock since we don't access any internal // structures, and this is stopped before the FSN shuts itself // down, etc. getEditLog().logUpdateMasterKey(key); getEditLog().logSync(); } /** * Log the cancellation of expired tokens to edit logs * * @param id token identifier to cancel */ public void logExpireDelegationToken(DelegationTokenIdentifier id) { assert !isInSafeMode() : "this should never be called while in safemode, since we stop " + "the DT manager before entering safemode!"; // No need to hold FSN lock since we don't access any internal // structures, and this is stopped before the FSN shuts itself // down, etc. getEditLog().logCancelDelegationToken(id); } private void logReassignLease(String leaseHolder, String src, String newHolder) { assert hasWriteLock(); getEditLog().logReassignLease(leaseHolder, src, newHolder); } /** * * @return true if delegation token operation is allowed */ private boolean isAllowedDelegationTokenOp() throws IOException { AuthenticationMethod authMethod = getConnectionAuthenticationMethod(); if (UserGroupInformation.isSecurityEnabled() && (authMethod != AuthenticationMethod.KERBEROS) && (authMethod != AuthenticationMethod.KERBEROS_SSL) && (authMethod != AuthenticationMethod.CERTIFICATE)) { return false; } return true; } /** * Returns authentication method used to establish the connection * @return AuthenticationMethod used to establish connection * @throws IOException */ private AuthenticationMethod getConnectionAuthenticationMethod() throws IOException { UserGroupInformation ugi = getRemoteUser(); AuthenticationMethod authMethod = ugi.getAuthenticationMethod(); if (authMethod == AuthenticationMethod.PROXY) { authMethod = ugi.getRealUser().getAuthenticationMethod(); } return authMethod; } /** * Client invoked methods are invoked over RPC and will be in * RPC call context even if the client exits. 
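* Used, for example, to decide whether an operation should be recorded in the audit log as an externally invoked request.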
*/ boolean isExternalInvocation() { return Server.isRpcInvocation() || NamenodeWebHdfsMethods.isWebHdfsInvocation(); } private static InetAddress getRemoteIp() { InetAddress ip = Server.getRemoteIp(); if (ip != null) { return ip; } return NamenodeWebHdfsMethods.getRemoteIp(); } // optimize ugi lookup for RPC operations to avoid a trip through // UGI.getCurrentUser which is synch'ed private static UserGroupInformation getRemoteUser() throws IOException { return NameNode.getRemoteUser(); } /** * Log fsck event in the audit log */ void logFsckEvent(String src, InetAddress remoteAddress) throws IOException { if (isAuditEnabled()) { logAuditEvent(true, getRemoteUser(), remoteAddress, "fsck", src, null, null); } } /** * Register NameNodeMXBean */ private void registerMXBean() { mxbeanName = MBeans.register("NameNode", "NameNodeInfo", this); } /** * Class representing Namenode information for JMX interfaces */ @Override // NameNodeMXBean public String getVersion() { return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision(); } @Override // NameNodeMXBean public long getUsed() { return this.getCapacityUsed(); } @Override // NameNodeMXBean public long getFree() { return this.getCapacityRemaining(); } @Override // NameNodeMXBean public long getTotal() { return this.getCapacityTotal(); } @Override // NameNodeMXBean public String getSafemode() { if (!this.isInSafeMode()) return ""; return "Safe mode is ON. " + this.getSafeModeTip(); } @Override // NameNodeMXBean public boolean isUpgradeFinalized() { return this.getFSImage().isUpgradeFinalized(); } @Override // NameNodeMXBean public long getNonDfsUsedSpace() { return datanodeStatistics.getCapacityUsedNonDFS(); } @Override // NameNodeMXBean public float getPercentUsed() { return datanodeStatistics.getCapacityUsedPercent(); } @Override // NameNodeMXBean public long getBlockPoolUsedSpace() { return datanodeStatistics.getBlockPoolUsed(); } @Override // NameNodeMXBean public float getPercentBlockPoolUsed() { return datanodeStatistics.getPercentBlockPoolUsed(); } @Override // NameNodeMXBean public float getPercentRemaining() { return datanodeStatistics.getCapacityRemainingPercent(); } @Override // NameNodeMXBean public long getCacheCapacity() { return datanodeStatistics.getCacheCapacity(); } @Override // NameNodeMXBean public long getCacheUsed() { return datanodeStatistics.getCacheUsed(); } @Override // NameNodeMXBean public long getTotalBlocks() { return getBlocksTotal(); } @Override // NameNodeMXBean @Metric public long getTotalFiles() { return getFilesTotal(); } @Override // NameNodeMXBean public long getNumberOfMissingBlocks() { return getMissingBlocksCount(); } @Override // NameNodeMXBean public long getNumberOfMissingBlocksWithReplicationFactorOne() { return getMissingReplOneBlocksCount(); } @Override // NameNodeMXBean public int getThreads() { return ManagementFactory.getThreadMXBean().getThreadCount(); } /** * Returned information is a JSON representation of map with host name as the * key and value is a map of live node attribute keys to its values */ @Override // NameNodeMXBean public String getLiveNodes() { final Map<String, Map<String,Object>> info = new HashMap<String, Map<String,Object>>(); final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); blockManager.getDatanodeManager().fetchDatanodes(live, null, true); for (DatanodeDescriptor node : live) { ImmutableMap.Builder<String, Object> innerinfo = ImmutableMap.<String,Object>builder(); innerinfo .put("infoAddr", node.getInfoAddr()) .put("infoSecureAddr", 
node.getInfoSecureAddr()) .put("xferaddr", node.getXferAddr()) .put("lastContact", getLastContact(node)) .put("usedSpace", getDfsUsed(node)) .put("adminState", node.getAdminState().toString()) .put("nonDfsUsedSpace", node.getNonDfsUsed()) .put("capacity", node.getCapacity()) .put("numBlocks", node.numBlocks()) .put("version", node.getSoftwareVersion()) .put("used", node.getDfsUsed()) .put("remaining", node.getRemaining()) .put("blockScheduled", node.getBlocksScheduled()) .put("blockPoolUsed", node.getBlockPoolUsed()) .put("blockPoolUsedPercent", node.getBlockPoolUsedPercent()) .put("volfails", node.getVolumeFailures()); VolumeFailureSummary volumeFailureSummary = node.getVolumeFailureSummary(); if (volumeFailureSummary != null) { innerinfo .put("failedStorageLocations", volumeFailureSummary.getFailedStorageLocations()) .put("lastVolumeFailureDate", volumeFailureSummary.getLastVolumeFailureDate()) .put("estimatedCapacityLostTotal", volumeFailureSummary.getEstimatedCapacityLostTotal()); } info.put(node.getHostName() + ":" + node.getXferPort(), innerinfo.build()); } return JSON.toString(info); } /** * Returned information is a JSON representation of map with host name as the * key and value is a map of dead node attribute keys to its values */ @Override // NameNodeMXBean public String getDeadNodes() { final Map<String, Map<String, Object>> info = new HashMap<String, Map<String, Object>>(); final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>(); blockManager.getDatanodeManager().fetchDatanodes(null, dead, true); for (DatanodeDescriptor node : dead) { Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder() .put("lastContact", getLastContact(node)) .put("decommissioned", node.isDecommissioned()) .put("xferaddr", node.getXferAddr()) .build(); info.put(node.getHostName() + ":" + node.getXferPort(), innerinfo); } return JSON.toString(info); } /** * Returned information is a JSON representation of map with host name as the * key and value is a map of decommissioning node attribute keys to its * values */ @Override // NameNodeMXBean public String getDecomNodes() { final Map<String, Map<String, Object>> info = new HashMap<String, Map<String, Object>>(); final List<DatanodeDescriptor> decomNodeList = blockManager.getDatanodeManager( ).getDecommissioningNodes(); for (DatanodeDescriptor node : decomNodeList) { Map<String, Object> innerinfo = ImmutableMap .<String, Object> builder() .put("xferaddr", node.getXferAddr()) .put("underReplicatedBlocks", node.decommissioningStatus.getUnderReplicatedBlocks()) .put("decommissionOnlyReplicas", node.decommissioningStatus.getDecommissionOnlyReplicas()) .put("underReplicateInOpenFiles", node.decommissioningStatus.getUnderReplicatedInOpenFiles()) .build(); info.put(node.getHostName() + ":" + node.getXferPort(), innerinfo); } return JSON.toString(info); } private long getLastContact(DatanodeDescriptor alivenode) { return (monotonicNow() - alivenode.getLastUpdateMonotonic())/1000; } private long getDfsUsed(DatanodeDescriptor alivenode) { return alivenode.getDfsUsed(); } @Override // NameNodeMXBean public String getClusterId() { return getFSImage().getStorage().getClusterID(); } @Override // NameNodeMXBean public String getBlockPoolId() { return blockPoolId; } @Override // NameNodeMXBean public String getNameDirStatuses() { Map<String, Map<File, StorageDirType>> statusMap = new HashMap<String, Map<File, StorageDirType>>(); Map<File, StorageDirType> activeDirs = new HashMap<File, StorageDirType>(); for (Iterator<StorageDirectory> it = 
getFSImage().getStorage().dirIterator(); it.hasNext();) { StorageDirectory st = it.next(); activeDirs.put(st.getRoot(), st.getStorageDirType()); } statusMap.put("active", activeDirs); List<Storage.StorageDirectory> removedStorageDirs = getFSImage().getStorage().getRemovedStorageDirs(); Map<File, StorageDirType> failedDirs = new HashMap<File, StorageDirType>(); for (StorageDirectory st : removedStorageDirs) { failedDirs.put(st.getRoot(), st.getStorageDirType()); } statusMap.put("failed", failedDirs); return JSON.toString(statusMap); } @Override // NameNodeMXBean public String getNodeUsage() { float median = 0; float max = 0; float min = 0; float dev = 0; final Map<String, Map<String,Object>> info = new HashMap<String, Map<String,Object>>(); final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>(); blockManager.getDatanodeManager().fetchDatanodes(live, null, true); for (Iterator<DatanodeDescriptor> it = live.iterator(); it.hasNext();) { DatanodeDescriptor node = it.next(); if (node.isDecommissionInProgress() || node.isDecommissioned()) { it.remove(); } } if (live.size() > 0) { float totalDfsUsed = 0; float[] usages = new float[live.size()]; int i = 0; for (DatanodeDescriptor dn : live) { usages[i++] = dn.getDfsUsedPercent(); totalDfsUsed += dn.getDfsUsedPercent(); } totalDfsUsed /= live.size(); Arrays.sort(usages); median = usages[usages.length / 2]; max = usages[usages.length - 1]; min = usages[0]; for (i = 0; i < usages.length; i++) { dev += (usages[i] - totalDfsUsed) * (usages[i] - totalDfsUsed); } dev = (float) Math.sqrt(dev / usages.length); } final Map<String, Object> innerInfo = new HashMap<String, Object>(); innerInfo.put("min", StringUtils.format("%.2f%%", min)); innerInfo.put("median", StringUtils.format("%.2f%%", median)); innerInfo.put("max", StringUtils.format("%.2f%%", max)); innerInfo.put("stdDev", StringUtils.format("%.2f%%", dev)); info.put("nodeUsage", innerInfo); return JSON.toString(info); } @Override // NameNodeMXBean public String getNameJournalStatus() { List<Map<String, String>> jasList = new ArrayList<Map<String, String>>(); FSEditLog log = getFSImage().getEditLog(); if (log != null) { boolean openForWrite = log.isOpenForWrite(); for (JournalAndStream jas : log.getJournals()) { final Map<String, String> jasMap = new HashMap<String, String>(); String manager = jas.getManager().toString(); jasMap.put("required", String.valueOf(jas.isRequired())); jasMap.put("disabled", String.valueOf(jas.isDisabled())); jasMap.put("manager", manager); if (jas.isDisabled()) { jasMap.put("stream", "Failed"); } else if (openForWrite) { EditLogOutputStream elos = jas.getCurrentStream(); if (elos != null) { jasMap.put("stream", elos.generateReport()); } else { jasMap.put("stream", "not currently writing"); } } else { jasMap.put("stream", "open for read"); } jasList.add(jasMap); } } return JSON.toString(jasList); } @Override // NameNodeMxBean public String getJournalTransactionInfo() { Map<String, String> txnIdMap = new HashMap<String, String>(); txnIdMap.put("LastAppliedOrWrittenTxId", Long.toString(this.getFSImage().getLastAppliedOrWrittenTxId())); txnIdMap.put("MostRecentCheckpointTxId", Long.toString(this.getFSImage().getMostRecentCheckpointTxId())); return JSON.toString(txnIdMap); } @Override // NameNodeMXBean public String getNNStarted() { return getStartTime().toString(); } @Override // NameNodeMXBean public String getCompileInfo() { return VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from " + VersionInfo.getBranch(); } /** @return the block manager. 
*/ public BlockManager getBlockManager() { return blockManager; } public BlockIdManager getBlockIdManager() { return blockIdManager; } /** @return the FSDirectory. */ public FSDirectory getFSDirectory() { return dir; } /** Set the FSDirectory. */ @VisibleForTesting public void setFSDirectory(FSDirectory dir) { this.dir = dir; } /** @return the cache manager. */ @Override public CacheManager getCacheManager() { return cacheManager; } @Override // NameNodeMXBean public String getCorruptFiles() { List<String> list = new ArrayList<String>(); Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks; try { corruptFileBlocks = listCorruptFileBlocks("/", null); int corruptFileCount = corruptFileBlocks.size(); if (corruptFileCount != 0) { for (FSNamesystem.CorruptFileBlockInfo c : corruptFileBlocks) { list.add(c.toString()); } } } catch (IOException e) { LOG.warn("Get corrupt file blocks returned error: " + e.getMessage()); } return JSON.toString(list); } @Override //NameNodeMXBean public int getDistinctVersionCount() { return blockManager.getDatanodeManager().getDatanodesSoftwareVersions() .size(); } @Override //NameNodeMXBean public Map<String, Integer> getDistinctVersions() { return blockManager.getDatanodeManager().getDatanodesSoftwareVersions(); } @Override //NameNodeMXBean public String getSoftwareVersion() { return VersionInfo.getVersion(); } /** * Verifies that the given identifier and password are valid and match. * @param identifier Token identifier. * @param password Password in the token. */ public synchronized void verifyToken(DelegationTokenIdentifier identifier, byte[] password) throws InvalidToken, RetriableException { try { getDelegationTokenSecretManager().verifyToken(identifier, password); } catch (InvalidToken it) { if (inTransitionToActive()) { throw new RetriableException(it); } throw it; } } @Override public boolean isGenStampInFuture(Block block) { return blockIdManager.isGenStampInFuture(block); } @VisibleForTesting public EditLogTailer getEditLogTailer() { return editLogTailer; } @VisibleForTesting public void setEditLogTailerForTests(EditLogTailer tailer) { this.editLogTailer = tailer; } @VisibleForTesting void setFsLockForTests(ReentrantReadWriteLock lock) { this.fsLock.coarseLock = lock; } @VisibleForTesting public ReentrantReadWriteLock getFsLockForTests() { return fsLock.coarseLock; } @VisibleForTesting public ReentrantLock getCpLockForTests() { return cpLock; } @VisibleForTesting public SafeModeInfo getSafeModeInfoForTests() { return safeMode; } @VisibleForTesting public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) { this.nnResourceChecker = nnResourceChecker; } public SnapshotManager getSnapshotManager() { return snapshotManager; } /** Allow snapshot on a directory. */ void allowSnapshot(String path) throws IOException { checkOperation(OperationCategory.WRITE); boolean success = false; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot allow snapshot for " + path); checkSuperuserPrivilege(); FSDirSnapshotOp.allowSnapshot(dir, snapshotManager, path); success = true; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(success, "allowSnapshot", path, null, null); } /** Disallow snapshot on a directory. 
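* For example (illustrative admin flow): {@code hdfs dfsadmin -allowSnapshot /data} enables snapshots on a directory, and this method backs the matching {@code hdfs dfsadmin -disallowSnapshot /data}; both require superuser privilege.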
*/ void disallowSnapshot(String path) throws IOException { checkOperation(OperationCategory.WRITE); boolean success = false; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot disallow snapshot for " + path); checkSuperuserPrivilege(); FSDirSnapshotOp.disallowSnapshot(dir, snapshotManager, path); success = true; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(success, "disallowSnapshot", path, null, null); } /** * Create a snapshot * @param snapshotRoot The directory path where the snapshot is taken * @param snapshotName The name of the snapshot */ String createSnapshot(String snapshotRoot, String snapshotName, boolean logRetryCache) throws IOException { String snapshotPath = null; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot create snapshot for " + snapshotRoot); snapshotPath = FSDirSnapshotOp.createSnapshot(dir, snapshotManager, snapshotRoot, snapshotName, logRetryCache); } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(snapshotPath != null, "createSnapshot", snapshotRoot, snapshotPath, null); return snapshotPath; } /** * Rename a snapshot * @param path The directory path where the snapshot was taken * @param snapshotOldName Old snapshot name * @param snapshotNewName New snapshot name * @throws SafeModeException * @throws IOException */ void renameSnapshot( String path, String snapshotOldName, String snapshotNewName, boolean logRetryCache) throws IOException { boolean success = false; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot rename snapshot for " + path); FSDirSnapshotOp.renameSnapshot(dir, snapshotManager, path, snapshotOldName, snapshotNewName, logRetryCache); success = true; } finally { writeUnlock(); } getEditLog().logSync(); String oldSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotOldName); String newSnapshotRoot = Snapshot.getSnapshotPath(path, snapshotNewName); logAuditEvent(success, "renameSnapshot", oldSnapshotRoot, newSnapshotRoot, null); } /** * Get the list of snapshottable directories that are owned * by the current user. Return all the snapshottable directories if the * current user is a super user. * @return The list of all the current snapshottable directories * @throws IOException */ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() throws IOException { SnapshottableDirectoryStatus[] status = null; checkOperation(OperationCategory.READ); boolean success = false; readLock(); try { checkOperation(OperationCategory.READ); status = FSDirSnapshotOp.getSnapshottableDirListing(dir, snapshotManager); success = true; } finally { readUnlock(); } logAuditEvent(success, "listSnapshottableDirectory", null, null, null); return status; } /** * Get the difference between two snapshots (or between a snapshot and the * current status) of a snapshottable directory. * * @param path The full path of the snapshottable directory. * @param fromSnapshot Name of the snapshot to calculate the diff from. Null * or empty string indicates the current tree. * @param toSnapshot Name of the snapshot to calculated the diff to. Null or * empty string indicates the current tree. * @return A report about the difference between {@code fromSnapshot} and * {@code toSnapshot}. Modified/deleted/created/renamed files and * directories belonging to the snapshottable directories are listed * and labeled as M/-/+/R respectively. 
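* <p> * For example, {@code hdfs snapshotDiff /data s1 s2} surfaces this report from the command line (the path and snapshot names here are illustrative).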
* @throws IOException */ SnapshotDiffReport getSnapshotDiffReport(String path, String fromSnapshot, String toSnapshot) throws IOException { SnapshotDiffReport diffs = null; checkOperation(OperationCategory.READ); readLock(); try { checkOperation(OperationCategory.READ); diffs = FSDirSnapshotOp.getSnapshotDiffReport(dir, snapshotManager, path, fromSnapshot, toSnapshot); } finally { readUnlock(); } String fromSnapshotRoot = (fromSnapshot == null || fromSnapshot.isEmpty()) ? path : Snapshot.getSnapshotPath(path, fromSnapshot); String toSnapshotRoot = (toSnapshot == null || toSnapshot.isEmpty()) ? path : Snapshot.getSnapshotPath(path, toSnapshot); logAuditEvent(diffs != null, "computeSnapshotDiff", fromSnapshotRoot, toSnapshotRoot, null); return diffs; } /** * Delete a snapshot of a snapshottable directory * @param snapshotRoot The snapshottable directory * @param snapshotName The name of the to-be-deleted snapshot * @throws SafeModeException * @throws IOException */ void deleteSnapshot(String snapshotRoot, String snapshotName, boolean logRetryCache) throws IOException { boolean success = false; writeLock(); BlocksMapUpdateInfo blocksToBeDeleted = null; try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot delete snapshot for " + snapshotRoot); blocksToBeDeleted = FSDirSnapshotOp.deleteSnapshot(dir, snapshotManager, snapshotRoot, snapshotName, logRetryCache); success = true; } finally { writeUnlock(); } getEditLog().logSync(); // Breaking the pattern as removing blocks have to happen outside of the // global lock if (blocksToBeDeleted != null) { removeBlocks(blocksToBeDeleted); } String rootPath = Snapshot.getSnapshotPath(snapshotRoot, snapshotName); logAuditEvent(success, "deleteSnapshot", rootPath, null, null); } /** * Remove a list of INodeDirectorySnapshottable from the SnapshotManager * @param toRemove the list of INodeDirectorySnapshottable to be removed */ void removeSnapshottableDirs(List<INodeDirectory> toRemove) { if (snapshotManager != null) { snapshotManager.removeSnapshottable(toRemove); } } RollingUpgradeInfo queryRollingUpgrade() throws IOException { checkSuperuserPrivilege(); checkOperation(OperationCategory.READ); readLock(); try { if (!isRollingUpgrade()) { return null; } Preconditions.checkNotNull(rollingUpgradeInfo); boolean hasRollbackImage = this.getFSImage().hasRollbackFSImage(); rollingUpgradeInfo.setCreatedRollbackImages(hasRollbackImage); return rollingUpgradeInfo; } finally { readUnlock(); } } RollingUpgradeInfo startRollingUpgrade() throws IOException { checkSuperuserPrivilege(); checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); if (isRollingUpgrade()) { return rollingUpgradeInfo; } long startTime = now(); if (!haEnabled) { // for non-HA, we require NN to be in safemode startRollingUpgradeInternalForNonHA(startTime); } else { // for HA, NN cannot be in safemode checkNameNodeSafeMode("Failed to start rolling upgrade"); startRollingUpgradeInternal(startTime); } getEditLog().logStartRollingUpgrade(rollingUpgradeInfo.getStartTime()); if (haEnabled) { // roll the edit log to make sure the standby NameNode can tail getFSImage().rollEditLog(getEffectiveLayoutVersion()); } } finally { writeUnlock(); } getEditLog().logSync(); if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(true, "startRollingUpgrade", null, null, null); } return rollingUpgradeInfo; } /** * Update internal state to indicate that a rolling upgrade is in progress. 
* @param startTime rolling upgrade start time */ void startRollingUpgradeInternal(long startTime) throws IOException { checkRollingUpgrade("start rolling upgrade"); getFSImage().checkUpgrade(); setRollingUpgradeInfo(false, startTime); } /** * Update internal state to indicate that a rolling upgrade is in progress for * non-HA setup. This requires the namesystem is in SafeMode and after doing a * checkpoint for rollback the namesystem will quit the safemode automatically */ private void startRollingUpgradeInternalForNonHA(long startTime) throws IOException { Preconditions.checkState(!haEnabled); if (!isInSafeMode()) { throw new IOException("Safe mode should be turned ON " + "in order to create namespace image."); } checkRollingUpgrade("start rolling upgrade"); getFSImage().checkUpgrade(); // in non-HA setup, we do an extra checkpoint to generate a rollback image getFSImage().saveNamespace(this, NameNodeFile.IMAGE_ROLLBACK, null); LOG.info("Successfully saved namespace for preparing rolling upgrade."); // leave SafeMode automatically setSafeMode(SafeModeAction.SAFEMODE_LEAVE); setRollingUpgradeInfo(true, startTime); } void setRollingUpgradeInfo(boolean createdRollbackImages, long startTime) { rollingUpgradeInfo = new RollingUpgradeInfo(blockPoolId, createdRollbackImages, startTime, 0L); } public void setCreatedRollbackImages(boolean created) { if (rollingUpgradeInfo != null) { rollingUpgradeInfo.setCreatedRollbackImages(created); } } public RollingUpgradeInfo getRollingUpgradeInfo() { return rollingUpgradeInfo; } public boolean isNeedRollbackFsImage() { return needRollbackFsImage; } public void setNeedRollbackFsImage(boolean needRollbackFsImage) { this.needRollbackFsImage = needRollbackFsImage; } @Override // NameNodeMXBean public RollingUpgradeInfo.Bean getRollingUpgradeStatus() { if (!isRollingUpgrade()) { return null; } RollingUpgradeInfo upgradeInfo = getRollingUpgradeInfo(); if (upgradeInfo.createdRollbackImages()) { return new RollingUpgradeInfo.Bean(upgradeInfo); } readLock(); try { // check again after acquiring the read lock. upgradeInfo = getRollingUpgradeInfo(); if (upgradeInfo == null) { return null; } if (!upgradeInfo.createdRollbackImages()) { boolean hasRollbackImage = this.getFSImage().hasRollbackFSImage(); upgradeInfo.setCreatedRollbackImages(hasRollbackImage); } } catch (IOException ioe) { LOG.warn("Encountered exception setting Rollback Image", ioe); } finally { readUnlock(); } return new RollingUpgradeInfo.Bean(upgradeInfo); } /** Is rolling upgrade in progress? */ public boolean isRollingUpgrade() { return rollingUpgradeInfo != null && !rollingUpgradeInfo.isFinalized(); } /** * Returns the layout version in effect. Under normal operation, this is the * same as the software's current layout version, defined in * {@link NameNodeLayoutVersion#CURRENT_LAYOUT_VERSION}. During a rolling * upgrade, this can retain the layout version that was persisted to metadata * prior to starting the rolling upgrade, back to a lower bound defined in * {@link NameNodeLayoutVersion#MINIMUM_COMPATIBLE_LAYOUT_VERSION}. New * fsimage files and edit log segments will continue to be written with this * older layout version, so that the files are still readable by the old * software version if the admin chooses to downgrade. 
* * @return layout version in effect */ public int getEffectiveLayoutVersion() { return getEffectiveLayoutVersion(isRollingUpgrade(), fsImage.getStorage().getLayoutVersion(), NameNodeLayoutVersion.MINIMUM_COMPATIBLE_LAYOUT_VERSION, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION); } @VisibleForTesting static int getEffectiveLayoutVersion(boolean isRollingUpgrade, int storageLV, int minCompatLV, int currentLV) { if (isRollingUpgrade) { if (storageLV <= minCompatLV) { // The prior layout version satisfies the minimum compatible layout // version of the current software. Keep reporting the prior layout // as the effective one. Downgrade is possible. return storageLV; } } // The current software cannot satisfy the layout version of the prior // software. Proceed with using the current layout version. return currentLV; } /** * Performs a pre-condition check that the layout version in effect is * sufficient to support the requested {@link Feature}. If not, then the * method throws {@link HadoopIllegalArgumentException} to deny the operation. * This exception class is registered as a terse exception, so it prevents * verbose stack traces in the NameNode log. During a rolling upgrade, this * method is used to restrict usage of new features. This prevents writing * new edit log operations that would be unreadable by the old software * version if the admin chooses to downgrade. * * @param f feature to check * @throws HadoopIllegalArgumentException if the current layout version in * effect is insufficient to support the feature */ private void requireEffectiveLayoutVersionForFeature(Feature f) throws HadoopIllegalArgumentException { int lv = getEffectiveLayoutVersion(); if (!NameNodeLayoutVersion.supports(f, lv)) { throw new HadoopIllegalArgumentException(String.format( "Feature %s unsupported at NameNode layout version %d. If a " + "rolling upgrade is in progress, then it must be finalized before " + "using this feature.", f, lv)); } } void checkRollingUpgrade(String action) throws RollingUpgradeException { if (isRollingUpgrade()) { throw new RollingUpgradeException("Failed to " + action + " since a rolling upgrade is already in progress." + " Existing rolling upgrade info:\n" + rollingUpgradeInfo); } } RollingUpgradeInfo finalizeRollingUpgrade() throws IOException { checkSuperuserPrivilege(); checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); if (!isRollingUpgrade()) { return null; } checkNameNodeSafeMode("Failed to finalize rolling upgrade"); finalizeRollingUpgradeInternal(now()); getEditLog().logFinalizeRollingUpgrade(rollingUpgradeInfo.getFinalizeTime()); if (haEnabled) { // roll the edit log to make sure the standby NameNode can tail getFSImage().rollEditLog(getEffectiveLayoutVersion()); } getFSImage().updateStorageVersion(); getFSImage().renameCheckpoint(NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE); } finally { writeUnlock(); } if (!haEnabled) { // Sync not needed for ha since the edit was rolled after logging. 
getEditLog().logSync(); } if (auditLog.isInfoEnabled() && isExternalInvocation()) { logAuditEvent(true, "finalizeRollingUpgrade", null, null, null); } return rollingUpgradeInfo; } void finalizeRollingUpgradeInternal(long finalizeTime) { // Set the finalize time rollingUpgradeInfo.finalize(finalizeTime); } long addCacheDirective(CacheDirectiveInfo directive, EnumSet<CacheFlag> flags, boolean logRetryCache) throws IOException { CacheDirectiveInfo effectiveDirective = null; if (!flags.contains(CacheFlag.FORCE)) { cacheManager.waitForRescanIfNeeded(); } writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot add cache directive"); effectiveDirective = FSNDNCacheOp.addCacheDirective(this, cacheManager, directive, flags, logRetryCache); } finally { writeUnlock(); boolean success = effectiveDirective != null; if (success) { getEditLog().logSync(); } String effectiveDirectiveStr = effectiveDirective != null ? effectiveDirective.toString() : null; logAuditEvent(success, "addCacheDirective", effectiveDirectiveStr, null, null); } return effectiveDirective != null ? effectiveDirective.getId() : 0; } void modifyCacheDirective(CacheDirectiveInfo directive, EnumSet<CacheFlag> flags, boolean logRetryCache) throws IOException { boolean success = false; if (!flags.contains(CacheFlag.FORCE)) { cacheManager.waitForRescanIfNeeded(); } writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot add cache directive"); FSNDNCacheOp.modifyCacheDirective(this, cacheManager, directive, flags, logRetryCache); success = true; } finally { writeUnlock(); if (success) { getEditLog().logSync(); } String idStr = "{id: " + directive.getId().toString() + "}"; logAuditEvent(success, "modifyCacheDirective", idStr, directive.toString(), null); } } void removeCacheDirective(long id, boolean logRetryCache) throws IOException { boolean success = false; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot remove cache directives"); FSNDNCacheOp.removeCacheDirective(this, cacheManager, id, logRetryCache); success = true; } finally { writeUnlock(); String idStr = "{id: " + Long.toString(id) + "}"; logAuditEvent(success, "removeCacheDirective", idStr, null, null); } getEditLog().logSync(); } BatchedListEntries<CacheDirectiveEntry> listCacheDirectives( long startId, CacheDirectiveInfo filter) throws IOException { checkOperation(OperationCategory.READ); BatchedListEntries<CacheDirectiveEntry> results; cacheManager.waitForRescanIfNeeded(); readLock(); boolean success = false; try { checkOperation(OperationCategory.READ); results = FSNDNCacheOp.listCacheDirectives(this, cacheManager, startId, filter); success = true; } finally { readUnlock(); logAuditEvent(success, "listCacheDirectives", filter.toString(), null, null); } return results; } void addCachePool(CachePoolInfo req, boolean logRetryCache) throws IOException { writeLock(); boolean success = false; String poolInfoStr = null; try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot add cache pool" + (req == null ? 
null : req.getPoolName())); CachePoolInfo info = FSNDNCacheOp.addCachePool(this, cacheManager, req, logRetryCache); poolInfoStr = info.toString(); success = true; } finally { writeUnlock(); logAuditEvent(success, "addCachePool", poolInfoStr, null, null); } getEditLog().logSync(); } void modifyCachePool(CachePoolInfo req, boolean logRetryCache) throws IOException { writeLock(); boolean success = false; try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot modify cache pool" + (req == null ? null : req.getPoolName())); FSNDNCacheOp.modifyCachePool(this, cacheManager, req, logRetryCache); success = true; } finally { writeUnlock(); String poolNameStr = "{poolName: " + (req == null ? null : req.getPoolName()) + "}"; logAuditEvent(success, "modifyCachePool", poolNameStr, req == null ? null : req.toString(), null); } getEditLog().logSync(); } void removeCachePool(String cachePoolName, boolean logRetryCache) throws IOException { writeLock(); boolean success = false; try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot modify cache pool" + cachePoolName); FSNDNCacheOp.removeCachePool(this, cacheManager, cachePoolName, logRetryCache); success = true; } finally { writeUnlock(); String poolNameStr = "{poolName: " + cachePoolName + "}"; logAuditEvent(success, "removeCachePool", poolNameStr, null, null); } getEditLog().logSync(); } BatchedListEntries<CachePoolEntry> listCachePools(String prevKey) throws IOException { BatchedListEntries<CachePoolEntry> results; checkOperation(OperationCategory.READ); boolean success = false; cacheManager.waitForRescanIfNeeded(); readLock(); try { checkOperation(OperationCategory.READ); results = FSNDNCacheOp.listCachePools(this, cacheManager, prevKey); success = true; } finally { readUnlock(); logAuditEvent(success, "listCachePools", null, null, null); } return results; } void modifyAclEntries(final String src, List<AclEntry> aclSpec) throws IOException { HdfsFileStatus auditStat = null; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot modify ACL entries on " + src); auditStat = FSDirAclOp.modifyAclEntries(dir, src, aclSpec); } catch (AccessControlException e) { logAuditEvent(false, "modifyAclEntries", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "modifyAclEntries", src, null, auditStat); } void removeAclEntries(final String src, List<AclEntry> aclSpec) throws IOException { checkOperation(OperationCategory.WRITE); HdfsFileStatus auditStat = null; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot remove ACL entries on " + src); auditStat = FSDirAclOp.removeAclEntries(dir, src, aclSpec); } catch (AccessControlException e) { logAuditEvent(false, "removeAclEntries", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "removeAclEntries", src, null, auditStat); } void removeDefaultAcl(final String src) throws IOException { HdfsFileStatus auditStat = null; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot remove default ACL entries on " + src); auditStat = FSDirAclOp.removeDefaultAcl(dir, src); } catch (AccessControlException e) { logAuditEvent(false, "removeDefaultAcl", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "removeDefaultAcl", src, null, auditStat); } void removeAcl(final String src) throws IOException { 
HdfsFileStatus auditStat = null; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot remove ACL on " + src); auditStat = FSDirAclOp.removeAcl(dir, src); } catch (AccessControlException e) { logAuditEvent(false, "removeAcl", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "removeAcl", src, null, auditStat); } void setAcl(final String src, List<AclEntry> aclSpec) throws IOException { HdfsFileStatus auditStat = null; checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set ACL on " + src); auditStat = FSDirAclOp.setAcl(dir, src, aclSpec); } catch (AccessControlException e) { logAuditEvent(false, "setAcl", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "setAcl", src, null, auditStat); } AclStatus getAclStatus(String src) throws IOException { checkOperation(OperationCategory.READ); boolean success = false; readLock(); try { checkOperation(OperationCategory.READ); final AclStatus ret = FSDirAclOp.getAclStatus(dir, src); success = true; return ret; } finally { readUnlock(); logAuditEvent(success, "getAclStatus", src); } } /** * Create an encryption zone on directory src using the specified key. * * @param src the path of a directory which will be the root of the * encryption zone. The directory must be empty. * @param keyName name of a key which must be present in the configured * KeyProvider. * @throws AccessControlException if the caller is not the superuser. * @throws UnresolvedLinkException if the path can't be resolved. * @throws SafeModeException if the Namenode is in safe mode. */ void createEncryptionZone(final String src, final String keyName, boolean logRetryCache) throws IOException, UnresolvedLinkException, SafeModeException, AccessControlException { try { if (provider == null) { throw new IOException( "Can't create an encryption zone for " + src + " since no key provider is available."); } if (keyName == null || keyName.isEmpty()) { throw new IOException("Must specify a key name when creating an " + "encryption zone"); } KeyProvider.Metadata metadata = provider.getMetadata(keyName); if (metadata == null) { /* * It would be nice if we threw something more specific than * IOException when the key is not found, but the KeyProvider API * doesn't provide for that. If that API is ever changed to throw * something more specific (e.g. UnknownKeyException) then we can * update this to match it, or better yet, just rethrow the * KeyProvider's exception. 
*/ throw new IOException("Key " + keyName + " doesn't exist."); } // If the provider supports pool for EDEKs, this will fill in the pool provider.warmUpEncryptedKeys(keyName); createEncryptionZoneInt(src, metadata.getCipher(), keyName, logRetryCache); } catch (AccessControlException e) { logAuditEvent(false, "createEncryptionZone", src); throw e; } } private void createEncryptionZoneInt(final String srcArg, String cipher, String keyName, final boolean logRetryCache) throws IOException { String src = srcArg; HdfsFileStatus resultingStat = null; checkSuperuserPrivilege(); final byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); FSPermissionChecker pc = getPermissionChecker(); writeLock(); try { checkSuperuserPrivilege(); checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot create encryption zone on " + src); src = dir.resolvePath(pc, src, pathComponents); final CipherSuite suite = CipherSuite.convert(cipher); // For now this is hardcoded, as we only support one method. final CryptoProtocolVersion version = CryptoProtocolVersion.ENCRYPTION_ZONES; final XAttr ezXAttr = dir.createEncryptionZone(src, suite, version, keyName); List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1); xAttrs.add(ezXAttr); getEditLog().logSetXAttrs(src, xAttrs, logRetryCache); final INodesInPath iip = dir.getINodesInPath4Write(src, false); resultingStat = dir.getAuditFileInfo(iip); } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "createEncryptionZone", srcArg, null, resultingStat); } /** * Get the encryption zone for the specified path. * * @param srcArg the path of a file or directory to get the EZ for. * @return the EZ of the of the path or null if none. * @throws AccessControlException if the caller is not the superuser. * @throws UnresolvedLinkException if the path can't be resolved. 
*/ EncryptionZone getEZForPath(final String srcArg) throws AccessControlException, UnresolvedLinkException, IOException { String src = srcArg; HdfsFileStatus resultingStat = null; final byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); boolean success = false; final FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.READ); readLock(); try { checkOperation(OperationCategory.READ); src = dir.resolvePath(pc, src, pathComponents); final INodesInPath iip = dir.getINodesInPath(src, true); if (isPermissionEnabled) { dir.checkPathAccess(pc, iip, FsAction.READ); } final EncryptionZone ret = dir.getEZForPath(iip); resultingStat = dir.getAuditFileInfo(iip); success = true; return ret; } finally { readUnlock(); logAuditEvent(success, "getEZForPath", srcArg, null, resultingStat); } } BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId) throws IOException { boolean success = false; checkSuperuserPrivilege(); checkOperation(OperationCategory.READ); readLock(); try { checkSuperuserPrivilege(); checkOperation(OperationCategory.READ); final BatchedListEntries<EncryptionZone> ret = dir.listEncryptionZones(prevId); success = true; return ret; } finally { readUnlock(); logAuditEvent(success, "listEncryptionZones", null); } } void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag, boolean logRetryCache) throws IOException { HdfsFileStatus auditStat = null; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot set XAttr on " + src); auditStat = FSDirXAttrOp.setXAttr(dir, src, xAttr, flag, logRetryCache); } catch (AccessControlException e) { logAuditEvent(false, "setXAttr", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "setXAttr", src, null, auditStat); } List<XAttr> getXAttrs(final String src, List<XAttr> xAttrs) throws IOException { checkOperation(OperationCategory.READ); readLock(); try { checkOperation(OperationCategory.READ); return FSDirXAttrOp.getXAttrs(dir, src, xAttrs); } catch (AccessControlException e) { logAuditEvent(false, "getXAttrs", src); throw e; } finally { readUnlock(); } } List<XAttr> listXAttrs(String src) throws IOException { checkOperation(OperationCategory.READ); readLock(); try { checkOperation(OperationCategory.READ); return FSDirXAttrOp.listXAttrs(dir, src); } catch (AccessControlException e) { logAuditEvent(false, "listXAttrs", src); throw e; } finally { readUnlock(); } } void removeXAttr(String src, XAttr xAttr, boolean logRetryCache) throws IOException { HdfsFileStatus auditStat = null; writeLock(); try { checkOperation(OperationCategory.WRITE); checkNameNodeSafeMode("Cannot remove XAttr entry on " + src); auditStat = FSDirXAttrOp.removeXAttr(dir, src, xAttr, logRetryCache); } catch (AccessControlException e) { logAuditEvent(false, "removeXAttr", src); throw e; } finally { writeUnlock(); } getEditLog().logSync(); logAuditEvent(true, "removeXAttr", src, null, auditStat); } void checkAccess(String src, FsAction mode) throws IOException { checkOperation(OperationCategory.READ); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); readLock(); try { checkOperation(OperationCategory.READ); src = FSDirectory.resolvePath(src, pathComponents, dir); final INodesInPath iip = dir.getINodesInPath(src, true); INode inode = iip.getLastINode(); if (inode == null) { throw new FileNotFoundException("Path not found"); } if (isPermissionEnabled) { FSPermissionChecker pc = getPermissionChecker(); dir.checkPathAccess(pc, 
iip, mode); } } catch (AccessControlException e) { logAuditEvent(false, "checkAccess", src); throw e; } finally { readUnlock(); } } /** * Default AuditLogger implementation; used when no access logger is * defined in the config file. It can also be explicitly listed in the * config file. */ @VisibleForTesting static class DefaultAuditLogger extends HdfsAuditLogger { private boolean logTokenTrackingId; private Set<String> debugCmdSet = new HashSet<String>(); @Override public void initialize(Configuration conf) { logTokenTrackingId = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY, DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT); debugCmdSet.addAll(Arrays.asList(conf.getTrimmedStrings( DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST))); } @Override public void logAuditEvent(boolean succeeded, String userName, InetAddress addr, String cmd, String src, String dst, FileStatus status, UserGroupInformation ugi, DelegationTokenSecretManager dtSecretManager) { if (auditLog.isDebugEnabled() || (auditLog.isInfoEnabled() && !debugCmdSet.contains(cmd))) { final StringBuilder sb = auditBuffer.get(); sb.setLength(0); sb.append("allowed=").append(succeeded).append("\t"); sb.append("ugi=").append(userName).append("\t"); sb.append("ip=").append(addr).append("\t"); sb.append("cmd=").append(cmd).append("\t"); sb.append("src=").append(src).append("\t"); sb.append("dst=").append(dst).append("\t"); if (null == status) { sb.append("perm=null"); } else { sb.append("perm="); sb.append(status.getOwner()).append(":"); sb.append(status.getGroup()).append(":"); sb.append(status.getPermission()); } if (logTokenTrackingId) { sb.append("\t").append("trackingId="); String trackingId = null; if (ugi != null && dtSecretManager != null && ugi.getAuthenticationMethod() == AuthenticationMethod.TOKEN) { for (TokenIdentifier tid: ugi.getTokenIdentifiers()) { if (tid instanceof DelegationTokenIdentifier) { DelegationTokenIdentifier dtid = (DelegationTokenIdentifier)tid; trackingId = dtSecretManager.getTokenTrackingId(dtid); break; } } } sb.append(trackingId); } sb.append("\t").append("proto="); sb.append(NamenodeWebHdfsMethods.isWebHdfsInvocation() ? "webhdfs" : "rpc"); logAuditMessage(sb.toString()); } } public void logAuditMessage(String message) { auditLog.info(message); } } private static void enableAsyncAuditLog() { if (!(auditLog instanceof Log4JLogger)) { LOG.warn("Log4j is required to enable async auditlog"); return; } Logger logger = ((Log4JLogger)auditLog).getLogger(); @SuppressWarnings("unchecked") List<Appender> appenders = Collections.list(logger.getAllAppenders()); // failsafe against trying to async it more than once if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) { AsyncAppender asyncAppender = new AsyncAppender(); // change logger to have an async appender containing all the // previously configured appenders for (Appender appender : appenders) { logger.removeAppender(appender); asyncAppender.addAppender(appender); } logger.addAppender(asyncAppender); } } }
257920
34.521416
112
java
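The snapshot handlers in the FSNamesystem record above (allowSnapshot, createSnapshot, renameSnapshot, getSnapshotDiffReport, deleteSnapshot) are the NameNode-side entry points; clients normally reach them through the public DistributedFileSystem API. Below is a minimal, hedged client-side sketch, not part of the Hadoop sources: the directory path and snapshot names are hypothetical, fs.defaultFS is assumed to point at a running HDFS cluster, and allowSnapshot is assumed to be run by a superuser.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

public class SnapshotClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();        // assumes fs.defaultFS is an hdfs:// URI
    FileSystem fs = FileSystem.get(conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    Path dir = new Path("/data/projects");           // hypothetical directory

    dfs.allowSnapshot(dir);                          // server side: FSNamesystem#allowSnapshot (superuser only)
    dfs.createSnapshot(dir, "s1");                   // server side: FSNamesystem#createSnapshot
    // ... files under /data/projects change here ...
    dfs.createSnapshot(dir, "s2");

    // Diff between the two snapshots; server side: FSNamesystem#getSnapshotDiffReport
    SnapshotDiffReport diff = dfs.getSnapshotDiffReport(dir, "s1", "s2");
    System.out.println(diff);

    dfs.deleteSnapshot(dir, "s1");                   // server side: FSNamesystem#deleteSnapshot
  }
}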
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import com.google.common.base.Preconditions; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.InvalidPathException; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.util.ChunkedArrayList; import org.apache.hadoop.util.Time; import java.io.FileNotFoundException; import java.io.IOException; import java.util.AbstractMap; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import static org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; import static org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException; class FSDirRenameOp { @Deprecated static RenameOldResult renameToInt( FSDirectory fsd, final String srcArg, final String dstArg, boolean logRetryCache) throws IOException { String src = srcArg; String dst = dstArg; if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: " + src + " to " + dst); } if (!DFSUtil.isValidName(dst)) { throw new IOException("Invalid name: " + dst); } FSPermissionChecker pc = fsd.getPermissionChecker(); byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src); byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst); HdfsFileStatus resultingStat = null; src = fsd.resolvePath(pc, src, srcComponents); dst = fsd.resolvePath(pc, dst, dstComponents); @SuppressWarnings("deprecation") final boolean status = renameTo(fsd, pc, src, dst, logRetryCache); if (status) { INodesInPath dstIIP = fsd.getINodesInPath(dst, false); resultingStat = fsd.getAuditFileInfo(dstIIP); } return new RenameOldResult(status, resultingStat); } /** * Verify quota for rename operation where srcInodes[srcInodes.length-1] moves * dstInodes[dstInodes.length-1] */ private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src, INodesInPath dst) throws QuotaExceededException { if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) { // Do not check quota if edits log 
is still being processed return; } int i = 0; while(src.getINode(i) == dst.getINode(i)) { i++; } // src[i - 1] is the last common ancestor. BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite(); final QuotaCounts delta = src.getLastINode().computeQuotaUsage(bsps); // Reduce the required quota by dst that is being removed final INode dstINode = dst.getLastINode(); if (dstINode != null) { delta.subtract(dstINode.computeQuotaUsage(bsps)); } FSDirectory.verifyQuota(dst, dst.length() - 1, delta, src.getINode(i - 1)); } /** * Checks file system limits (max component length and max directory items) * during a rename operation. */ static void verifyFsLimitsForRename(FSDirectory fsd, INodesInPath srcIIP, INodesInPath dstIIP) throws PathComponentTooLongException, MaxDirectoryItemsExceededException { byte[] dstChildName = dstIIP.getLastLocalName(); final String parentPath = dstIIP.getParentPath(); fsd.verifyMaxComponentLength(dstChildName, parentPath); // Do not enforce max directory items if renaming within same directory. if (srcIIP.getINode(-2) != dstIIP.getINode(-2)) { fsd.verifyMaxDirItems(dstIIP.getINode(-2).asDirectory(), parentPath); } } /** * <br> * Note: This is to be used by {@link FSEditLogLoader} only. * <br> */ @Deprecated @SuppressWarnings("deprecation") static boolean renameForEditLog(FSDirectory fsd, String src, String dst, long timestamp) throws IOException { if (fsd.isDir(dst)) { dst += Path.SEPARATOR + new Path(src).getName(); } final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false); final INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false); return unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, timestamp); } /** * Change a path name * * @param fsd FSDirectory * @param src source path * @param dst destination path * @return true if rename succeeds; false otherwise * @deprecated See {@link #renameToInt(FSDirectory, String, String, * boolean, Options.Rename...)} */ @Deprecated static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst, final INodesInPath srcIIP, final INodesInPath dstIIP, long timestamp) throws IOException { assert fsd.hasWriteLock(); final INode srcInode = srcIIP.getLastINode(); try { validateRenameSource(srcIIP); } catch (SnapshotException e) { throw e; } catch (IOException ignored) { return false; } // validate the destination if (dst.equals(src)) { return true; } try { validateDestination(src, dst, srcInode); } catch (IOException ignored) { return false; } if (dstIIP.getLastINode() != null) { NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + "failed to rename " + src + " to " + dst + " because destination " + "exists"); return false; } INode dstParent = dstIIP.getINode(-2); if (dstParent == null) { NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + "failed to rename " + src + " to " + dst + " because destination's " + "parent does not exist"); return false; } fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src); // Ensure dst has quota to accommodate rename verifyFsLimitsForRename(fsd, srcIIP, dstIIP); verifyQuotaForRename(fsd, srcIIP, dstIIP); RenameOperation tx = new RenameOperation(fsd, src, dst, srcIIP, dstIIP); boolean added = false; try { // remove src if (!tx.removeSrc4OldRename()) { return false; } added = tx.addSourceToDestination(); if (added) { if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* FSDirectory" + ".unprotectedRenameTo: " + src + " is renamed to " + dst); } tx.updateMtimeAndLease(timestamp); 
tx.updateQuotasInSourceTree(fsd.getBlockStoragePolicySuite()); return true; } } finally { if (!added) { tx.restoreSource(); } } NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + "failed to rename " + src + " to " + dst); return false; } /** * The new rename which has the POSIX semantic. */ static Map.Entry<BlocksMapUpdateInfo, HdfsFileStatus> renameToInt( FSDirectory fsd, final String srcArg, final String dstArg, boolean logRetryCache, Options.Rename... options) throws IOException { String src = srcArg; String dst = dstArg; if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options -" + " " + src + " to " + dst); } if (!DFSUtil.isValidName(dst)) { throw new InvalidPathException("Invalid name: " + dst); } final FSPermissionChecker pc = fsd.getPermissionChecker(); byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src); byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst); BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); src = fsd.resolvePath(pc, src, srcComponents); dst = fsd.resolvePath(pc, dst, dstComponents); renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options); INodesInPath dstIIP = fsd.getINodesInPath(dst, false); HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dstIIP); return new AbstractMap.SimpleImmutableEntry<>( collectedBlocks, resultingStat); } /** * @see {@link #unprotectedRenameTo(FSDirectory, String, String, INodesInPath, * INodesInPath, long, BlocksMapUpdateInfo, Options.Rename...)} */ static void renameTo(FSDirectory fsd, FSPermissionChecker pc, String src, String dst, BlocksMapUpdateInfo collectedBlocks, boolean logRetryCache, Options.Rename... options) throws IOException { final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false); final INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false); if (fsd.isPermissionEnabled()) { // Rename does not operate on link targets // Do not resolveLink when checking permissions of src and dst // Check write access to parent of src fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null, false); // Check write access to ancestor of dst fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null, null, false); } if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src + " to " + dst); } final long mtime = Time.now(); fsd.writeLock(); try { if (unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, mtime, collectedBlocks, options)) { FSDirDeleteOp.incrDeletedFileCount(1); } } finally { fsd.writeUnlock(); } fsd.getEditLog().logRename(src, dst, mtime, logRetryCache, options); } /** * Rename src to dst. * <br> * Note: This is to be used by {@link org.apache.hadoop.hdfs.server * .namenode.FSEditLogLoader} only. * <br> * * @param fsd FSDirectory * @param src source path * @param dst destination path * @param timestamp modification time * @param options Rename options */ static void renameForEditLog( FSDirectory fsd, String src, String dst, long timestamp, Options.Rename... 
options) throws IOException { BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false); final INodesInPath dstIIP = fsd.getINodesInPath4Write(dst, false); unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, timestamp, collectedBlocks, options); if (!collectedBlocks.getToDeleteList().isEmpty()) { fsd.getFSNamesystem().removeBlocksAndUpdateSafemodeTotal(collectedBlocks); } } /** * Rename src to dst. * See {@link DistributedFileSystem#rename(Path, Path, Options.Rename...)} * for details related to rename semantics and exceptions. * * @param fsd FSDirectory * @param src source path * @param dst destination path * @param timestamp modification time * @param collectedBlocks blocks to be removed * @param options Rename options * @return whether a file/directory gets overwritten in the dst path */ static boolean unprotectedRenameTo(FSDirectory fsd, String src, String dst, final INodesInPath srcIIP, final INodesInPath dstIIP, long timestamp, BlocksMapUpdateInfo collectedBlocks, Options.Rename... options) throws IOException { assert fsd.hasWriteLock(); boolean overwrite = options != null && Arrays.asList(options).contains(Options.Rename.OVERWRITE); final String error; final INode srcInode = srcIIP.getLastINode(); validateRenameSource(srcIIP); // validate the destination if (dst.equals(src)) { throw new FileAlreadyExistsException("The source " + src + " and destination " + dst + " are the same"); } validateDestination(src, dst, srcInode); if (dstIIP.length() == 1) { error = "rename destination cannot be the root"; NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error); throw new IOException(error); } BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite(); fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src); final INode dstInode = dstIIP.getLastINode(); List<INodeDirectory> snapshottableDirs = new ArrayList<>(); if (dstInode != null) { // Destination exists validateOverwrite(src, dst, overwrite, srcInode, dstInode); FSDirSnapshotOp.checkSnapshot(dstInode, snapshottableDirs); } INode dstParent = dstIIP.getINode(-2); if (dstParent == null) { error = "rename destination parent " + dst + " not found."; NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error); throw new FileNotFoundException(error); } if (!dstParent.isDirectory()) { error = "rename destination parent " + dst + " is a file."; NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error); throw new ParentNotDirectoryException(error); } // Ensure dst has quota to accommodate rename verifyFsLimitsForRename(fsd, srcIIP, dstIIP); verifyQuotaForRename(fsd, srcIIP, dstIIP); RenameOperation tx = new RenameOperation(fsd, src, dst, srcIIP, dstIIP); boolean undoRemoveSrc = true; tx.removeSrc(); boolean undoRemoveDst = false; long removedNum = 0; try { if (dstInode != null) { // dst exists, remove it removedNum = tx.removeDst(); if (removedNum != -1) { undoRemoveDst = true; } } // add src as dst to complete rename if (tx.addSourceToDestination()) { undoRemoveSrc = false; if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: " + src + " is renamed to " + dst); } tx.updateMtimeAndLease(timestamp); // Collect the blocks and remove the lease for previous dst boolean filesDeleted = false; if (undoRemoveDst) { undoRemoveDst = false; if (removedNum > 0) { filesDeleted = tx.cleanDst(bsps, collectedBlocks); } } if (snapshottableDirs.size() > 0) { // There 
are snapshottable directories (without snapshots) to be // deleted. Need to update the SnapshotManager. fsd.getFSNamesystem().removeSnapshottableDirs(snapshottableDirs); } tx.updateQuotasInSourceTree(bsps); return filesDeleted; } } finally { if (undoRemoveSrc) { tx.restoreSource(); } if (undoRemoveDst) { // Rename failed - restore dst tx.restoreDst(bsps); } } NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + "failed to rename " + src + " to " + dst); throw new IOException("rename from " + src + " to " + dst + " failed."); } /** * @deprecated Use {@link #renameToInt(FSDirectory, String, String, * boolean, Options.Rename...)} */ @Deprecated @SuppressWarnings("deprecation") private static boolean renameTo(FSDirectory fsd, FSPermissionChecker pc, String src, String dst, boolean logRetryCache) throws IOException { // Rename does not operate on link targets // Do not resolveLink when checking permissions of src and dst // Check write access to parent of src final INodesInPath srcIIP = fsd.getINodesInPath4Write(src, false); // Note: We should not be doing this. This is move() not renameTo(). final String actualDst = fsd.isDir(dst) ? dst + Path.SEPARATOR + new Path(src).getName() : dst; final INodesInPath dstIIP = fsd.getINodesInPath4Write(actualDst, false); if (fsd.isPermissionEnabled()) { fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null, false); // Check write access to ancestor of dst fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null, null, false); } if (NameNode.stateChangeLog.isDebugEnabled()) { NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: " + src + " to " + dst); } final long mtime = Time.now(); boolean stat = false; fsd.writeLock(); try { stat = unprotectedRenameTo(fsd, src, actualDst, srcIIP, dstIIP, mtime); } finally { fsd.writeUnlock(); } if (stat) { fsd.getEditLog().logRename(src, actualDst, mtime, logRetryCache); return true; } return false; } private static void validateDestination( String src, String dst, INode srcInode) throws IOException { String error; if (srcInode.isSymlink() && dst.equals(srcInode.asSymlink().getSymlinkString())) { throw new FileAlreadyExistsException("Cannot rename symlink " + src + " to its target " + dst); } // dst cannot be a directory or a file under src if (dst.startsWith(src) && dst.charAt(src.length()) == Path.SEPARATOR_CHAR) { error = "Rename destination " + dst + " is a directory or file under source " + src; NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error); throw new IOException(error); } } private static void validateOverwrite( String src, String dst, boolean overwrite, INode srcInode, INode dstInode) throws IOException { String error;// It's OK to rename a file to a symlink and vice versa if (dstInode.isDirectory() != srcInode.isDirectory()) { error = "Source " + src + " and destination " + dst + " must both be directories"; NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error); throw new IOException(error); } if (!overwrite) { // If destination exists, overwrite flag must be true error = "rename destination " + dst + " already exists"; NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error); throw new FileAlreadyExistsException(error); } if (dstInode.isDirectory()) { final ReadOnlyList<INode> children = dstInode.asDirectory() .getChildrenList(Snapshot.CURRENT_STATE_ID); if (!children.isEmpty()) { error = "rename destination directory is not empty: " + dst; NameNode.stateChangeLog.warn("DIR* 
FSDirectory.unprotectedRenameTo: " + error); throw new IOException(error); } } } private static void validateRenameSource(INodesInPath srcIIP) throws IOException { String error; final INode srcInode = srcIIP.getLastINode(); // validate source if (srcInode == null) { error = "rename source " + srcIIP.getPath() + " is not found."; NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error); throw new FileNotFoundException(error); } if (srcIIP.length() == 1) { error = "rename source cannot be the root"; NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: " + error); throw new IOException(error); } // srcInode and its subtree cannot contain snapshottable directories with // snapshots FSDirSnapshotOp.checkSnapshot(srcInode, null); } private static class RenameOperation { private final FSDirectory fsd; private INodesInPath srcIIP; private final INodesInPath srcParentIIP; private INodesInPath dstIIP; private final INodesInPath dstParentIIP; private final String src; private final String dst; private final INodeReference.WithCount withCount; private final int srcRefDstSnapshot; private final INodeDirectory srcParent; private final byte[] srcChildName; private final boolean isSrcInSnapshot; private final boolean srcChildIsReference; private final QuotaCounts oldSrcCounts; private INode srcChild; private INode oldDstChild; RenameOperation(FSDirectory fsd, String src, String dst, INodesInPath srcIIP, INodesInPath dstIIP) throws QuotaExceededException { this.fsd = fsd; this.src = src; this.dst = dst; this.srcIIP = srcIIP; this.dstIIP = dstIIP; this.srcParentIIP = srcIIP.getParentINodesInPath(); this.dstParentIIP = dstIIP.getParentINodesInPath(); BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite(); srcChild = this.srcIIP.getLastINode(); srcChildName = srcChild.getLocalNameBytes(); final int srcLatestSnapshotId = srcIIP.getLatestSnapshotId(); isSrcInSnapshot = srcChild.isInLatestSnapshot(srcLatestSnapshotId); srcChildIsReference = srcChild.isReference(); srcParent = this.srcIIP.getINode(-2).asDirectory(); // Record the snapshot on srcChild. After the rename, before any new // snapshot is taken on the dst tree, changes will be recorded in the // latest snapshot of the src tree. if (isSrcInSnapshot) { srcChild.recordModification(srcLatestSnapshotId); } // check srcChild for reference srcRefDstSnapshot = srcChildIsReference ? 
srcChild.asReference().getDstSnapshotId() : Snapshot.CURRENT_STATE_ID; oldSrcCounts = new QuotaCounts.Builder().build(); if (isSrcInSnapshot) { final INodeReference.WithName withName = srcParent .replaceChild4ReferenceWithName(srcChild, srcLatestSnapshotId); withCount = (INodeReference.WithCount) withName.getReferredINode(); srcChild = withName; this.srcIIP = INodesInPath.replace(srcIIP, srcIIP.length() - 1, srcChild); // get the counts before rename oldSrcCounts.add(withCount.getReferredINode().computeQuotaUsage(bsps)); } else if (srcChildIsReference) { // srcChild is reference but srcChild is not in latest snapshot withCount = (INodeReference.WithCount) srcChild.asReference() .getReferredINode(); } else { withCount = null; } } long removeSrc() throws IOException { long removedNum = fsd.removeLastINode(srcIIP); if (removedNum == -1) { String error = "Failed to rename " + src + " to " + dst + " because the source can not be removed"; NameNode.stateChangeLog.warn("DIR* FSDirRenameOp.unprotectedRenameTo:" + error); throw new IOException(error); } else { // update the quota count if necessary fsd.updateCountForDelete(srcChild, srcIIP); srcIIP = INodesInPath.replace(srcIIP, srcIIP.length() - 1, null); return removedNum; } } boolean removeSrc4OldRename() { final long removedSrc = fsd.removeLastINode(srcIIP); if (removedSrc == -1) { NameNode.stateChangeLog.warn("DIR* FSDirRenameOp.unprotectedRenameTo: " + "failed to rename " + src + " to " + dst + " because the source" + " can not be removed"); return false; } else { // update the quota count if necessary fsd.updateCountForDelete(srcChild, srcIIP); srcIIP = INodesInPath.replace(srcIIP, srcIIP.length() - 1, null); return true; } } long removeDst() { long removedNum = fsd.removeLastINode(dstIIP); if (removedNum != -1) { oldDstChild = dstIIP.getLastINode(); // update the quota count if necessary fsd.updateCountForDelete(oldDstChild, dstIIP); dstIIP = INodesInPath.replace(dstIIP, dstIIP.length() - 1, null); } return removedNum; } boolean addSourceToDestination() { final INode dstParent = dstParentIIP.getLastINode(); final byte[] dstChildName = dstIIP.getLastLocalName(); final INode toDst; if (withCount == null) { srcChild.setLocalName(dstChildName); toDst = srcChild; } else { withCount.getReferredINode().setLocalName(dstChildName); toDst = new INodeReference.DstReference(dstParent.asDirectory(), withCount, dstIIP.getLatestSnapshotId()); } return fsd.addLastINodeNoQuotaCheck(dstParentIIP, toDst) != null; } void updateMtimeAndLease(long timestamp) throws QuotaExceededException { srcParent.updateModificationTime(timestamp, srcIIP.getLatestSnapshotId()); final INode dstParent = dstParentIIP.getLastINode(); dstParent.updateModificationTime(timestamp, dstIIP.getLatestSnapshotId()); } void restoreSource() throws QuotaExceededException { // Rename failed - restore src final INode oldSrcChild = srcChild; // put it back if (withCount == null) { srcChild.setLocalName(srcChildName); } else if (!srcChildIsReference) { // src must be in snapshot // the withCount node will no longer be used thus no need to update // its reference number here srcChild = withCount.getReferredINode(); srcChild.setLocalName(srcChildName); } else { withCount.removeReference(oldSrcChild.asReference()); srcChild = new INodeReference.DstReference(srcParent, withCount, srcRefDstSnapshot); withCount.getReferredINode().setLocalName(srcChildName); } if (isSrcInSnapshot) { srcParent.undoRename4ScrParent(oldSrcChild.asReference(), srcChild); } else { // srcParent is not an 
INodeDirectoryWithSnapshot, we only need to add // the srcChild back fsd.addLastINodeNoQuotaCheck(srcParentIIP, srcChild); } } void restoreDst(BlockStoragePolicySuite bsps) throws QuotaExceededException { Preconditions.checkState(oldDstChild != null); final INodeDirectory dstParent = dstParentIIP.getLastINode().asDirectory(); if (dstParent.isWithSnapshot()) { dstParent.undoRename4DstParent(bsps, oldDstChild, dstIIP.getLatestSnapshotId()); } else { fsd.addLastINodeNoQuotaCheck(dstParentIIP, oldDstChild); } if (oldDstChild != null && oldDstChild.isReference()) { final INodeReference removedDstRef = oldDstChild.asReference(); final INodeReference.WithCount wc = (INodeReference.WithCount) removedDstRef.getReferredINode().asReference(); wc.addReference(removedDstRef); } } boolean cleanDst(BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks) throws QuotaExceededException { Preconditions.checkState(oldDstChild != null); List<INode> removedINodes = new ChunkedArrayList<>(); List<Long> removedUCFiles = new ChunkedArrayList<>(); INode.ReclaimContext context = new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, removedUCFiles); final boolean filesDeleted; if (!oldDstChild.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) { oldDstChild.destroyAndCollectBlocks(context); filesDeleted = true; } else { oldDstChild.cleanSubtree(context, Snapshot.CURRENT_STATE_ID, dstIIP.getLatestSnapshotId()); filesDeleted = context.quotaDelta().getNsDelta() >= 0; } fsd.getFSNamesystem().removeLeasesAndINodes( removedUCFiles, removedINodes, false); return filesDeleted; } void updateQuotasInSourceTree(BlockStoragePolicySuite bsps) throws QuotaExceededException { // update the quota usage in src tree if (isSrcInSnapshot) { // get the counts after rename QuotaCounts newSrcCounts = srcChild.computeQuotaUsage(bsps, false); newSrcCounts.subtract(oldSrcCounts); srcParent.addSpaceConsumed(newSrcCounts, false); } } } static class RenameOldResult { final boolean success; final HdfsFileStatus auditStat; RenameOldResult(boolean success, HdfsFileStatus auditStat) { this.success = success; this.auditStat = auditStat; } } }
29146
36.902471
98
java
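FSDirRenameOp above keeps two code paths: the deprecated rename without options and the POSIX-style rename driven by Options.Rename flags (see validateOverwrite, removeDst and cleanDst). A hedged sketch of how a client reaches the options-based path through FileContext follows; it is not part of the Hadoop sources, the paths are hypothetical, and fs.defaultFS is assumed to be an HDFS URI.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class RenameSketch {
  public static void main(String[] args) throws Exception {
    // FileContext#rename(Path, Path, Options.Rename...) ends up in the
    // options-based FSDirRenameOp.renameToInt path on the NameNode.
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path src = new Path("/tmp/staging/part-0000");   // hypothetical source
    Path dst = new Path("/data/final/part-0000");    // hypothetical destination

    // With OVERWRITE, an existing dst is removed first (a destination
    // directory must be empty); without it, an existing dst raises
    // FileAlreadyExistsException, as validateOverwrite above shows.
    fc.rename(src, dst, Options.Rename.OVERWRITE);
  }
}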
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * Op codes for edits file */ @InterfaceAudience.Private @InterfaceStability.Unstable public enum FSEditLogOpCodes { // last op code in file OP_ADD ((byte) 0), OP_RENAME_OLD ((byte) 1), // deprecated operation OP_DELETE ((byte) 2), OP_MKDIR ((byte) 3), OP_SET_REPLICATION ((byte) 4), @Deprecated OP_DATANODE_ADD ((byte) 5), // obsolete @Deprecated OP_DATANODE_REMOVE((byte) 6), // obsolete OP_SET_PERMISSIONS ((byte) 7), OP_SET_OWNER ((byte) 8), OP_CLOSE ((byte) 9), OP_SET_GENSTAMP_V1 ((byte) 10), OP_SET_NS_QUOTA ((byte) 11), // obsolete OP_CLEAR_NS_QUOTA ((byte) 12), // obsolete OP_TIMES ((byte) 13), // set atime, mtime OP_SET_QUOTA ((byte) 14), OP_RENAME ((byte) 15), // filecontext rename OP_CONCAT_DELETE ((byte) 16), // concat files OP_SYMLINK ((byte) 17), OP_GET_DELEGATION_TOKEN ((byte) 18), OP_RENEW_DELEGATION_TOKEN ((byte) 19), OP_CANCEL_DELEGATION_TOKEN ((byte) 20), OP_UPDATE_MASTER_KEY ((byte) 21), OP_REASSIGN_LEASE ((byte) 22), OP_END_LOG_SEGMENT ((byte) 23), OP_START_LOG_SEGMENT ((byte) 24), OP_UPDATE_BLOCKS ((byte) 25), OP_CREATE_SNAPSHOT ((byte) 26), OP_DELETE_SNAPSHOT ((byte) 27), OP_RENAME_SNAPSHOT ((byte) 28), OP_ALLOW_SNAPSHOT ((byte) 29), OP_DISALLOW_SNAPSHOT ((byte) 30), OP_SET_GENSTAMP_V2 ((byte) 31), OP_ALLOCATE_BLOCK_ID ((byte) 32), OP_ADD_BLOCK ((byte) 33), OP_ADD_CACHE_DIRECTIVE ((byte) 34), OP_REMOVE_CACHE_DIRECTIVE ((byte) 35), OP_ADD_CACHE_POOL ((byte) 36), OP_MODIFY_CACHE_POOL ((byte) 37), OP_REMOVE_CACHE_POOL ((byte) 38), OP_MODIFY_CACHE_DIRECTIVE ((byte) 39), OP_SET_ACL ((byte) 40), OP_ROLLING_UPGRADE_START ((byte) 41), OP_ROLLING_UPGRADE_FINALIZE ((byte) 42), OP_SET_XATTR ((byte) 43), OP_REMOVE_XATTR ((byte) 44), OP_SET_STORAGE_POLICY ((byte) 45), OP_TRUNCATE ((byte) 46), OP_APPEND ((byte) 47), OP_SET_QUOTA_BY_STORAGETYPE ((byte) 48), // Note that the current range of the valid OP code is 0~127 OP_INVALID ((byte) -1); private final byte opCode; /** * Constructor * * @param opCode byte value of constructed enum */ FSEditLogOpCodes(byte opCode) { this.opCode = opCode; } /** * return the byte value of the enum * * @return the byte value of the enum */ public byte getOpCode() { return opCode; } private static final FSEditLogOpCodes[] VALUES; static { byte max = 0; for (FSEditLogOpCodes code : FSEditLogOpCodes.values()) { if (code.getOpCode() > max) { max = code.getOpCode(); } } VALUES = new FSEditLogOpCodes[max + 1]; for (FSEditLogOpCodes code : FSEditLogOpCodes.values()) { if (code.getOpCode() >= 0) { VALUES[code.getOpCode()] = code; } } } /** * Converts byte to FSEditLogOpCodes enum value * * @param opCode get enum for this opCode 
* @return enum with byte value of opCode */ public static FSEditLogOpCodes fromByte(byte opCode) { if (opCode >= 0 && opCode < VALUES.length) { return VALUES[opCode]; } return opCode == -1 ? OP_INVALID : null; } }
4,698
34.330827
75
java
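The fromByte lookup above avoids a linear scan over the enum by indexing a precomputed VALUES table; unknown positive codes resolve to null and -1 resolves to OP_INVALID. A minimal usage sketch, assuming hadoop-hdfs is on the classpath; the wrapper class and sample byte values are illustrative only:

import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;

// Hypothetical decoding sketch; bytes chosen to exercise the table lookup,
// the OP_INVALID fallback, and the null result for an unknown code.
public class OpCodeDecodeSketch {
  public static void main(String[] args) {
    byte[] raw = { (byte) 0, (byte) 9, (byte) 24, (byte) -1, (byte) 100 };
    for (byte b : raw) {
      FSEditLogOpCodes code = FSEditLogOpCodes.fromByte(b);
      System.out.println(b + " -> " + code);  // 0 -> OP_ADD, -1 -> OP_INVALID, 100 -> null
    }
  }
}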
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AuditLogger.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.net.InetAddress; import java.security.Principal; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; /** * Interface defining an audit logger. */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface AuditLogger { /** * Called during initialization of the logger. * * @param conf The configuration object. */ void initialize(Configuration conf); /** * Called to log an audit event. * <p> * This method must return as quickly as possible, since it's called * in a critical section of the NameNode's operation. * * @param succeeded Whether authorization succeeded. * @param userName Name of the user executing the request. * @param addr Remote address of the request. * @param cmd The requested command. * @param src Path of affected source file. * @param dst Path of affected destination file (if any). * @param stat File information for operations that change the file's * metadata (permissions, owner, times, etc). */ void logAuditEvent(boolean succeeded, String userName, InetAddress addr, String cmd, String src, String dst, FileStatus stat); }
2,179
34.16129
75
java
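Because logAuditEvent runs inside a critical section of the NameNode, custom implementations of the interface above should do as little work as possible. A minimal sketch of an implementation of the AuditLogger interface shown above; the class name and output destination are illustrative, and wiring it in (typically via the dfs.namenode.audit.loggers key) is left out:

import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

// Illustrative audit logger that writes one line per event to stdout.
public class StdoutAuditLogger implements AuditLogger {
  @Override
  public void initialize(Configuration conf) {
    // Nothing to configure in this sketch.
  }

  @Override
  public void logAuditEvent(boolean succeeded, String userName,
      InetAddress addr, String cmd, String src, String dst, FileStatus stat) {
    // Keep this cheap: it is called on the NameNode's request path.
    System.out.println("audit allowed=" + succeeded + " ugi=" + userName
        + " ip=" + addr + " cmd=" + cmd + " src=" + src + " dst=" + dst);
  }
}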
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.File; import java.io.IOException; import java.net.URI; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DF; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.common.Util; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Collections2; import com.google.common.base.Predicate; /** * * NameNodeResourceChecker provides a method - * <code>hasAvailableDiskSpace</code> - which will return true if and only if * the NameNode has disk space available on all required volumes, and any volume * which is configured to be redundant. Volumes containing file system edits dirs * are added by default, and arbitrary extra volumes may be configured as well. */ @InterfaceAudience.Private public class NameNodeResourceChecker { private static final Log LOG = LogFactory.getLog(NameNodeResourceChecker.class.getName()); // Space (in bytes) reserved per volume. private final long duReserved; private final Configuration conf; private Map<String, CheckedVolume> volumes; private int minimumRedundantVolumes; @VisibleForTesting class CheckedVolume implements CheckableNameNodeResource { private DF df; private boolean required; private String volume; public CheckedVolume(File dirToCheck, boolean required) throws IOException { df = new DF(dirToCheck, conf); this.required = required; volume = df.getFilesystem(); } public String getVolume() { return volume; } @Override public boolean isRequired() { return required; } @Override public boolean isResourceAvailable() { long availableSpace = df.getAvailable(); if (LOG.isDebugEnabled()) { LOG.debug("Space available on volume '" + volume + "' is " + availableSpace); } if (availableSpace < duReserved) { LOG.warn("Space available on volume '" + volume + "' is " + availableSpace + ", which is below the configured reserved amount " + duReserved); return false; } else { return true; } } @Override public String toString() { return "volume: " + volume + " required: " + required + " resource available: " + isResourceAvailable(); } } /** * Create a NameNodeResourceChecker, which will check the edits dirs and any * additional dirs to check set in <code>conf</code>. 
*/ public NameNodeResourceChecker(Configuration conf) throws IOException { this.conf = conf; volumes = new HashMap<String, CheckedVolume>(); duReserved = conf.getLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_DEFAULT); Collection<URI> extraCheckedVolumes = Util.stringCollectionAsURIs(conf .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY)); Collection<URI> localEditDirs = Collections2.filter( FSNamesystem.getNamespaceEditsDirs(conf), new Predicate<URI>() { @Override public boolean apply(URI input) { if (input.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) { return true; } return false; } }); // Add all the local edits dirs, marking some as required if they are // configured as such. for (URI editsDirToCheck : localEditDirs) { addDirToCheck(editsDirToCheck, FSNamesystem.getRequiredNamespaceEditsDirs(conf).contains( editsDirToCheck)); } // All extra checked volumes are marked "required" for (URI extraDirToCheck : extraCheckedVolumes) { addDirToCheck(extraDirToCheck, true); } minimumRedundantVolumes = conf.getInt( DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_DEFAULT); } /** * Add the volume of the passed-in directory to the list of volumes to check. * If <code>required</code> is true, and this volume is already present, but * is marked redundant, it will be marked required. If the volume is already * present but marked required then this method is a no-op. * * @param directoryToCheck * The directory whose volume will be checked for available space. */ private void addDirToCheck(URI directoryToCheck, boolean required) throws IOException { File dir = new File(directoryToCheck.getPath()); if (!dir.exists()) { throw new IOException("Missing directory "+dir.getAbsolutePath()); } CheckedVolume newVolume = new CheckedVolume(dir, required); CheckedVolume volume = volumes.get(newVolume.getVolume()); if (volume == null || !volume.isRequired()) { volumes.put(newVolume.getVolume(), newVolume); } } /** * Return true if disk space is available on at least one of the configured * redundant volumes, and all of the configured required volumes. * * @return True if the configured amount of disk space is available on at * least one redundant volume and all of the required volumes, false * otherwise. */ public boolean hasAvailableDiskSpace() { return NameNodeResourcePolicy.areResourcesAvailable(volumes.values(), minimumRedundantVolumes); } /** * Return the set of directories which are low on space. * * @return the set of directories whose free space is below the threshold. */ @VisibleForTesting Collection<String> getVolumesLowOnSpace() throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Going to check the following volumes disk space: " + volumes); } Collection<String> lowVolumes = new ArrayList<String>(); for (CheckedVolume volume : volumes.values()) { lowVolumes.add(volume.getVolume()); } return lowVolumes; } @VisibleForTesting void setVolumes(Map<String, CheckedVolume> volumes) { this.volumes = volumes; } @VisibleForTesting void setMinimumReduntdantVolumes(int minimumRedundantVolumes) { this.minimumRedundantVolumes = minimumRedundantVolumes; } }
7,299
33.433962
92
java
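The check described in the javadoc (all required volumes must have space, plus a minimum number of the redundant ones) is delegated to NameNodeResourcePolicy.areResourcesAvailable. A standalone sketch of that policy as described above; the class and helper names are illustrative and the real implementation may treat edge cases differently:

import java.util.Arrays;
import java.util.Collection;

final class ResourcePolicySketch {
  interface CheckableResource {
    boolean isRequired();
    boolean isResourceAvailable();
  }

  // Simple value holder standing in for CheckedVolume.
  static final class Volume implements CheckableResource {
    private final boolean required, available;
    Volume(boolean required, boolean available) {
      this.required = required;
      this.available = available;
    }
    @Override public boolean isRequired() { return required; }
    @Override public boolean isResourceAvailable() { return available; }
  }

  static boolean areResourcesAvailable(
      Collection<? extends CheckableResource> resources, int minimumRedundant) {
    int redundantTotal = 0, redundantAvailable = 0;
    for (CheckableResource r : resources) {
      if (r.isRequired()) {
        if (!r.isResourceAvailable()) {
          return false;   // any required volume low on space fails the check
        }
      } else {
        redundantTotal++;
        if (r.isResourceAvailable()) {
          redundantAvailable++;
        }
      }
    }
    // With no redundant volumes configured, only the required ones matter.
    return redundantTotal == 0 || redundantAvailable >= minimumRedundant;
  }

  public static void main(String[] args) {
    // One healthy required volume, one of two redundant volumes healthy, minimum 1: true.
    System.out.println(areResourcesAvailable(Arrays.asList(
        new Volume(true, true), new Volume(false, false), new Volume(false, true)), 1));
  }
}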
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.DataOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.LayoutFlags; import org.apache.hadoop.io.IOUtils; import com.google.common.annotations.VisibleForTesting; /** * An implementation of the abstract class {@link EditLogOutputStream}, which * stores edits in a local file. */ @InterfaceAudience.Private public class EditLogFileOutputStream extends EditLogOutputStream { private static final Log LOG = LogFactory.getLog(EditLogFileOutputStream.class); public static final int MIN_PREALLOCATION_LENGTH = 1024 * 1024; private File file; private FileOutputStream fp; // file stream for storing edit logs private FileChannel fc; // channel of the file stream for sync private EditsDoubleBuffer doubleBuf; static final ByteBuffer fill = ByteBuffer.allocateDirect(MIN_PREALLOCATION_LENGTH); private boolean shouldSyncWritesAndSkipFsync = false; private static boolean shouldSkipFsyncForTests = false; static { fill.position(0); for (int i = 0; i < fill.capacity(); i++) { fill.put(FSEditLogOpCodes.OP_INVALID.getOpCode()); } } /** * Creates output buffers and file object. * * @param conf * Configuration object * @param name * File name to store edit log * @param size * Size of flush buffer * @throws IOException */ public EditLogFileOutputStream(Configuration conf, File name, int size) throws IOException { super(); shouldSyncWritesAndSkipFsync = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH_DEFAULT); file = name; doubleBuf = new EditsDoubleBuffer(size); RandomAccessFile rp; if (shouldSyncWritesAndSkipFsync) { rp = new RandomAccessFile(name, "rws"); } else { rp = new RandomAccessFile(name, "rw"); } fp = new FileOutputStream(rp.getFD()); // open for append fc = rp.getChannel(); fc.position(fc.size()); } @Override public void write(FSEditLogOp op) throws IOException { doubleBuf.writeOp(op); } /** * Write a transaction to the stream. The serialization format is: * <ul> * <li>the opcode (byte)</li> * <li>the transaction id (long)</li> * <li>the actual Writables for the transaction</li> * </ul> * */ @Override public void writeRaw(byte[] bytes, int offset, int length) throws IOException { doubleBuf.writeRaw(bytes, offset, length); } /** * Create empty edits logs file. 
*/ @Override public void create(int layoutVersion) throws IOException { fc.truncate(0); fc.position(0); writeHeader(layoutVersion, doubleBuf.getCurrentBuf()); setReadyToFlush(); flush(); } /** * Write header information for this EditLogFileOutputStream to the provided * DataOutputSream. * * @param layoutVersion the LayoutVersion of the EditLog * @param out the output stream to write the header to. * @throws IOException in the event of error writing to the stream. */ @VisibleForTesting public static void writeHeader(int layoutVersion, DataOutputStream out) throws IOException { out.writeInt(layoutVersion); LayoutFlags.write(out); } @Override public void close() throws IOException { if (fp == null) { throw new IOException("Trying to use aborted output stream"); } try { // close should have been called after all pending transactions // have been flushed & synced. // if already closed, just skip if (doubleBuf != null) { doubleBuf.close(); doubleBuf = null; } // remove any preallocated padding bytes from the transaction log. if (fc != null && fc.isOpen()) { fc.truncate(fc.position()); fc.close(); fc = null; } fp.close(); fp = null; } finally { IOUtils.cleanup(LOG, fc, fp); doubleBuf = null; fc = null; fp = null; } fp = null; } @Override public void abort() throws IOException { if (fp == null) { return; } IOUtils.cleanup(LOG, fp); fp = null; } /** * All data that has been written to the stream so far will be flushed. New * data can be still written to the stream while flushing is performed. */ @Override public void setReadyToFlush() throws IOException { doubleBuf.setReadyToFlush(); } /** * Flush ready buffer to persistent store. currentBuffer is not flushed as it * accumulates new log records while readyBuffer will be flushed and synced. */ @Override public void flushAndSync(boolean durable) throws IOException { if (fp == null) { throw new IOException("Trying to use aborted output stream"); } if (doubleBuf.isFlushed()) { LOG.info("Nothing to flush"); return; } preallocate(); // preallocate file if necessary doubleBuf.flushTo(fp); if (durable && !shouldSkipFsyncForTests && !shouldSyncWritesAndSkipFsync) { fc.force(false); // metadata updates not needed } } /** * @return true if the number of buffered data exceeds the intial buffer size */ @Override public boolean shouldForceSync() { return doubleBuf.shouldForceSync(); } private void preallocate() throws IOException { long position = fc.position(); long size = fc.size(); int bufSize = doubleBuf.getReadyBuf().getLength(); long need = bufSize - (size - position); if (need <= 0) { return; } long oldSize = size; long total = 0; long fillCapacity = fill.capacity(); while (need > 0) { fill.position(0); IOUtils.writeFully(fc, fill, size); need -= fillCapacity; size += fillCapacity; total += fillCapacity; } if(LOG.isDebugEnabled()) { LOG.debug("Preallocated " + total + " bytes at the end of " + "the edit log (offset " + oldSize + ")"); } } /** * Returns the file associated with this stream. */ File getFile() { return file; } @Override public String toString() { return "EditLogFileOutputStream(" + file + ")"; } /** * @return true if this stream is currently open. */ public boolean isOpen() { return fp != null; } @VisibleForTesting public void setFileChannelForTesting(FileChannel fc) { this.fc = fc; } @VisibleForTesting public FileChannel getFileChannelForTesting() { return fc; } /** * For the purposes of unit tests, we don't need to actually * write durably to disk. So, we can skip the fsync() calls * for a speed improvement. 
* @param skip true if fsync should <em>not</em> be called */ @VisibleForTesting public static void setShouldSkipFsyncForTesting(boolean skip) { shouldSkipFsyncForTests = skip; } }
8,108
27.755319
85
java
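The preallocate() method above extends the edit log file in fixed chunks filled with OP_INVALID bytes whenever the ready buffer would overrun the space between the write position and the end of the file, so a reader that hits the padding sees an invalid opcode and stops. A standalone sketch of that arithmetic; the helper names are illustrative and only the chunk size mirrors MIN_PREALLOCATION_LENGTH:

final class PreallocationSketch {
  static final int CHUNK = 1024 * 1024;   // mirrors MIN_PREALLOCATION_LENGTH

  // Number of padding bytes the while-loop above would append.
  static long bytesToPreallocate(long filePos, long fileSize, int readyBufLen) {
    long need = readyBufLen - (fileSize - filePos);
    if (need <= 0) {
      return 0;                                  // enough room already exists
    }
    long chunks = (need + CHUNK - 1) / CHUNK;    // round up to whole chunks
    return chunks * CHUNK;
  }

  public static void main(String[] args) {
    System.out.println(bytesToPreallocate(0, 0, 512 * 1024));    // 1048576: one chunk
    System.out.println(bytesToPreallocate(0, 2 * CHUNK, 1024));  // 0: nothing needed
  }
}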
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNDNCacheOp.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.fs.CacheFlag; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.security.AccessControlException; import java.io.IOException; import java.util.EnumSet; class FSNDNCacheOp { static CacheDirectiveInfo addCacheDirective( FSNamesystem fsn, CacheManager cacheManager, CacheDirectiveInfo directive, EnumSet<CacheFlag> flags, boolean logRetryCache) throws IOException { final FSPermissionChecker pc = getFsPermissionChecker(fsn); if (directive.getId() != null) { throw new IOException("addDirective: you cannot specify an ID " + "for this operation."); } CacheDirectiveInfo effectiveDirective = cacheManager.addDirective(directive, pc, flags); fsn.getEditLog().logAddCacheDirectiveInfo(effectiveDirective, logRetryCache); return effectiveDirective; } static void modifyCacheDirective( FSNamesystem fsn, CacheManager cacheManager, CacheDirectiveInfo directive, EnumSet<CacheFlag> flags, boolean logRetryCache) throws IOException { final FSPermissionChecker pc = getFsPermissionChecker(fsn); cacheManager.modifyDirective(directive, pc, flags); fsn.getEditLog().logModifyCacheDirectiveInfo(directive, logRetryCache); } static void removeCacheDirective( FSNamesystem fsn, CacheManager cacheManager, long id, boolean logRetryCache) throws IOException { final FSPermissionChecker pc = getFsPermissionChecker(fsn); cacheManager.removeDirective(id, pc); fsn.getEditLog().logRemoveCacheDirectiveInfo(id, logRetryCache); } static BatchedListEntries<CacheDirectiveEntry> listCacheDirectives( FSNamesystem fsn, CacheManager cacheManager, long startId, CacheDirectiveInfo filter) throws IOException { final FSPermissionChecker pc = getFsPermissionChecker(fsn); return cacheManager.listCacheDirectives(startId, filter, pc); } static CachePoolInfo addCachePool( FSNamesystem fsn, CacheManager cacheManager, CachePoolInfo req, boolean logRetryCache) throws IOException { final FSPermissionChecker pc = getFsPermissionChecker(fsn); if (pc != null) { pc.checkSuperuserPrivilege(); } CachePoolInfo info = cacheManager.addCachePool(req); fsn.getEditLog().logAddCachePool(info, logRetryCache); return info; } static void modifyCachePool( FSNamesystem fsn, CacheManager cacheManager, CachePoolInfo req, boolean logRetryCache) throws IOException { final FSPermissionChecker pc = getFsPermissionChecker(fsn); if (pc != null) { pc.checkSuperuserPrivilege(); } cacheManager.modifyCachePool(req); fsn.getEditLog().logModifyCachePool(req, logRetryCache); } 
static void removeCachePool( FSNamesystem fsn, CacheManager cacheManager, String cachePoolName, boolean logRetryCache) throws IOException { final FSPermissionChecker pc = getFsPermissionChecker(fsn); if (pc != null) { pc.checkSuperuserPrivilege(); } cacheManager.removeCachePool(cachePoolName); fsn.getEditLog().logRemoveCachePool(cachePoolName, logRetryCache); } static BatchedListEntries<CachePoolEntry> listCachePools( FSNamesystem fsn, CacheManager cacheManager, String prevKey) throws IOException { final FSPermissionChecker pc = getFsPermissionChecker(fsn); return cacheManager.listCachePools(pc, prevKey); } private static FSPermissionChecker getFsPermissionChecker(FSNamesystem fsn) throws AccessControlException { return fsn.isPermissionEnabled() ? fsn.getPermissionChecker() : null; } }
4,731
36.856
80
java
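These helpers back the caching RPCs on the NameNode side: directive changes are permission-checked and recorded in the edit log, while pool changes additionally require superuser privilege. A hedged client-side sketch using what I understand to be the standard DistributedFileSystem caching methods; the pool name, path and replication are illustrative, and note that the server rejects addCacheDirective requests that already carry an ID, as enforced above:

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CacheAdminSketch {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS cluster.
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    dfs.addCachePool(new CachePoolInfo("hot-data"));   // superuser-only on the server
    long id = dfs.addCacheDirective(
        new CacheDirectiveInfo.Builder()
            .setPath(new Path("/data/important"))
            .setPool("hot-data")
            .setReplication((short) 2)
            .build(),
        EnumSet.noneOf(CacheFlag.class));
    System.out.println("created cache directive " + id);
  }
}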
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; /** Context data for an ongoing NameNode metadata recovery process. */ @InterfaceAudience.Private @InterfaceStability.Evolving public final class MetaRecoveryContext { public static final Log LOG = LogFactory.getLog(MetaRecoveryContext.class.getName()); public final static int FORCE_NONE = 0; public final static int FORCE_FIRST_CHOICE = 1; public final static int FORCE_ALL = 2; private int force; /** Exception thrown when the user has requested processing to stop. */ static public class RequestStopException extends IOException { private static final long serialVersionUID = 1L; public RequestStopException(String msg) { super(msg); } } public MetaRecoveryContext(int force) { this.force = force; } /** * Display a prompt to the user and get his or her choice. * * @param prompt The prompt to display * @param default First choice (will be taken if autoChooseDefault is * true) * @param choices Other choies * * @return The choice that was taken * @throws IOException */ public String ask(String prompt, String firstChoice, String... choices) throws IOException { while (true) { System.err.print(prompt); if (force > FORCE_NONE) { System.out.println("automatically choosing " + firstChoice); return firstChoice; } StringBuilder responseBuilder = new StringBuilder(); while (true) { int c = System.in.read(); if (c == -1 || c == '\r' || c == '\n') { break; } responseBuilder.append((char)c); } String response = responseBuilder.toString(); if (response.equalsIgnoreCase(firstChoice)) return firstChoice; for (String c : choices) { if (response.equalsIgnoreCase(c)) { return c; } } System.err.print("I'm sorry, I cannot understand your response.\n"); } } public static void editLogLoaderPrompt(String prompt, MetaRecoveryContext recovery, String contStr) throws IOException, RequestStopException { if (recovery == null) { throw new IOException(prompt); } LOG.error(prompt); String answer = recovery.ask("\nEnter 'c' to continue, " + contStr + "\n" + "Enter 's' to stop reading the edit log here, abandoning any later " + "edits\n" + "Enter 'q' to quit without saving\n" + "Enter 'a' to always select the first choice in the future " + "without prompting. 
" + "(c/s/q/a)\n", "c", "s", "q", "a"); if (answer.equals("c")) { LOG.info("Continuing"); return; } else if (answer.equals("s")) { throw new RequestStopException("user requested stop"); } else if (answer.equals("q")) { recovery.quit(); } else { recovery.setForce(FORCE_FIRST_CHOICE); return; } } /** Log a message and quit */ public void quit() { LOG.error("Exiting on user request."); System.exit(0); } public int getForce() { return this.force; } public void setForce(int force) { this.force = force; } }
4,233
31.320611
87
java
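The ask() loop above auto-selects the first choice once force has been raised past FORCE_NONE, and otherwise matches the console response case-insensitively against the offered choices, re-prompting on anything it does not recognize. A standalone sketch of that selection rule; the class and method names are illustrative:

import java.util.Arrays;

final class RecoveryPromptSketch {
  // Returns the accepted choice, or null if the caller should re-prompt.
  static String choose(int force, String response,
      String firstChoice, String... otherChoices) {
    if (force > 0) {                       // FORCE_FIRST_CHOICE or FORCE_ALL
      return firstChoice;
    }
    if (response.equalsIgnoreCase(firstChoice)) {
      return firstChoice;
    }
    return Arrays.stream(otherChoices)
        .filter(c -> c.equalsIgnoreCase(response))
        .findFirst()
        .orElse(null);
  }

  public static void main(String[] args) {
    System.out.println(choose(0, "S", "c", "s", "q", "a"));  // s
    System.out.println(choose(2, "q", "c", "s", "q", "a"));  // c (forced)
  }
}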
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryServlet.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; import java.io.PrintWriter; import java.security.PrivilegedExceptionAction; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ServletUtil; import org.znerd.xmlenc.XMLOutputter; /** Servlets for file checksum */ @InterfaceAudience.Private public class ContentSummaryServlet extends DfsServlet { /** For java.io.Serializable */ private static final long serialVersionUID = 1L; @Override public void doGet(final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException { final Configuration conf = (Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF); final UserGroupInformation ugi = getUGI(request, conf); try { ugi.doAs(new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { final String path = ServletUtil.getDecodedPath(request, "/contentSummary"); final PrintWriter out = response.getWriter(); final XMLOutputter xml = new XMLOutputter(out, "UTF-8"); xml.declaration(); try { //get content summary final ClientProtocol nnproxy = createNameNodeProxy(); final ContentSummary cs = nnproxy.getContentSummary(path); //write xml xml.startTag(ContentSummary.class.getName()); if (cs != null) { xml.attribute("length" , "" + cs.getLength()); xml.attribute("fileCount" , "" + cs.getFileCount()); xml.attribute("directoryCount", "" + cs.getDirectoryCount()); xml.attribute("quota" , "" + cs.getQuota()); xml.attribute("spaceConsumed" , "" + cs.getSpaceConsumed()); xml.attribute("spaceQuota" , "" + cs.getSpaceQuota()); } xml.endTag(); } catch(IOException ioe) { writeXml(ioe, path, xml); } xml.endDocument(); return null; } }); } catch (InterruptedException e) { throw new IOException(e); } } }
3,391
38.905882
85
java
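The servlet above simply serializes a ContentSummary into XML attributes; the same figures are available programmatically through the public FileSystem API. A minimal sketch, with the path illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ContentSummarySketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    ContentSummary cs = fs.getContentSummary(new Path("/user"));
    // The same fields the servlet writes as XML attributes.
    System.out.println("length=" + cs.getLength()
        + " files=" + cs.getFileCount()
        + " dirs=" + cs.getDirectoryCount()
        + " quota=" + cs.getQuota()
        + " spaceConsumed=" + cs.getSpaceConsumed()
        + " spaceQuota=" + cs.getSpaceQuota());
  }
}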
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.util.Collections; import java.util.List; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclEntryType; import org.apache.hadoop.fs.permission.AclUtil; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.ScopedAclEntries; import org.apache.hadoop.hdfs.protocol.AclException; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.util.ReferenceCountMap; /** * AclStorage contains utility methods that define how ACL data is stored in the * namespace. * * If an inode has an ACL, then the ACL bit is set in the inode's * {@link FsPermission} and the inode also contains an {@link AclFeature}. For * the access ACL, the owner and other entries are identical to the owner and * other bits stored in FsPermission, so we reuse those. The access mask entry * is stored into the group permission bits of FsPermission. This is consistent * with other file systems' implementations of ACLs and eliminates the need for * special handling in various parts of the codebase. For example, if a user * calls chmod to change group permission bits on a file with an ACL, then the * expected behavior is to change the ACL's mask entry. By saving the mask entry * into the group permission bits, chmod continues to work correctly without * special handling. All remaining access entries (named users and named groups) * are stored as explicit {@link AclEntry} instances in a list inside the * AclFeature. Additionally, all default entries are stored in the AclFeature. * * The methods in this class encapsulate these rules for reading or writing the * ACL entries to the appropriate location. * * The methods in this class assume that input ACL entry lists have already been * validated and sorted according to the rules enforced by * {@link AclTransformation}. */ @InterfaceAudience.Private public final class AclStorage { private final static ReferenceCountMap<AclFeature> UNIQUE_ACL_FEATURES = new ReferenceCountMap<AclFeature>(); /** * If a default ACL is defined on a parent directory, then copies that default * ACL to a newly created child file or directory. 
* * @param child INode newly created child */ public static void copyINodeDefaultAcl(INode child) { INodeDirectory parent = child.getParent(); AclFeature parentAclFeature = parent.getAclFeature(); if (parentAclFeature == null || !(child.isFile() || child.isDirectory())) { return; } // Split parent's entries into access vs. default. List<AclEntry> featureEntries = getEntriesFromAclFeature(parent .getAclFeature()); ScopedAclEntries scopedEntries = new ScopedAclEntries(featureEntries); List<AclEntry> parentDefaultEntries = scopedEntries.getDefaultEntries(); // The parent may have an access ACL but no default ACL. If so, exit. if (parentDefaultEntries.isEmpty()) { return; } // Pre-allocate list size for access entries to copy from parent. List<AclEntry> accessEntries = Lists.newArrayListWithCapacity( parentDefaultEntries.size()); FsPermission childPerm = child.getFsPermission(); // Copy each default ACL entry from parent to new child's access ACL. boolean parentDefaultIsMinimal = AclUtil.isMinimalAcl(parentDefaultEntries); for (AclEntry entry: parentDefaultEntries) { AclEntryType type = entry.getType(); String name = entry.getName(); AclEntry.Builder builder = new AclEntry.Builder() .setScope(AclEntryScope.ACCESS) .setType(type) .setName(name); // The child's initial permission bits are treated as the mode parameter, // which can filter copied permission values for owner, mask and other. final FsAction permission; if (type == AclEntryType.USER && name == null) { permission = entry.getPermission().and(childPerm.getUserAction()); } else if (type == AclEntryType.GROUP && parentDefaultIsMinimal) { // This only happens if the default ACL is a minimal ACL: exactly 3 // entries corresponding to owner, group and other. In this case, // filter the group permissions. permission = entry.getPermission().and(childPerm.getGroupAction()); } else if (type == AclEntryType.MASK) { // Group bits from mode parameter filter permission of mask entry. permission = entry.getPermission().and(childPerm.getGroupAction()); } else if (type == AclEntryType.OTHER) { permission = entry.getPermission().and(childPerm.getOtherAction()); } else { permission = entry.getPermission(); } builder.setPermission(permission); accessEntries.add(builder.build()); } // A new directory also receives a copy of the parent's default ACL. List<AclEntry> defaultEntries = child.isDirectory() ? parentDefaultEntries : Collections.<AclEntry>emptyList(); final FsPermission newPerm; if (!AclUtil.isMinimalAcl(accessEntries) || !defaultEntries.isEmpty()) { // Save the new ACL to the child. child.addAclFeature(createAclFeature(accessEntries, defaultEntries)); newPerm = createFsPermissionForExtendedAcl(accessEntries, childPerm); } else { // The child is receiving a minimal ACL. newPerm = createFsPermissionForMinimalAcl(accessEntries, childPerm); } child.setPermission(newPerm); } /** * Reads the existing extended ACL entries of an inode. This method returns * only the extended ACL entries stored in the AclFeature. If the inode does * not have an ACL, then this method returns an empty list. This method * supports querying by snapshot ID. * * @param inode INode to read * @param snapshotId int ID of snapshot to read * @return List<AclEntry> containing extended inode ACL entries */ public static List<AclEntry> readINodeAcl(INode inode, int snapshotId) { AclFeature f = inode.getAclFeature(snapshotId); return getEntriesFromAclFeature(f); } /** * Reads the existing extended ACL entries of an INodeAttribute object. 
* * @param inodeAttr INode to read * @return List<AclEntry> containing extended inode ACL entries */ public static List<AclEntry> readINodeAcl(INodeAttributes inodeAttr) { AclFeature f = inodeAttr.getAclFeature(); return getEntriesFromAclFeature(f); } /** * Build list of AclEntries from the AclFeature * @param aclFeature AclFeature * @return List of entries */ @VisibleForTesting static ImmutableList<AclEntry> getEntriesFromAclFeature(AclFeature aclFeature) { if (aclFeature == null) { return ImmutableList.<AclEntry> of(); } ImmutableList.Builder<AclEntry> b = new ImmutableList.Builder<AclEntry>(); for (int pos = 0, entry; pos < aclFeature.getEntriesSize(); pos++) { entry = aclFeature.getEntryAt(pos); b.add(AclEntryStatusFormat.toAclEntry(entry)); } return b.build(); } /** * Reads the existing ACL of an inode. This method always returns the full * logical ACL of the inode after reading relevant data from the inode's * {@link FsPermission} and {@link AclFeature}. Note that every inode * logically has an ACL, even if no ACL has been set explicitly. If the inode * does not have an extended ACL, then the result is a minimal ACL consising of * exactly 3 entries that correspond to the owner, group and other permissions. * This method always reads the inode's current state and does not support * querying by snapshot ID. This is because the method is intended to support * ACL modification APIs, which always apply a delta on top of current state. * * @param inode INode to read * @return List<AclEntry> containing all logical inode ACL entries */ public static List<AclEntry> readINodeLogicalAcl(INode inode) { FsPermission perm = inode.getFsPermission(); AclFeature f = inode.getAclFeature(); if (f == null) { return AclUtil.getMinimalAcl(perm); } final List<AclEntry> existingAcl; // Split ACL entries stored in the feature into access vs. default. List<AclEntry> featureEntries = getEntriesFromAclFeature(f); ScopedAclEntries scoped = new ScopedAclEntries(featureEntries); List<AclEntry> accessEntries = scoped.getAccessEntries(); List<AclEntry> defaultEntries = scoped.getDefaultEntries(); // Pre-allocate list size for the explicit entries stored in the feature // plus the 3 implicit entries (owner, group and other) from the permission // bits. existingAcl = Lists.newArrayListWithCapacity(featureEntries.size() + 3); if (!accessEntries.isEmpty()) { // Add owner entry implied from user permission bits. existingAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS) .setType(AclEntryType.USER).setPermission(perm.getUserAction()) .build()); // Next add all named user and group entries taken from the feature. existingAcl.addAll(accessEntries); // Add mask entry implied from group permission bits. existingAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS) .setType(AclEntryType.MASK).setPermission(perm.getGroupAction()) .build()); // Add other entry implied from other permission bits. existingAcl.add(new AclEntry.Builder().setScope(AclEntryScope.ACCESS) .setType(AclEntryType.OTHER).setPermission(perm.getOtherAction()) .build()); } else { // It's possible that there is a default ACL but no access ACL. In this // case, add the minimal access ACL implied by the permission bits. existingAcl.addAll(AclUtil.getMinimalAcl(perm)); } // Add all default entries after the access entries. existingAcl.addAll(defaultEntries); // The above adds entries in the correct order, so no need to sort here. return existingAcl; } /** * Updates an inode with a new ACL. 
This method takes a full logical ACL and * stores the entries to the inode's {@link FsPermission} and * {@link AclFeature}. * * @param inode INode to update * @param newAcl List<AclEntry> containing new ACL entries * @param snapshotId int latest snapshot ID of inode * @throws AclException if the ACL is invalid for the given inode * @throws QuotaExceededException if quota limit is exceeded */ public static void updateINodeAcl(INode inode, List<AclEntry> newAcl, int snapshotId) throws AclException, QuotaExceededException { assert newAcl.size() >= 3; FsPermission perm = inode.getFsPermission(); final FsPermission newPerm; if (!AclUtil.isMinimalAcl(newAcl)) { // This is an extended ACL. Split entries into access vs. default. ScopedAclEntries scoped = new ScopedAclEntries(newAcl); List<AclEntry> accessEntries = scoped.getAccessEntries(); List<AclEntry> defaultEntries = scoped.getDefaultEntries(); // Only directories may have a default ACL. if (!defaultEntries.isEmpty() && !inode.isDirectory()) { throw new AclException( "Invalid ACL: only directories may have a default ACL."); } // Attach entries to the feature. if (inode.getAclFeature() != null) { inode.removeAclFeature(snapshotId); } inode.addAclFeature(createAclFeature(accessEntries, defaultEntries), snapshotId); newPerm = createFsPermissionForExtendedAcl(accessEntries, perm); } else { // This is a minimal ACL. Remove the ACL feature if it previously had one. if (inode.getAclFeature() != null) { inode.removeAclFeature(snapshotId); } newPerm = createFsPermissionForMinimalAcl(newAcl, perm); } inode.setPermission(newPerm, snapshotId); } /** * There is no reason to instantiate this class. */ private AclStorage() { } /** * Creates an AclFeature from the given ACL entries. * * @param accessEntries List<AclEntry> access ACL entries * @param defaultEntries List<AclEntry> default ACL entries * @return AclFeature containing the required ACL entries */ private static AclFeature createAclFeature(List<AclEntry> accessEntries, List<AclEntry> defaultEntries) { // Pre-allocate list size for the explicit entries stored in the feature, // which is all entries minus the 3 entries implicitly stored in the // permission bits. List<AclEntry> featureEntries = Lists.newArrayListWithCapacity( (accessEntries.size() - 3) + defaultEntries.size()); // For the access ACL, the feature only needs to hold the named user and // group entries. For a correctly sorted ACL, these will be in a // predictable range. if (!AclUtil.isMinimalAcl(accessEntries)) { featureEntries.addAll( accessEntries.subList(1, accessEntries.size() - 2)); } // Add all default entries to the feature. featureEntries.addAll(defaultEntries); return new AclFeature(AclEntryStatusFormat.toInt(featureEntries)); } /** * Creates the new FsPermission for an inode that is receiving an extended * ACL, based on its access ACL entries. For a correctly sorted ACL, the * first entry is the owner and the last 2 entries are the mask and other * entries respectively. Also preserve sticky bit and toggle ACL bit on. * Note that this method intentionally copies the permissions of the mask * entry into the FsPermission group permissions. This is consistent with the * POSIX ACLs model, which presents the mask as the permissions of the group * class. 
* * @param accessEntries List<AclEntry> access ACL entries * @param existingPerm FsPermission existing permissions * @return FsPermission new permissions */ private static FsPermission createFsPermissionForExtendedAcl( List<AclEntry> accessEntries, FsPermission existingPerm) { return new FsPermission(accessEntries.get(0).getPermission(), accessEntries.get(accessEntries.size() - 2).getPermission(), accessEntries.get(accessEntries.size() - 1).getPermission(), existingPerm.getStickyBit()); } /** * Creates the new FsPermission for an inode that is receiving a minimal ACL, * based on its access ACL entries. For a correctly sorted ACL, the owner, * group and other permissions are in order. Also preserve sticky bit and * toggle ACL bit off. * * @param accessEntries List<AclEntry> access ACL entries * @param existingPerm FsPermission existing permissions * @return FsPermission new permissions */ private static FsPermission createFsPermissionForMinimalAcl( List<AclEntry> accessEntries, FsPermission existingPerm) { return new FsPermission(accessEntries.get(0).getPermission(), accessEntries.get(1).getPermission(), accessEntries.get(2).getPermission(), existingPerm.getStickyBit()); } @VisibleForTesting public static ReferenceCountMap<AclFeature> getUniqueAclFeatures() { return UNIQUE_ACL_FEATURES; } /** * Add reference for the said AclFeature * * @param aclFeature * @return Referenced AclFeature */ public static AclFeature addAclFeature(AclFeature aclFeature) { return UNIQUE_ACL_FEATURES.put(aclFeature); } /** * Remove reference to the AclFeature * * @param aclFeature */ public static void removeAclFeature(AclFeature aclFeature) { UNIQUE_ACL_FEATURES.remove(aclFeature); } }
16,667
40.462687
82
java
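The key storage rule documented above is that, for an extended ACL, the mask entry's permissions are written into the group bits of the inode's FsPermission (mirroring createFsPermissionForExtendedAcl), while named user and group entries go into the AclFeature. A small sketch of that mapping using only the public permission classes already referenced in the file; the sample entry list is illustrative and assumed to be correctly sorted:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class AclMaskSketch {
  public static void main(String[] args) {
    // Sorted access ACL: owner, named user, mask, other.
    List<AclEntry> access = Arrays.asList(
        entry(AclEntryType.USER, null, FsAction.ALL),           // owner bits
        entry(AclEntryType.USER, "bruce", FsAction.READ_WRITE), // stored in AclFeature
        entry(AclEntryType.MASK, null, FsAction.READ),          // becomes the group bits
        entry(AclEntryType.OTHER, null, FsAction.NONE));
    FsPermission perm = new FsPermission(
        access.get(0).getPermission(),                    // owner
        access.get(access.size() - 2).getPermission(),    // mask, presented as group class
        access.get(access.size() - 1).getPermission(),    // other
        false);                                           // sticky bit
    System.out.println(perm);   // expected: rwxr-----
  }

  private static AclEntry entry(AclEntryType type, String name, FsAction perm) {
    AclEntry.Builder b = new AclEntry.Builder()
        .setScope(AclEntryScope.ACCESS).setType(type).setPermission(perm);
    if (name != null) {
      b.setName(name);
    }
    return b.build();
  }
}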
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; import java.io.PrintWriter; import java.net.InetAddress; import java.security.PrivilegedExceptionAction; import java.util.Map; import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.security.UserGroupInformation; /** * This class is used in Namesystem's web server to do fsck on namenode. */ @InterfaceAudience.Private public class FsckServlet extends DfsServlet { /** for java.io.Serializable */ private static final long serialVersionUID = 1L; /** Handle fsck request */ @Override public void doGet(HttpServletRequest request, HttpServletResponse response ) throws IOException { @SuppressWarnings("unchecked") final Map<String,String[]> pmap = request.getParameterMap(); final PrintWriter out = response.getWriter(); final InetAddress remoteAddress = InetAddress.getByName(request.getRemoteAddr()); final ServletContext context = getServletContext(); final Configuration conf = NameNodeHttpServer.getConfFromContext(context); final UserGroupInformation ugi = getUGI(request, conf); try { ugi.doAs(new PrivilegedExceptionAction<Object>() { @Override public Object run() throws Exception { NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context); final FSNamesystem namesystem = nn.getNamesystem(); final BlockManager bm = namesystem.getBlockManager(); final int totalDatanodes = namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); new NamenodeFsck(conf, nn, bm.getDatanodeManager().getNetworkTopology(), pmap, out, totalDatanodes, remoteAddress).fsck(); return null; } }); } catch (InterruptedException e) { response.sendError(400, e.getMessage()); } } }
3,043
37.531646
78
java
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/InotifyFSEditLogOpTranslator.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import com.google.common.collect.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.inotify.Event; import org.apache.hadoop.hdfs.inotify.EventBatch; import org.apache.hadoop.hdfs.protocol.Block; import java.util.List; /** * Translates from edit log ops to inotify events. */ @InterfaceAudience.Private public class InotifyFSEditLogOpTranslator { private static long getSize(FSEditLogOp.AddCloseOp acOp) { long size = 0; for (Block b : acOp.getBlocks()) { size += b.getNumBytes(); } return size; } public static EventBatch translate(FSEditLogOp op) { switch(op.opCode) { case OP_ADD: FSEditLogOp.AddOp addOp = (FSEditLogOp.AddOp) op; if (addOp.blocks.length == 0) { // create return new EventBatch(op.txid, new Event[] { new Event.CreateEvent.Builder().path(addOp.path) .ctime(addOp.atime) .replication(addOp.replication) .ownerName(addOp.permissions.getUserName()) .groupName(addOp.permissions.getGroupName()) .perms(addOp.permissions.getPermission()) .overwrite(addOp.overwrite) .defaultBlockSize(addOp.blockSize) .iNodeType(Event.CreateEvent.INodeType.FILE).build() }); } else { // append return new EventBatch(op.txid, new Event[]{new Event.AppendEvent.Builder() .path(addOp.path) .build()}); } case OP_CLOSE: FSEditLogOp.CloseOp cOp = (FSEditLogOp.CloseOp) op; return new EventBatch(op.txid, new Event[] { new Event.CloseEvent(cOp.path, getSize(cOp), cOp.mtime) }); case OP_APPEND: FSEditLogOp.AppendOp appendOp = (FSEditLogOp.AppendOp) op; return new EventBatch(op.txid, new Event[] {new Event.AppendEvent .Builder().path(appendOp.path).newBlock(appendOp.newBlock).build()}); case OP_SET_REPLICATION: FSEditLogOp.SetReplicationOp setRepOp = (FSEditLogOp.SetReplicationOp) op; return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder() .metadataType(Event.MetadataUpdateEvent.MetadataType.REPLICATION) .path(setRepOp.path) .replication(setRepOp.replication).build() }); case OP_CONCAT_DELETE: FSEditLogOp.ConcatDeleteOp cdOp = (FSEditLogOp.ConcatDeleteOp) op; List<Event> events = Lists.newArrayList(); events.add(new Event.AppendEvent.Builder() .path(cdOp.trg) .build()); for (String src : cdOp.srcs) { events.add(new Event.UnlinkEvent.Builder() .path(src) .timestamp(cdOp.timestamp) .build()); } events.add(new Event.CloseEvent(cdOp.trg, -1, cdOp.timestamp)); return new EventBatch(op.txid, events.toArray(new Event[0])); case OP_RENAME_OLD: FSEditLogOp.RenameOldOp rnOpOld = (FSEditLogOp.RenameOldOp) op; return new EventBatch(op.txid, new Event[] { new Event.RenameEvent.Builder() .srcPath(rnOpOld.src) .dstPath(rnOpOld.dst) .timestamp(rnOpOld.timestamp) .build() }); case OP_RENAME: FSEditLogOp.RenameOp rnOp = (FSEditLogOp.RenameOp) op; 
return new EventBatch(op.txid, new Event[] { new Event.RenameEvent.Builder() .srcPath(rnOp.src) .dstPath(rnOp.dst) .timestamp(rnOp.timestamp) .build() }); case OP_DELETE: FSEditLogOp.DeleteOp delOp = (FSEditLogOp.DeleteOp) op; return new EventBatch(op.txid, new Event[] { new Event.UnlinkEvent.Builder() .path(delOp.path) .timestamp(delOp.timestamp) .build() }); case OP_MKDIR: FSEditLogOp.MkdirOp mkOp = (FSEditLogOp.MkdirOp) op; return new EventBatch(op.txid, new Event[] { new Event.CreateEvent.Builder().path(mkOp.path) .ctime(mkOp.timestamp) .ownerName(mkOp.permissions.getUserName()) .groupName(mkOp.permissions.getGroupName()) .perms(mkOp.permissions.getPermission()) .iNodeType(Event.CreateEvent.INodeType.DIRECTORY).build() }); case OP_SET_PERMISSIONS: FSEditLogOp.SetPermissionsOp permOp = (FSEditLogOp.SetPermissionsOp) op; return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder() .metadataType(Event.MetadataUpdateEvent.MetadataType.PERMS) .path(permOp.src) .perms(permOp.permissions).build() }); case OP_SET_OWNER: FSEditLogOp.SetOwnerOp ownOp = (FSEditLogOp.SetOwnerOp) op; return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder() .metadataType(Event.MetadataUpdateEvent.MetadataType.OWNER) .path(ownOp.src) .ownerName(ownOp.username).groupName(ownOp.groupname).build() }); case OP_TIMES: FSEditLogOp.TimesOp timesOp = (FSEditLogOp.TimesOp) op; return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder() .metadataType(Event.MetadataUpdateEvent.MetadataType.TIMES) .path(timesOp.path) .atime(timesOp.atime).mtime(timesOp.mtime).build() }); case OP_SYMLINK: FSEditLogOp.SymlinkOp symOp = (FSEditLogOp.SymlinkOp) op; return new EventBatch(op.txid, new Event[] { new Event.CreateEvent.Builder().path(symOp.path) .ctime(symOp.atime) .ownerName(symOp.permissionStatus.getUserName()) .groupName(symOp.permissionStatus.getGroupName()) .perms(symOp.permissionStatus.getPermission()) .symlinkTarget(symOp.value) .iNodeType(Event.CreateEvent.INodeType.SYMLINK).build() }); case OP_REMOVE_XATTR: FSEditLogOp.RemoveXAttrOp rxOp = (FSEditLogOp.RemoveXAttrOp) op; return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder() .metadataType(Event.MetadataUpdateEvent.MetadataType.XATTRS) .path(rxOp.src) .xAttrs(rxOp.xAttrs) .xAttrsRemoved(true).build() }); case OP_SET_XATTR: FSEditLogOp.SetXAttrOp sxOp = (FSEditLogOp.SetXAttrOp) op; return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder() .metadataType(Event.MetadataUpdateEvent.MetadataType.XATTRS) .path(sxOp.src) .xAttrs(sxOp.xAttrs) .xAttrsRemoved(false).build() }); case OP_SET_ACL: FSEditLogOp.SetAclOp saOp = (FSEditLogOp.SetAclOp) op; return new EventBatch(op.txid, new Event[] { new Event.MetadataUpdateEvent.Builder() .metadataType(Event.MetadataUpdateEvent.MetadataType.ACLS) .path(saOp.src) .acls(saOp.aclEntries).build() }); case OP_TRUNCATE: FSEditLogOp.TruncateOp tOp = (FSEditLogOp.TruncateOp) op; return new EventBatch(op.txid, new Event[] { new Event.TruncateEvent(tOp.src, tOp.newLength, tOp.timestamp) }); default: return null; } } }
7,920
41.132979
80
java
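The EventBatch objects produced by this translator are what clients receive from the HDFS inotify stream. A hedged client-side sketch, assuming the standard HdfsAdmin inotify API; the NameNode URI and the endless polling loop are illustrative, and error handling (including missed-event recovery) is omitted:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

public class InotifyTailSketch {
  public static void main(String[] args) throws Exception {
    HdfsAdmin admin =
        new HdfsAdmin(URI.create("hdfs://localhost:8020"), new Configuration());
    DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
    while (true) {
      EventBatch batch = stream.take();   // blocks until new edits are available
      for (Event event : batch.getEvents()) {
        // Each batch carries the txid of the edit log op it was translated from.
        System.out.println(batch.getTxid() + " " + event.getEventType());
      }
    }
  }
}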
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.BufferedInputStream; import java.io.DataInputStream; import java.io.EOFException; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.URL; import java.security.PrivilegedExceptionAction; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.LayoutFlags; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage.HttpGetFailedException; import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; /** * An implementation of the abstract class {@link EditLogInputStream}, which * reads edits from a file. That file may be either on the local disk or * accessible via a URL. */ @InterfaceAudience.Private public class EditLogFileInputStream extends EditLogInputStream { private final LogSource log; private final long firstTxId; private final long lastTxId; private final boolean isInProgress; private int maxOpSize; static private enum State { UNINIT, OPEN, CLOSED } private State state = State.UNINIT; private InputStream fStream = null; private int logVersion = 0; private FSEditLogOp.Reader reader = null; private FSEditLogLoader.PositionTrackingInputStream tracker = null; private DataInputStream dataIn = null; static final Log LOG = LogFactory.getLog(EditLogInputStream.class); /** * Open an EditLogInputStream for the given file. * The file is pretransactional, so has no txids * @param name filename to open * @throws LogHeaderCorruptException if the header is either missing or * appears to be corrupt/truncated * @throws IOException if an actual IO error occurs while reading the * header */ EditLogFileInputStream(File name) throws LogHeaderCorruptException, IOException { this(name, HdfsServerConstants.INVALID_TXID, HdfsServerConstants.INVALID_TXID, false); } /** * Open an EditLogInputStream for the given file. 
* @param name filename to open * @param firstTxId first transaction found in file * @param lastTxId last transaction id found in file */ public EditLogFileInputStream(File name, long firstTxId, long lastTxId, boolean isInProgress) { this(new FileLog(name), firstTxId, lastTxId, isInProgress); } /** * Open an EditLogInputStream for the given URL. * * @param connectionFactory * the URLConnectionFactory used to create the connection. * @param url * the url hosting the log * @param startTxId * the expected starting txid * @param endTxId * the expected ending txid * @param inProgress * whether the log is in-progress * @return a stream from which edits may be read */ public static EditLogInputStream fromUrl( URLConnectionFactory connectionFactory, URL url, long startTxId, long endTxId, boolean inProgress) { return new EditLogFileInputStream(new URLLog(connectionFactory, url), startTxId, endTxId, inProgress); } private EditLogFileInputStream(LogSource log, long firstTxId, long lastTxId, boolean isInProgress) { this.log = log; this.firstTxId = firstTxId; this.lastTxId = lastTxId; this.isInProgress = isInProgress; this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT; } private void init(boolean verifyLayoutVersion) throws LogHeaderCorruptException, IOException { Preconditions.checkState(state == State.UNINIT); BufferedInputStream bin = null; try { fStream = log.getInputStream(); bin = new BufferedInputStream(fStream); tracker = new FSEditLogLoader.PositionTrackingInputStream(bin); dataIn = new DataInputStream(tracker); try { logVersion = readLogVersion(dataIn, verifyLayoutVersion); } catch (EOFException eofe) { throw new LogHeaderCorruptException("No header found in log"); } // We assume future layout will also support ADD_LAYOUT_FLAGS if (NameNodeLayoutVersion.supports( LayoutVersion.Feature.ADD_LAYOUT_FLAGS, logVersion) || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION) { try { LayoutFlags.read(dataIn); } catch (EOFException eofe) { throw new LogHeaderCorruptException("EOF while reading layout " + "flags from log"); } } reader = new FSEditLogOp.Reader(dataIn, tracker, logVersion); reader.setMaxOpSize(maxOpSize); state = State.OPEN; } finally { if (reader == null) { IOUtils.cleanup(LOG, dataIn, tracker, bin, fStream); state = State.CLOSED; } } } @Override public long getFirstTxId() { return firstTxId; } @Override public long getLastTxId() { return lastTxId; } @Override public String getName() { return log.getName(); } private FSEditLogOp nextOpImpl(boolean skipBrokenEdits) throws IOException { FSEditLogOp op = null; switch (state) { case UNINIT: try { init(true); } catch (Throwable e) { LOG.error("caught exception initializing " + this, e); if (skipBrokenEdits) { return null; } Throwables.propagateIfPossible(e, IOException.class); } Preconditions.checkState(state != State.UNINIT); return nextOpImpl(skipBrokenEdits); case OPEN: op = reader.readOp(skipBrokenEdits); if ((op != null) && (op.hasTransactionId())) { long txId = op.getTransactionId(); if ((txId >= lastTxId) && (lastTxId != HdfsServerConstants.INVALID_TXID)) { // // Sometimes, the NameNode crashes while it's writing to the // edit log. In that case, you can end up with an unfinalized edit log // which has some garbage at the end. // JournalManager#recoverUnfinalizedSegments will finalize these // unfinished edit logs, giving them a defined final transaction // ID. Then they will be renamed, so that any subsequent // readers will have this information. 
// // Since there may be garbage at the end of these "cleaned up" // logs, we want to be sure to skip it here if we've read everything // we were supposed to read out of the stream. // So we force an EOF on all subsequent reads. // long skipAmt = log.length() - tracker.getPos(); if (skipAmt > 0) { if (LOG.isDebugEnabled()) { LOG.debug("skipping " + skipAmt + " bytes at the end " + "of edit log '" + getName() + "': reached txid " + txId + " out of " + lastTxId); } tracker.clearLimit(); IOUtils.skipFully(tracker, skipAmt); } } } break; case CLOSED: break; // return null } return op; } @Override protected long scanNextOp() throws IOException { Preconditions.checkState(state == State.OPEN); FSEditLogOp cachedNext = getCachedOp(); return cachedNext == null ? reader.scanOp() : cachedNext.txid; } @Override protected FSEditLogOp nextOp() throws IOException { return nextOpImpl(false); } @Override protected FSEditLogOp nextValidOp() { try { return nextOpImpl(true); } catch (Throwable e) { LOG.error("nextValidOp: got exception while reading " + this, e); return null; } } @Override public int getVersion(boolean verifyVersion) throws IOException { if (state == State.UNINIT) { init(verifyVersion); } return logVersion; } @Override public long getPosition() { if (state == State.OPEN) { return tracker.getPos(); } else { return 0; } } @Override public void close() throws IOException { if (state == State.OPEN) { dataIn.close(); } state = State.CLOSED; } @Override public long length() throws IOException { // file size + size of both buffers return log.length(); } @Override public boolean isInProgress() { return isInProgress; } @Override public String toString() { return getName(); } static FSEditLogLoader.EditLogValidation validateEditLog(File file) throws IOException { EditLogFileInputStream in; try { in = new EditLogFileInputStream(file); in.getVersion(true); // causes us to read the header } catch (LogHeaderCorruptException e) { // If the header is malformed or the wrong value, this indicates a corruption LOG.warn("Log file " + file + " has no valid header", e); return new FSEditLogLoader.EditLogValidation(0, HdfsServerConstants.INVALID_TXID, true); } try { return FSEditLogLoader.validateEditLog(in); } finally { IOUtils.closeStream(in); } } static FSEditLogLoader.EditLogValidation scanEditLog(File file) throws IOException { EditLogFileInputStream in; try { in = new EditLogFileInputStream(file); // read the header, initialize the inputstream, but do not check the // layoutversion in.getVersion(false); } catch (LogHeaderCorruptException e) { LOG.warn("Log file " + file + " has no valid header", e); return new FSEditLogLoader.EditLogValidation(0, HdfsServerConstants.INVALID_TXID, true); } long lastPos = 0; long lastTxId = HdfsServerConstants.INVALID_TXID; long numValid = 0; try { while (true) { long txid = HdfsServerConstants.INVALID_TXID; lastPos = in.getPosition(); try { if ((txid = in.scanNextOp()) == HdfsServerConstants.INVALID_TXID) { break; } } catch (Throwable t) { FSImage.LOG.warn("Caught exception after scanning through " + numValid + " ops from " + in + " while determining its valid length. 
Position was " + lastPos, t); in.resync(); FSImage.LOG.warn("After resync, position is " + in.getPosition()); continue; } if (lastTxId == HdfsServerConstants.INVALID_TXID || txid > lastTxId) { lastTxId = txid; } numValid++; } return new EditLogValidation(lastPos, lastTxId, false); } finally { IOUtils.closeStream(in); } } /** * Read the header of fsedit log * @param in fsedit stream * @return the edit log version number * @throws IOException if error occurs */ @VisibleForTesting static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion) throws IOException, LogHeaderCorruptException { int logVersion; try { logVersion = in.readInt(); } catch (EOFException eofe) { throw new LogHeaderCorruptException( "Reached EOF when reading log header"); } if (verifyLayoutVersion && (logVersion < HdfsServerConstants.NAMENODE_LAYOUT_VERSION || // future version logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION)) { // unsupported throw new LogHeaderCorruptException( "Unexpected version of the file system log file: " + logVersion + ". Current version = " + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + "."); } return logVersion; } /** * Exception indicating that the header of an edits log file is * corrupted. This can be because the header is not present, * or because the header data is invalid (eg claims to be * over a newer version than the running NameNode) */ static class LogHeaderCorruptException extends IOException { private static final long serialVersionUID = 1L; private LogHeaderCorruptException(String msg) { super(msg); } } private interface LogSource { public InputStream getInputStream() throws IOException; public long length(); public String getName(); } private static class FileLog implements LogSource { private final File file; public FileLog(File file) { this.file = file; } @Override public InputStream getInputStream() throws IOException { return new FileInputStream(file); } @Override public long length() { return file.length(); } @Override public String getName() { return file.getPath(); } } private static class URLLog implements LogSource { private final URL url; private long advertisedSize = -1; private final static String CONTENT_LENGTH = "Content-Length"; private final URLConnectionFactory connectionFactory; private final boolean isSpnegoEnabled; public URLLog(URLConnectionFactory connectionFactory, URL url) { this.connectionFactory = connectionFactory; this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled(); this.url = url; } @Override public InputStream getInputStream() throws IOException { return SecurityUtil.doAsCurrentUser( new PrivilegedExceptionAction<InputStream>() { @Override public InputStream run() throws IOException { HttpURLConnection connection; try { connection = (HttpURLConnection) connectionFactory.openConnection(url, isSpnegoEnabled); } catch (AuthenticationException e) { throw new IOException(e); } if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) { throw new HttpGetFailedException( "Fetch of " + url + " failed with status code " + connection.getResponseCode() + "\nResponse message:\n" + connection.getResponseMessage(), connection); } String contentLength = connection.getHeaderField(CONTENT_LENGTH); if (contentLength != null) { advertisedSize = Long.parseLong(contentLength); if (advertisedSize <= 0) { throw new IOException("Invalid " + CONTENT_LENGTH + " header: " + contentLength); } } else { throw new IOException(CONTENT_LENGTH + " header is not provided " + "by the server when trying to fetch " + url); } return 
connection.getInputStream(); } }); } @Override public long length() { return advertisedSize; } @Override public String getName() { return url.toString(); } } @Override public void setMaxOpSize(int maxOpSize) { this.maxOpSize = maxOpSize; if (reader != null) { reader.setMaxOpSize(maxOpSize); } } @Override public boolean isLocalLog() { return log instanceof FileLog; } }
16,660
31.28876
90
java
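A minimal usage sketch for the EditLogFileInputStream entry above, assuming a finalized edit segment on local disk. This is not code from the Hadoop tree: the segment path and txid range are hypothetical, and readOp() is assumed to be the iteration method inherited from EditLogInputStream; only the public constructor and accessors shown in the file are exercised.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;

public class EditLogDumpSketch {
  public static void main(String[] args) throws IOException {
    // Hypothetical finalized segment covering txids 1..42.
    File segment = new File("/data/dfs/name/current/edits_example");
    EditLogFileInputStream in =
        new EditLogFileInputStream(segment, 1L, 42L, false /* not in-progress */);
    try {
      FSEditLogOp op;
      // readOp() returns null once the end of the segment is reached.
      while ((op = in.readOp()) != null) {
        if (op.hasTransactionId()) {
          System.out.println("txid " + op.getTransactionId() + ": " + op);
        }
      }
    } finally {
      in.close();
    }
  }
}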
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.XAttrHelper; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException; import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.util.ByteArray; import org.apache.hadoop.hdfs.util.EnumCounters; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; import java.util.List; import java.util.Map; import java.util.concurrent.locks.ReentrantReadWriteLock; import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY; import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID; /** * Both FSDirectory and FSNamesystem manage the state of the namespace. * FSDirectory is a pure in-memory data structure, all of whose operations * happen entirely in memory. In contrast, FSNamesystem persists the operations * to the disk. * @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem **/ @InterfaceAudience.Private public class FSDirectory implements Closeable { static final Logger LOG = LoggerFactory.getLogger(FSDirectory.class); private static INodeDirectory createRoot(FSNamesystem namesystem) { final INodeDirectory r = new INodeDirectory( INodeId.ROOT_INODE_ID, INodeDirectory.ROOT_NAME, namesystem.createFsOwnerPermissions(new FsPermission((short) 0755)), 0L); r.addDirectoryWithQuotaFeature( new DirectoryWithQuotaFeature.Builder(). nameSpaceQuota(DirectoryWithQuotaFeature.DEFAULT_NAMESPACE_QUOTA). storageSpaceQuota(DirectoryWithQuotaFeature.DEFAULT_STORAGE_SPACE_QUOTA). build()); r.addSnapshottableFeature(); r.setSnapshotQuota(0); return r; } @VisibleForTesting static boolean CHECK_RESERVED_FILE_NAMES = true; public final static String DOT_RESERVED_STRING = ".reserved"; public final static String DOT_RESERVED_PATH_PREFIX = Path.SEPARATOR + DOT_RESERVED_STRING; public final static byte[] DOT_RESERVED = DFSUtil.string2Bytes(DOT_RESERVED_STRING); private final static String RAW_STRING = "raw"; private final static byte[] RAW = DFSUtil.string2Bytes(RAW_STRING); public final static String DOT_INODES_STRING = ".inodes"; public final static byte[] DOT_INODES = DFSUtil.string2Bytes(DOT_INODES_STRING); INodeDirectory rootDir; private final FSNamesystem namesystem; private volatile boolean skipQuotaCheck = false; //skip while consuming edits private final int maxComponentLength; private final int maxDirItems; private final int lsLimit; // max list limit private final int contentCountLimit; // max content summary counts per run private final long contentSleepMicroSec; private final INodeMap inodeMap; // Synchronized by dirLock private long yieldCount = 0; // keep track of lock yield count. private final int inodeXAttrsLimit; //inode xattrs max limit // lock to protect the directory and BlockMap private final ReentrantReadWriteLock dirLock; private final boolean isPermissionEnabled; /** * Support for ACLs is controlled by a configuration flag. If the * configuration flag is false, then the NameNode will reject all * ACL-related operations. */ private final boolean aclsEnabled; private final boolean xattrsEnabled; private final int xattrMaxSize; // precision of access times. private final long accessTimePrecision; // whether setStoragePolicy is allowed. 
private final boolean storagePolicyEnabled; // whether quota by storage type is allowed private final boolean quotaByStorageTypeEnabled; private final String fsOwnerShortUserName; private final String supergroup; private final INodeId inodeId; private final FSEditLog editLog; private INodeAttributeProvider attributeProvider; public void setINodeAttributeProvider(INodeAttributeProvider provider) { attributeProvider = provider; } // utility methods to acquire and release read lock and write lock void readLock() { this.dirLock.readLock().lock(); } void readUnlock() { this.dirLock.readLock().unlock(); } void writeLock() { this.dirLock.writeLock().lock(); } void writeUnlock() { this.dirLock.writeLock().unlock(); } boolean hasWriteLock() { return this.dirLock.isWriteLockedByCurrentThread(); } boolean hasReadLock() { return this.dirLock.getReadHoldCount() > 0 || hasWriteLock(); } public int getReadHoldCount() { return this.dirLock.getReadHoldCount(); } public int getWriteHoldCount() { return this.dirLock.getWriteHoldCount(); } @VisibleForTesting public final EncryptionZoneManager ezManager; /** * Caches frequently used file names used in {@link INode} to reuse * byte[] objects and reduce heap usage. */ private final NameCache<ByteArray> nameCache; FSDirectory(FSNamesystem ns, Configuration conf) throws IOException { this.dirLock = new ReentrantReadWriteLock(true); // fair this.inodeId = new INodeId(); rootDir = createRoot(ns); inodeMap = INodeMap.newInstance(rootDir); this.isPermissionEnabled = conf.getBoolean( DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT); this.fsOwnerShortUserName = UserGroupInformation.getCurrentUser().getShortUserName(); this.supergroup = conf.get( DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY, DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT); this.aclsEnabled = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_DEFAULT); LOG.info("ACLs enabled? " + aclsEnabled); this.xattrsEnabled = conf.getBoolean( DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_DEFAULT); LOG.info("XAttrs enabled? " + xattrsEnabled); this.xattrMaxSize = conf.getInt( DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT); Preconditions.checkArgument(xattrMaxSize >= 0, "Cannot set a negative value for the maximum size of an xattr (%s).", DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY); final String unlimited = xattrMaxSize == 0 ? " (unlimited)" : ""; LOG.info("Maximum size of an xattr: " + xattrMaxSize + unlimited); this.accessTimePrecision = conf.getLong( DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT); this.storagePolicyEnabled = conf.getBoolean(DFS_STORAGE_POLICY_ENABLED_KEY, DFS_STORAGE_POLICY_ENABLED_DEFAULT); this.quotaByStorageTypeEnabled = conf.getBoolean(DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY, DFS_QUOTA_BY_STORAGETYPE_ENABLED_DEFAULT); int configuredLimit = conf.getInt( DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT); this.lsLimit = configuredLimit>0 ? 
configuredLimit : DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT; this.contentCountLimit = conf.getInt( DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_DEFAULT); this.contentSleepMicroSec = conf.getLong( DFSConfigKeys.DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_KEY, DFSConfigKeys.DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT); // filesystem limits this.maxComponentLength = conf.getInt( DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_DEFAULT); this.maxDirItems = conf.getInt( DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT); this.inodeXAttrsLimit = conf.getInt( DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT); Preconditions.checkArgument(this.inodeXAttrsLimit >= 0, "Cannot set a negative limit on the number of xattrs per inode (%s).", DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY); // We need a maximum maximum because by default, PB limits message sizes // to 64MB. This means we can only store approximately 6.7 million entries // per directory, but let's use 6.4 million for some safety. final int MAX_DIR_ITEMS = 64 * 100 * 1000; Preconditions.checkArgument( maxDirItems > 0 && maxDirItems <= MAX_DIR_ITEMS, "Cannot set " + DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY + " to a value less than 1 or greater than " + MAX_DIR_ITEMS); int threshold = conf.getInt( DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY, DFSConfigKeys.DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT); NameNode.LOG.info("Caching file names occurring more than " + threshold + " times"); nameCache = new NameCache<ByteArray>(threshold); namesystem = ns; this.editLog = ns.getEditLog(); ezManager = new EncryptionZoneManager(this, conf); } FSNamesystem getFSNamesystem() { return namesystem; } BlockManager getBlockManager() { return getFSNamesystem().getBlockManager(); } /** @return the root directory inode. */ public INodeDirectory getRoot() { return rootDir; } public BlockStoragePolicySuite getBlockStoragePolicySuite() { return getBlockManager().getStoragePolicySuite(); } boolean isPermissionEnabled() { return isPermissionEnabled; } boolean isAclsEnabled() { return aclsEnabled; } boolean isXattrsEnabled() { return xattrsEnabled; } int getXattrMaxSize() { return xattrMaxSize; } boolean isStoragePolicyEnabled() { return storagePolicyEnabled; } boolean isAccessTimeSupported() { return accessTimePrecision > 0; } long getAccessTimePrecision() { return accessTimePrecision; } boolean isQuotaByStorageTypeEnabled() { return quotaByStorageTypeEnabled; } int getLsLimit() { return lsLimit; } int getContentCountLimit() { return contentCountLimit; } long getContentSleepMicroSec() { return contentSleepMicroSec; } int getInodeXAttrsLimit() { return inodeXAttrsLimit; } FSEditLog getEditLog() { return editLog; } /** * Shutdown the filestore */ @Override public void close() throws IOException {} void markNameCacheInitialized() { writeLock(); try { nameCache.initialized(); } finally { writeUnlock(); } } boolean shouldSkipQuotaChecks() { return skipQuotaCheck; } /** Enable quota verification */ void enableQuotaChecks() { skipQuotaCheck = false; } /** Disable quota verification */ void disableQuotaChecks() { skipQuotaCheck = true; } /** * This is a wrapper for resolvePath(). If the path passed * is prefixed with /.reserved/raw, then it checks to ensure that the caller * has super user privileges. * * @param pc The permission checker used when resolving path. 
* @param path The path to resolve. * @param pathComponents path components corresponding to the path * @return if the path indicates an inode, return path after replacing up to * <inodeid> with the corresponding path of the inode, else the path * in {@code src} as is. If the path refers to a path in the "raw" * directory, return the non-raw pathname. * @throws FileNotFoundException * @throws AccessControlException */ String resolvePath(FSPermissionChecker pc, String path, byte[][] pathComponents) throws FileNotFoundException, AccessControlException { if (isReservedRawName(path) && isPermissionEnabled) { pc.checkSuperuserPrivilege(); } return resolvePath(path, pathComponents, this); } /** * @return true if the path is a non-empty directory; otherwise, return false. */ boolean isNonEmptyDirectory(INodesInPath inodesInPath) { readLock(); try { final INode inode = inodesInPath.getLastINode(); if (inode == null || !inode.isDirectory()) { //not found or not a directory return false; } final int s = inodesInPath.getPathSnapshotId(); return !inode.asDirectory().getChildrenList(s).isEmpty(); } finally { readUnlock(); } } /** * Check whether the filepath could be created * @throws SnapshotAccessControlException if path is in RO snapshot */ boolean isValidToCreate(String src, INodesInPath iip) throws SnapshotAccessControlException { String srcs = normalizePath(src); return srcs.startsWith("/") && !srcs.endsWith("/") && iip.getLastINode() == null; } /** * Check whether the path specifies a directory */ boolean isDir(String src) throws UnresolvedLinkException { src = normalizePath(src); readLock(); try { INode node = getINode(src, false); return node != null && node.isDirectory(); } finally { readUnlock(); } } /** Updates namespace, storagespace and typespaces consumed for all * directories until the parent directory of file represented by path. * * @param iip the INodesInPath instance containing all the INodes for * updating quota usage * @param nsDelta the delta change of namespace * @param ssDelta the delta change of storage space consumed without replication * @param replication the replication factor of the block consumption change * @throws QuotaExceededException if the new count violates any quota limit * @throws FileNotFoundException if path does not exist. */ void updateSpaceConsumed(INodesInPath iip, long nsDelta, long ssDelta, short replication) throws QuotaExceededException, FileNotFoundException, UnresolvedLinkException, SnapshotAccessControlException { writeLock(); try { if (iip.getLastINode() == null) { throw new FileNotFoundException("Path not found: " + iip.getPath()); } updateCount(iip, nsDelta, ssDelta, replication, true); } finally { writeUnlock(); } } public void updateCount(INodesInPath iip, INode.QuotaDelta quotaDelta, boolean check) throws QuotaExceededException { QuotaCounts counts = quotaDelta.getCountsCopy(); updateCount(iip, iip.length() - 1, counts.negation(), check); Map<INode, QuotaCounts> deltaInOtherPaths = quotaDelta.getUpdateMap(); for (Map.Entry<INode, QuotaCounts> entry : deltaInOtherPaths.entrySet()) { INodesInPath path = INodesInPath.fromINode(entry.getKey()); updateCount(path, path.length() - 1, entry.getValue().negation(), check); } for (Map.Entry<INodeDirectory, QuotaCounts> entry : quotaDelta.getQuotaDirMap().entrySet()) { INodeDirectory quotaDir = entry.getKey(); quotaDir.getDirectoryWithQuotaFeature().addSpaceConsumed2Cache( entry.getValue().negation()); } } /** * Update the quota usage after deletion. 
The quota update is only necessary * when image/edits have been loaded and the file/dir to be deleted is not * contained in snapshots. */ void updateCountForDelete(final INode inode, final INodesInPath iip) { if (getFSNamesystem().isImageLoaded() && !inode.isInLatestSnapshot(iip.getLatestSnapshotId())) { QuotaCounts counts = inode.computeQuotaUsage(getBlockStoragePolicySuite()); unprotectedUpdateCount(iip, iip.length() - 1, counts.negation()); } } /** * Update usage count without replication factor change */ void updateCount(INodesInPath iip, long nsDelta, long ssDelta, short replication, boolean checkQuota) throws QuotaExceededException { final INodeFile fileINode = iip.getLastINode().asFile(); EnumCounters<StorageType> typeSpaceDeltas = getStorageTypeDeltas(fileINode.getStoragePolicyID(), ssDelta, replication, replication); updateCount(iip, iip.length() - 1, new QuotaCounts.Builder().nameSpace(nsDelta).storageSpace(ssDelta * replication). typeSpaces(typeSpaceDeltas).build(), checkQuota); } /** * Update usage count with replication factor change due to setReplication */ void updateCount(INodesInPath iip, long nsDelta, long ssDelta, short oldRep, short newRep, boolean checkQuota) throws QuotaExceededException { final INodeFile fileINode = iip.getLastINode().asFile(); EnumCounters<StorageType> typeSpaceDeltas = getStorageTypeDeltas(fileINode.getStoragePolicyID(), ssDelta, oldRep, newRep); updateCount(iip, iip.length() - 1, new QuotaCounts.Builder().nameSpace(nsDelta). storageSpace(ssDelta * (newRep - oldRep)). typeSpaces(typeSpaceDeltas).build(), checkQuota); } /** update count of each inode with quota * * @param iip inodes in a path * @param numOfINodes the number of inodes to update starting from index 0 * @param counts the count of space/namespace/type usage to be updated * @param checkQuota if true then check if quota is exceeded * @throws QuotaExceededException if the new count violates any quota limit */ void updateCount(INodesInPath iip, int numOfINodes, QuotaCounts counts, boolean checkQuota) throws QuotaExceededException { assert hasWriteLock(); if (!namesystem.isImageLoaded()) { // still initializing. do not check or update quotas. return; } if (numOfINodes > iip.length()) { numOfINodes = iip.length(); } if (checkQuota && !skipQuotaCheck) { verifyQuota(iip, numOfINodes, counts, null); } unprotectedUpdateCount(iip, numOfINodes, counts); } /** * update quota of each inode and check to see if quota is exceeded. 
* See {@link #updateCount(INodesInPath, int, QuotaCounts, boolean)} */ void updateCountNoQuotaCheck(INodesInPath inodesInPath, int numOfINodes, QuotaCounts counts) { assert hasWriteLock(); try { updateCount(inodesInPath, numOfINodes, counts, false); } catch (QuotaExceededException e) { NameNode.LOG.error("BUG: unexpected exception ", e); } } /** * updates quota without verification * caller's responsibility is to make sure quota is not exceeded */ static void unprotectedUpdateCount(INodesInPath inodesInPath, int numOfINodes, QuotaCounts counts) { for(int i=0; i < numOfINodes; i++) { if (inodesInPath.getINode(i).isQuotaSet()) { // a directory with quota inodesInPath.getINode(i).asDirectory().getDirectoryWithQuotaFeature() .addSpaceConsumed2Cache(counts); } } } public EnumCounters<StorageType> getStorageTypeDeltas(byte storagePolicyID, long dsDelta, short oldRep, short newRep) { EnumCounters<StorageType> typeSpaceDeltas = new EnumCounters<StorageType>(StorageType.class); // Storage type and its quota are only available when storage policy is set if (storagePolicyID != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) { BlockStoragePolicy storagePolicy = getBlockManager().getStoragePolicy(storagePolicyID); if (oldRep != newRep) { List<StorageType> oldChosenStorageTypes = storagePolicy.chooseStorageTypes(oldRep); for (StorageType t : oldChosenStorageTypes) { if (!t.supportTypeQuota()) { continue; } Preconditions.checkArgument(dsDelta > 0); typeSpaceDeltas.add(t, -dsDelta); } } List<StorageType> newChosenStorageTypes = storagePolicy.chooseStorageTypes(newRep); for (StorageType t : newChosenStorageTypes) { if (!t.supportTypeQuota()) { continue; } typeSpaceDeltas.add(t, dsDelta); } } return typeSpaceDeltas; } /** Return the name of the path represented by inodes at [0, pos] */ static String getFullPathName(INode[] inodes, int pos) { StringBuilder fullPathName = new StringBuilder(); if (inodes[0].isRoot()) { if (pos == 0) return Path.SEPARATOR; } else { fullPathName.append(inodes[0].getLocalName()); } for (int i=1; i<=pos; i++) { fullPathName.append(Path.SEPARATOR_CHAR).append(inodes[i].getLocalName()); } return fullPathName.toString(); } /** * @return the relative path of an inode from one of its ancestors, * represented by an array of inodes. */ private static INode[] getRelativePathINodes(INode inode, INode ancestor) { // calculate the depth of this inode from the ancestor int depth = 0; for (INode i = inode; i != null && !i.equals(ancestor); i = i.getParent()) { depth++; } INode[] inodes = new INode[depth]; // fill up the inodes in the path from this inode to root for (int i = 0; i < depth; i++) { if (inode == null) { NameNode.stateChangeLog.warn("Could not get full path." + " Corresponding file might have been deleted already."); return null; } inodes[depth-i-1] = inode; inode = inode.getParent(); } return inodes; } private static INode[] getFullPathINodes(INode inode) { return getRelativePathINodes(inode, null); } /** Return the full path name of the specified inode */ static String getFullPathName(INode inode) { INode[] inodes = getFullPathINodes(inode); // inodes can be null only when it's called without holding the lock return inodes == null ? "" : getFullPathName(inodes, inodes.length - 1); } /** * Add the given child to the namespace. * @param existing the INodesInPath containing all the ancestral INodes * @param child the new INode to add * @return a new INodesInPath instance containing the new child INode. Null * if the adding fails. 
* @throws QuotaExceededException is thrown if it violates quota limit */ INodesInPath addINode(INodesInPath existing, INode child) throws QuotaExceededException, UnresolvedLinkException { cacheName(child); writeLock(); try { return addLastINode(existing, child, true); } finally { writeUnlock(); } } /** * Verify quota for adding or moving a new INode with required * namespace and storagespace to a given position. * * @param iip INodes corresponding to a path * @param pos position where a new INode will be added * @param deltas needed namespace, storagespace and storage types * @param commonAncestor Last node in inodes array that is a common ancestor * for a INode that is being moved from one location to the other. * Pass null if a node is not being moved. * @throws QuotaExceededException if quota limit is exceeded. */ static void verifyQuota(INodesInPath iip, int pos, QuotaCounts deltas, INode commonAncestor) throws QuotaExceededException { if (deltas.getNameSpace() <= 0 && deltas.getStorageSpace() <= 0 && deltas.getTypeSpaces().allLessOrEqual(0L)) { // if quota is being freed or not being consumed return; } // check existing components in the path for(int i = (pos > iip.length() ? iip.length(): pos) - 1; i >= 0; i--) { if (commonAncestor == iip.getINode(i)) { // Stop checking for quota when common ancestor is reached return; } final DirectoryWithQuotaFeature q = iip.getINode(i).asDirectory().getDirectoryWithQuotaFeature(); if (q != null) { // a directory with quota try { q.verifyQuota(deltas); } catch (QuotaExceededException e) { List<INode> inodes = iip.getReadOnlyINodes(); final String path = getFullPathName(inodes.toArray(new INode[inodes.size()]), i); e.setPathName(path); throw e; } } } } /** Verify if the inode name is legal. */ void verifyINodeName(byte[] childName) throws HadoopIllegalArgumentException { if (Arrays.equals(HdfsServerConstants.DOT_SNAPSHOT_DIR_BYTES, childName)) { String s = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name."; if (!namesystem.isImageLoaded()) { s += " Please rename it before upgrade."; } throw new HadoopIllegalArgumentException(s); } } /** * Verify child's name for fs limit. * * @param childName byte[] containing new child name * @param parentPath String containing parent path * @throws PathComponentTooLongException child's name is too long. */ void verifyMaxComponentLength(byte[] childName, String parentPath) throws PathComponentTooLongException { if (maxComponentLength == 0) { return; } final int length = childName.length; if (length > maxComponentLength) { final PathComponentTooLongException e = new PathComponentTooLongException( maxComponentLength, length, parentPath, DFSUtil.bytes2String(childName)); if (namesystem.isImageLoaded()) { throw e; } else { // Do not throw if edits log is still being processed NameNode.LOG.error("ERROR in FSDirectory.verifyINodeName", e); } } } /** * Verify children size for fs limit. * * @throws MaxDirectoryItemsExceededException too many children. 
*/ void verifyMaxDirItems(INodeDirectory parent, String parentPath) throws MaxDirectoryItemsExceededException { final int count = parent.getChildrenList(CURRENT_STATE_ID).size(); if (count >= maxDirItems) { final MaxDirectoryItemsExceededException e = new MaxDirectoryItemsExceededException(maxDirItems, count); if (namesystem.isImageLoaded()) { e.setPathName(parentPath); throw e; } else { // Do not throw if edits log is still being processed NameNode.LOG.error("FSDirectory.verifyMaxDirItems: " + e.getLocalizedMessage()); } } } /** * Add a child to the end of the path specified by INodesInPath. * @return an INodesInPath instance containing the new INode */ @VisibleForTesting public INodesInPath addLastINode(INodesInPath existing, INode inode, boolean checkQuota) throws QuotaExceededException { assert existing.getLastINode() != null && existing.getLastINode().isDirectory(); final int pos = existing.length(); // Disallow creation of /.reserved. This may be created when loading // editlog/fsimage during upgrade since /.reserved was a valid name in older // release. This may also be called when a user tries to create a file // or directory /.reserved. if (pos == 1 && existing.getINode(0) == rootDir && isReservedName(inode)) { throw new HadoopIllegalArgumentException( "File name \"" + inode.getLocalName() + "\" is reserved and cannot " + "be created. If this is during upgrade change the name of the " + "existing file or directory to another name before upgrading " + "to the new release."); } final INodeDirectory parent = existing.getINode(pos - 1).asDirectory(); // The filesystem limits are not really quotas, so this check may appear // odd. It's because a rename operation deletes the src, tries to add // to the dest, if that fails, re-adds the src from whence it came. // The rename code disables the quota when it's restoring to the // original location because a quota violation would cause the item // to go "poof". The fs limits must be bypassed for the same reason. if (checkQuota) { final String parentPath = existing.getPath(); verifyMaxComponentLength(inode.getLocalNameBytes(), parentPath); verifyMaxDirItems(parent, parentPath); } // always verify inode name verifyINodeName(inode.getLocalNameBytes()); final QuotaCounts counts = inode.computeQuotaUsage(getBlockStoragePolicySuite()); updateCount(existing, pos, counts, checkQuota); boolean isRename = (inode.getParent() != null); boolean added; try { added = parent.addChild(inode, true, existing.getLatestSnapshotId()); } catch (QuotaExceededException e) { updateCountNoQuotaCheck(existing, pos, counts.negation()); throw e; } if (!added) { updateCountNoQuotaCheck(existing, pos, counts.negation()); return null; } else { if (!isRename) { AclStorage.copyINodeDefaultAcl(inode); } addToInodeMap(inode); } return INodesInPath.append(existing, inode, inode.getLocalNameBytes()); } INodesInPath addLastINodeNoQuotaCheck(INodesInPath existing, INode i) { try { return addLastINode(existing, i, false); } catch (QuotaExceededException e) { NameNode.LOG.warn("FSDirectory.addChildNoQuotaCheck - unexpected", e); } return null; } /** * Remove the last inode in the path from the namespace. * Note: the caller needs to update the ancestors' quota count. * * @return -1 for failing to remove; * 0 for removing a reference whose referred inode has other * reference nodes; * 1 otherwise. 
*/ @VisibleForTesting public long removeLastINode(final INodesInPath iip) { final int latestSnapshot = iip.getLatestSnapshotId(); final INode last = iip.getLastINode(); final INodeDirectory parent = iip.getINode(-2).asDirectory(); if (!parent.removeChild(last, latestSnapshot)) { return -1; } return (!last.isInLatestSnapshot(latestSnapshot) && INodeReference.tryRemoveReference(last) > 0) ? 0 : 1; } static String normalizePath(String src) { if (src.length() > 1 && src.endsWith("/")) { src = src.substring(0, src.length() - 1); } return src; } @VisibleForTesting public long getYieldCount() { return yieldCount; } void addYieldCount(long value) { yieldCount += value; } public INodeMap getINodeMap() { return inodeMap; } /** * This method is always called with writeLock of FSDirectory held. */ public final void addToInodeMap(INode inode) { if (inode instanceof INodeWithAdditionalFields) { inodeMap.put(inode); if (!inode.isSymlink()) { final XAttrFeature xaf = inode.getXAttrFeature(); if (xaf != null) { final List<XAttr> xattrs = xaf.getXAttrs(); for (XAttr xattr : xattrs) { final String xaName = XAttrHelper.getPrefixName(xattr); if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) { try { final HdfsProtos.ZoneEncryptionInfoProto ezProto = HdfsProtos.ZoneEncryptionInfoProto.parseFrom( xattr.getValue()); ezManager.unprotectedAddEncryptionZone(inode.getId(), PBHelper.convert(ezProto.getSuite()), PBHelper.convert(ezProto.getCryptoProtocolVersion()), ezProto.getKeyName()); } catch (InvalidProtocolBufferException e) { NameNode.LOG.warn("Error parsing protocol buffer of " + "EZ XAttr " + xattr.getName()); } } } } } } } /** * This method is always called with writeLock of FSDirectory held. */ public final void removeFromInodeMap(List<? extends INode> inodes) { if (inodes != null) { for (INode inode : inodes) { if (inode != null && inode instanceof INodeWithAdditionalFields) { inodeMap.remove(inode); ezManager.removeEncryptionZone(inode.getId()); } } } } /** * Get the inode from inodeMap based on its inode id. * @param id The given id * @return The inode associated with the given id */ public INode getInode(long id) { readLock(); try { return inodeMap.get(id); } finally { readUnlock(); } } @VisibleForTesting int getInodeMapSize() { return inodeMap.size(); } long totalInodes() { readLock(); try { return rootDir.getDirectoryWithQuotaFeature().getSpaceConsumed() .getNameSpace(); } finally { readUnlock(); } } /** * Reset the entire namespace tree. 
*/ void reset() { writeLock(); try { rootDir = createRoot(getFSNamesystem()); inodeMap.clear(); addToInodeMap(rootDir); nameCache.reset(); inodeId.setCurrentValue(INodeId.LAST_RESERVED_ID); } finally { writeUnlock(); } } boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException, SnapshotAccessControlException { readLock(); try { return ezManager.isInAnEZ(iip); } finally { readUnlock(); } } String getKeyName(INodesInPath iip) { readLock(); try { return ezManager.getKeyName(iip); } finally { readUnlock(); } } XAttr createEncryptionZone(String src, CipherSuite suite, CryptoProtocolVersion version, String keyName) throws IOException { writeLock(); try { return ezManager.createEncryptionZone(src, suite, version, keyName); } finally { writeUnlock(); } } EncryptionZone getEZForPath(INodesInPath iip) { readLock(); try { return ezManager.getEZINodeForPath(iip); } finally { readUnlock(); } } BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId) throws IOException { readLock(); try { return ezManager.listEncryptionZones(prevId); } finally { readUnlock(); } } /** * Set the FileEncryptionInfo for an INode. */ void setFileEncryptionInfo(String src, FileEncryptionInfo info) throws IOException { // Make the PB for the xattr final HdfsProtos.PerFileEncryptionInfoProto proto = PBHelper.convertPerFileEncInfo(info); final byte[] protoBytes = proto.toByteArray(); final XAttr fileEncryptionAttr = XAttrHelper.buildXAttr(CRYPTO_XATTR_FILE_ENCRYPTION_INFO, protoBytes); final List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1); xAttrs.add(fileEncryptionAttr); writeLock(); try { FSDirXAttrOp.unprotectedSetXAttrs(this, src, xAttrs, EnumSet.of(XAttrSetFlag.CREATE)); } finally { writeUnlock(); } } /** * This function combines the per-file encryption info (obtained * from the inode's XAttrs), and the encryption info from its zone, and * returns a consolidated FileEncryptionInfo instance. Null is returned * for non-encrypted files. 
* * @param inode inode of the file * @param snapshotId ID of the snapshot that * we want to get encryption info from * @param iip inodes in the path containing the file, passed in to * avoid obtaining the list of inodes again; if iip is * null then the list of inodes will be obtained again * @return consolidated file encryption info; null for non-encrypted files */ FileEncryptionInfo getFileEncryptionInfo(INode inode, int snapshotId, INodesInPath iip) throws IOException { if (!inode.isFile()) { return null; } readLock(); try { EncryptionZone encryptionZone = getEZForPath(iip); if (encryptionZone == null) { // not an encrypted file return null; } else if (encryptionZone.getPath() == null || encryptionZone.getPath().isEmpty()) { if (NameNode.LOG.isDebugEnabled()) { NameNode.LOG.debug("Encryption zone " + encryptionZone.getPath() + " does not have a valid path."); } } final CryptoProtocolVersion version = encryptionZone.getVersion(); final CipherSuite suite = encryptionZone.getSuite(); final String keyName = encryptionZone.getKeyName(); XAttr fileXAttr = FSDirXAttrOp.unprotectedGetXAttrByName(inode, snapshotId, CRYPTO_XATTR_FILE_ENCRYPTION_INFO); if (fileXAttr == null) { NameNode.LOG.warn("Could not find encryption XAttr for file " + iip.getPath() + " in encryption zone " + encryptionZone.getPath()); return null; } try { HdfsProtos.PerFileEncryptionInfoProto fileProto = HdfsProtos.PerFileEncryptionInfoProto.parseFrom( fileXAttr.getValue()); return PBHelper.convert(fileProto, suite, version, keyName); } catch (InvalidProtocolBufferException e) { throw new IOException("Could not parse file encryption info for " + "inode " + inode, e); } } finally { readUnlock(); } } static INode resolveLastINode(INodesInPath iip) throws FileNotFoundException { INode inode = iip.getLastINode(); if (inode == null) { throw new FileNotFoundException("cannot find " + iip.getPath()); } return inode; } /** * Caches frequently used file names to reuse file name objects and * reduce heap size. */ void cacheName(INode inode) { // Name is cached only for files if (!inode.isFile()) { return; } ByteArray name = new ByteArray(inode.getLocalNameBytes()); name = nameCache.put(name); if (name != null) { inode.setLocalName(name.getBytes()); } } void shutdown() { nameCache.reset(); inodeMap.clear(); } /** * Given an INode, get all the path components leading to it from the root. * If an INode corresponding to C is given in /A/B/C, the returned * path components will be {root, A, B, C}. * Note that this method cannot handle scenarios where the inode is in a * snapshot. */ public static byte[][] getPathComponents(INode inode) { List<byte[]> components = new ArrayList<byte[]>(); components.add(0, inode.getLocalNameBytes()); while(inode.getParent() != null) { components.add(0, inode.getParent().getLocalNameBytes()); inode = inode.getParent(); } return components.toArray(new byte[components.size()][]); } /** * @return path components for reserved path, else null. */ static byte[][] getPathComponentsForReservedPath(String src) { return !isReservedName(src) ? 
null : INode.getPathComponents(src); } /** Check if a given inode name is reserved */ public static boolean isReservedName(INode inode) { return CHECK_RESERVED_FILE_NAMES && Arrays.equals(inode.getLocalNameBytes(), DOT_RESERVED); } /** Check if a given path is reserved */ public static boolean isReservedName(String src) { return src.startsWith(DOT_RESERVED_PATH_PREFIX + Path.SEPARATOR); } static boolean isReservedRawName(String src) { return src.startsWith(DOT_RESERVED_PATH_PREFIX + Path.SEPARATOR + RAW_STRING); } /** * Resolve a /.reserved/... path to a non-reserved path. * <p/> * There are two special hierarchies under /.reserved/: * <p/> * /.reserved/.inodes/<inodeid> performs a path lookup by inodeid, * <p/> * /.reserved/raw/... returns the encrypted (raw) bytes of a file in an * encryption zone. For instance, if /ezone is an encryption zone, then * /ezone/a refers to the decrypted file and /.reserved/raw/ezone/a refers to * the encrypted (raw) bytes of /ezone/a. * <p/> * Pathnames in the /.reserved/raw directory that resolve to files not in an * encryption zone are equivalent to the corresponding non-raw path. Hence, * if /a/b/c refers to a file that is not in an encryption zone, then * /.reserved/raw/a/b/c is equivalent (they both refer to the same * unencrypted file). * * @param src path that is being processed * @param pathComponents path components corresponding to the path * @param fsd FSDirectory * @return if the path indicates an inode, return path after replacing up to * <inodeid> with the corresponding path of the inode, else the path * in {@code src} as is. If the path refers to a path in the "raw" * directory, return the non-raw pathname. * @throws FileNotFoundException if inodeid is invalid */ static String resolvePath(String src, byte[][] pathComponents, FSDirectory fsd) throws FileNotFoundException { final int nComponents = (pathComponents == null) ? 0 : pathComponents.length; if (nComponents <= 2) { return src; } if (!Arrays.equals(DOT_RESERVED, pathComponents[1])) { /* This is not a /.reserved/ path so do nothing. */ return src; } if (Arrays.equals(DOT_INODES, pathComponents[2])) { /* It's a /.reserved/.inodes path. */ if (nComponents > 3) { return resolveDotInodesPath(src, pathComponents, fsd); } else { return src; } } else if (Arrays.equals(RAW, pathComponents[2])) { /* It's /.reserved/raw so strip off the /.reserved/raw prefix. */ if (nComponents == 3) { return Path.SEPARATOR; } else { return constructRemainingPath("", pathComponents, 3); } } else { /* It's some sort of /.reserved/<unknown> path. Ignore it. */ return src; } } private static String resolveDotInodesPath(String src, byte[][] pathComponents, FSDirectory fsd) throws FileNotFoundException { final String inodeId = DFSUtil.bytes2String(pathComponents[3]); final long id; try { id = Long.parseLong(inodeId); } catch (NumberFormatException e) { throw new FileNotFoundException("Invalid inode path: " + src); } if (id == INodeId.ROOT_INODE_ID && pathComponents.length == 4) { return Path.SEPARATOR; } INode inode = fsd.getInode(id); if (inode == null) { throw new FileNotFoundException( "File for given inode path does not exist: " + src); } // Handle single ".." for NFS lookup support. if ((pathComponents.length > 4) && DFSUtil.bytes2String(pathComponents[4]).equals("..")) { INode parent = inode.getParent(); if (parent == null || parent.getId() == INodeId.ROOT_INODE_ID) { // inode is root, or its parent is root. 
return Path.SEPARATOR; } else { return parent.getFullPathName(); } } String path = ""; if (id != INodeId.ROOT_INODE_ID) { path = inode.getFullPathName(); } return constructRemainingPath(path, pathComponents, 4); } private static String constructRemainingPath(String pathPrefix, byte[][] pathComponents, int startAt) { StringBuilder path = new StringBuilder(pathPrefix); for (int i = startAt; i < pathComponents.length; i++) { path.append(Path.SEPARATOR).append( DFSUtil.bytes2String(pathComponents[i])); } if (NameNode.LOG.isDebugEnabled()) { NameNode.LOG.debug("Resolved path is " + path); } return path.toString(); } INode getINode4DotSnapshot(String src) throws UnresolvedLinkException { Preconditions.checkArgument( src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR), "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR); final String dirPath = normalizePath(src.substring(0, src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length())); final INode node = this.getINode(dirPath); if (node != null && node.isDirectory() && node.asDirectory().isSnapshottable()) { return node; } return null; } INodesInPath getExistingPathINodes(byte[][] components) throws UnresolvedLinkException { return INodesInPath.resolve(rootDir, components, false); } /** * Get {@link INode} associated with the file / directory. */ public INodesInPath getINodesInPath4Write(String src) throws UnresolvedLinkException, SnapshotAccessControlException { return getINodesInPath4Write(src, true); } /** * Get {@link INode} associated with the file / directory. * @throws SnapshotAccessControlException if path is in RO snapshot */ public INode getINode4Write(String src) throws UnresolvedLinkException, SnapshotAccessControlException { return getINodesInPath4Write(src, true).getLastINode(); } /** @return the {@link INodesInPath} containing all inodes in the path. */ public INodesInPath getINodesInPath(String path, boolean resolveLink) throws UnresolvedLinkException { final byte[][] components = INode.getPathComponents(path); return INodesInPath.resolve(rootDir, components, resolveLink); } /** @return the last inode in the path. */ INode getINode(String path, boolean resolveLink) throws UnresolvedLinkException { return getINodesInPath(path, resolveLink).getLastINode(); } /** * Get {@link INode} associated with the file / directory. */ public INode getINode(String src) throws UnresolvedLinkException { return getINode(src, true); } /** * @return the INodesInPath of the components in src * @throws UnresolvedLinkException if symlink can't be resolved * @throws SnapshotAccessControlException if path is in RO snapshot */ INodesInPath getINodesInPath4Write(String src, boolean resolveLink) throws UnresolvedLinkException, SnapshotAccessControlException { final byte[][] components = INode.getPathComponents(src); INodesInPath inodesInPath = INodesInPath.resolve(rootDir, components, resolveLink); if (inodesInPath.isSnapshot()) { throw new SnapshotAccessControlException( "Modification on a read-only snapshot is disallowed"); } return inodesInPath; } FSPermissionChecker getPermissionChecker() throws AccessControlException { try { return getPermissionChecker(fsOwnerShortUserName, supergroup, NameNode.getRemoteUser()); } catch (IOException e) { throw new AccessControlException(e); } } @VisibleForTesting FSPermissionChecker getPermissionChecker(String fsOwner, String superGroup, UserGroupInformation ugi) throws AccessControlException { return new FSPermissionChecker( fsOwner, superGroup, ugi, attributeProvider == null ? 
DefaultINodeAttributesProvider.DEFAULT_PROVIDER : attributeProvider); } void checkOwner(FSPermissionChecker pc, INodesInPath iip) throws AccessControlException { checkPermission(pc, iip, true, null, null, null, null); } void checkPathAccess(FSPermissionChecker pc, INodesInPath iip, FsAction access) throws AccessControlException { checkPermission(pc, iip, false, null, null, access, null); } void checkParentAccess(FSPermissionChecker pc, INodesInPath iip, FsAction access) throws AccessControlException { checkPermission(pc, iip, false, null, access, null, null); } void checkAncestorAccess(FSPermissionChecker pc, INodesInPath iip, FsAction access) throws AccessControlException { checkPermission(pc, iip, false, access, null, null, null); } void checkTraverse(FSPermissionChecker pc, INodesInPath iip) throws AccessControlException { checkPermission(pc, iip, false, null, null, null, null); } /** * Check whether the current user has permissions to access the path. For more * details of the parameters, see * {@link FSPermissionChecker#checkPermission}. */ void checkPermission(FSPermissionChecker pc, INodesInPath iip, boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess, FsAction access, FsAction subAccess) throws AccessControlException { checkPermission(pc, iip, doCheckOwner, ancestorAccess, parentAccess, access, subAccess, false); } /** * Check whether the current user has permissions to access the path. For more * details of the parameters, see * {@link FSPermissionChecker#checkPermission}. */ void checkPermission(FSPermissionChecker pc, INodesInPath iip, boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess, FsAction access, FsAction subAccess, boolean ignoreEmptyDir) throws AccessControlException { if (!pc.isSuperUser()) { readLock(); try { pc.checkPermission(iip, doCheckOwner, ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir); } finally { readUnlock(); } } } void checkUnreadableBySuperuser( FSPermissionChecker pc, INode inode, int snapshotId) throws IOException { if (pc.isSuperUser()) { for (XAttr xattr : FSDirXAttrOp.getXAttrs(this, inode, snapshotId)) { if (XAttrHelper.getPrefixName(xattr). equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) { throw new AccessControlException( "Access is denied for " + pc.getUser() + " since the superuser " + "is not allowed to perform this operation."); } } } } HdfsFileStatus getAuditFileInfo(INodesInPath iip) throws IOException { return (namesystem.isAuditEnabled() && namesystem.isExternalInvocation()) ? FSDirStatAndListingOp.getFileInfo(this, iip.getPath(), iip, false, false) : null; } /** * Verify that the parent directory of src exists. */ void verifyParentDir(INodesInPath iip, String src) throws FileNotFoundException, ParentNotDirectoryException { Path parent = new Path(src).getParent(); if (parent != null) { final INode parentNode = iip.getINode(-2); if (parentNode == null) { throw new FileNotFoundException("Parent directory doesn't exist: " + parent); } else if (!parentNode.isDirectory() && !parentNode.isSymlink()) { throw new ParentNotDirectoryException("Parent path is not a directory: " + parent); } } } /** Allocate a new inode ID. */ long allocateNewInodeId() { return inodeId.nextValue(); } /** @return the last inode ID. */ public long getLastInodeId() { return inodeId.getCurrentValue(); } /** * Set the last allocated inode id when fsimage or editlog is loaded. 
*/ void resetLastInodeId(long newValue) throws IOException { try { inodeId.skipTo(newValue); } catch(IllegalStateException ise) { throw new IOException(ise); } } /** Should only be used for tests to reset to any value */ void resetLastInodeIdWithoutChecking(long newValue) { inodeId.setCurrentValue(newValue); } INodeAttributes getAttributes(String fullPath, byte[] path, INode node, int snapshot) { INodeAttributes nodeAttrs = node; if (attributeProvider != null) { nodeAttrs = node.getSnapshotINode(snapshot); fullPath = fullPath + (fullPath.endsWith(Path.SEPARATOR) ? "" : Path.SEPARATOR) + DFSUtil.bytes2String(path); nodeAttrs = attributeProvider.getAttributes(fullPath, nodeAttrs); } else { nodeAttrs = node.getSnapshotINode(snapshot); } return nodeAttrs; } }
54,664
34.381877
110
java
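The /.reserved handling described in FSDirectory.resolvePath() above can be pictured with the public static helper isReservedName(String). The sketch below is not part of the Hadoop sources; the sample paths are hypothetical, and the expected results follow directly from the prefix checks shown in the file.

import org.apache.hadoop.hdfs.server.namenode.FSDirectory;

public class ReservedPathSketch {
  public static void main(String[] args) {
    // /.reserved/.inodes/<inodeid> addresses an inode by id instead of by name.
    System.out.println(FSDirectory.isReservedName("/.reserved/.inodes/16386")); // true
    // /.reserved/raw/... exposes the raw (still encrypted) bytes inside an encryption zone.
    System.out.println(FSDirectory.isReservedName("/.reserved/raw/ezone/a"));   // true
    // Ordinary paths are not reserved and resolve unchanged.
    System.out.println(FSDirectory.isReservedName("/user/alice/data.txt"));     // false
  }
}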
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat; import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; /** * The attributes of a file. */ @InterfaceAudience.Private public interface INodeFileAttributes extends INodeAttributes { /** @return the file replication. */ public short getFileReplication(); /** @return preferred block size in bytes */ public long getPreferredBlockSize(); /** @return the header as a long. */ public long getHeaderLong(); public boolean metadataEquals(INodeFileAttributes other); public byte getLocalStoragePolicyID(); /** A copy of the inode file attributes */ public static class SnapshotCopy extends INodeAttributes.SnapshotCopy implements INodeFileAttributes { private final long header; public SnapshotCopy(byte[] name, PermissionStatus permissions, AclFeature aclFeature, long modificationTime, long accessTime, short replication, long preferredBlockSize, byte storagePolicyID, XAttrFeature xAttrsFeature) { super(name, permissions, aclFeature, modificationTime, accessTime, xAttrsFeature); header = HeaderFormat.toLong(preferredBlockSize, replication, storagePolicyID); } public SnapshotCopy(INodeFile file) { super(file); this.header = file.getHeaderLong(); } @Override public boolean isDirectory() { return false; } @Override public short getFileReplication() { return HeaderFormat.getReplication(header); } @Override public long getPreferredBlockSize() { return HeaderFormat.getPreferredBlockSize(header); } @Override public byte getLocalStoragePolicyID() { return HeaderFormat.getStoragePolicyID(header); } @Override public long getHeaderLong() { return header; } @Override public boolean metadataEquals(INodeFileAttributes other) { return other != null && getHeaderLong()== other.getHeaderLong() && getPermissionLong() == other.getPermissionLong() && getAclFeature() == other.getAclFeature() && getXAttrFeature() == other.getXAttrFeature(); } } }
3,154
31.864583
85
java
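SnapshotCopy above keeps replication, preferred block size and storage policy packed into a single long through HeaderFormat. The sketch below illustrates that style of bit packing in plain Java; the field widths are assumptions for the example and not the exact HeaderFormat layout.

// Packs three file attributes into one long, low bits first:
// preferredBlockSize (48 bits) | replication (12 bits) | storagePolicy (4 bits).
// The widths here are illustrative, not the real HeaderFormat constants.
final class PackedFileHeader {
  private static final int BLOCK_SIZE_BITS = 48;
  private static final int REPLICATION_BITS = 12;
  private static final int POLICY_BITS = 4;

  static long toLong(long preferredBlockSize, short replication, byte policy) {
    long h = preferredBlockSize & mask(BLOCK_SIZE_BITS);
    h |= (replication & mask(REPLICATION_BITS)) << BLOCK_SIZE_BITS;
    h |= (policy & mask(POLICY_BITS)) << (BLOCK_SIZE_BITS + REPLICATION_BITS);
    return h;
  }

  static long getPreferredBlockSize(long header) {
    return header & mask(BLOCK_SIZE_BITS);
  }

  static short getReplication(long header) {
    return (short) ((header >>> BLOCK_SIZE_BITS) & mask(REPLICATION_BITS));
  }

  static byte getStoragePolicyID(long header) {
    return (byte) ((header >>> (BLOCK_SIZE_BITS + REPLICATION_BITS))
        & mask(POLICY_BITS));
  }

  private static long mask(int bits) {
    return (1L << bits) - 1;
  }

  private PackedFileHeader() {}
}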
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; import java.net.InetSocketAddress; import java.net.SocketTimeoutException; import java.net.URL; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.ha.ServiceFailedException; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService; import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB; import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.namenode.ha.HAState; import org.apache.hadoop.hdfs.server.protocol.FenceResponse; import org.apache.hadoop.hdfs.server.protocol.JournalInfo; import org.apache.hadoop.hdfs.server.protocol.JournalProtocol; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import com.google.protobuf.BlockingService; /** * BackupNode. * <p> * Backup node can play two roles. 
* <ol> * <li>{@link NamenodeRole#CHECKPOINT} node periodically creates checkpoints, * that is downloads image and edits from the active node, merges them, and * uploads the new image back to the active.</li> * <li>{@link NamenodeRole#BACKUP} node keeps its namespace in sync with the * active node, and periodically creates checkpoints by simply saving the * namespace image to local disk(s).</li> * </ol> */ @InterfaceAudience.Private public class BackupNode extends NameNode { private static final String BN_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY; private static final String BN_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT; private static final String BN_HTTP_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY; private static final String BN_HTTP_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT; private static final String BN_SERVICE_RPC_ADDRESS_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY; private static final float BN_SAFEMODE_THRESHOLD_PCT_DEFAULT = 1.5f; private static final int BN_SAFEMODE_EXTENSION_DEFAULT = Integer.MAX_VALUE; /** Name-node proxy */ NamenodeProtocol namenode; /** Name-node RPC address */ String nnRpcAddress; /** Name-node HTTP address */ URL nnHttpAddress; /** Checkpoint manager */ Checkpointer checkpointManager; BackupNode(Configuration conf, NamenodeRole role) throws IOException { super(conf, role); } ///////////////////////////////////////////////////// // Common NameNode methods implementation for backup node. ///////////////////////////////////////////////////// @Override // NameNode protected InetSocketAddress getRpcServerAddress(Configuration conf) { String addr = conf.getTrimmed(BN_ADDRESS_NAME_KEY, BN_ADDRESS_DEFAULT); return NetUtils.createSocketAddr(addr); } @Override protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) { String addr = conf.getTrimmed(BN_SERVICE_RPC_ADDRESS_KEY); if (addr == null || addr.isEmpty()) { return null; } return NetUtils.createSocketAddr(addr); } @Override // NameNode protected void setRpcServerAddress(Configuration conf, InetSocketAddress addr) { conf.set(BN_ADDRESS_NAME_KEY, NetUtils.getHostPortString(addr)); } @Override // Namenode protected void setRpcServiceServerAddress(Configuration conf, InetSocketAddress addr) { conf.set(BN_SERVICE_RPC_ADDRESS_KEY, NetUtils.getHostPortString(addr)); } @Override // NameNode protected InetSocketAddress getHttpServerAddress(Configuration conf) { assert getNameNodeAddress() != null : "rpcAddress should be calculated first"; String addr = conf.getTrimmed(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT); return NetUtils.createSocketAddr(addr); } @Override // NameNode protected void loadNamesystem(Configuration conf) throws IOException { conf.setFloat(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, BN_SAFEMODE_THRESHOLD_PCT_DEFAULT); conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, BN_SAFEMODE_EXTENSION_DEFAULT); BackupImage bnImage = new BackupImage(conf); this.namesystem = new FSNamesystem(conf, bnImage); namesystem.dir.disableQuotaChecks(); bnImage.setNamesystem(namesystem); bnImage.recoverCreateRead(); } @Override // NameNode protected void initialize(Configuration conf) throws IOException { // Trash is disabled in BackupNameNode, // but should be turned back on if it ever becomes active. 
conf.setLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT); NamespaceInfo nsInfo = handshake(conf); super.initialize(conf); namesystem.setBlockPoolId(nsInfo.getBlockPoolID()); if (false == namesystem.isInSafeMode()) { namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER); } // Backup node should never do lease recovery, // therefore lease hard limit should never expire. namesystem.leaseManager.setLeasePeriod( HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE); // register with the active name-node registerWith(nsInfo); // Checkpoint daemon should start after the rpc server started runCheckpointDaemon(conf); InetSocketAddress addr = getHttpAddress(); if (addr != null) { conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(getHttpAddress())); } } @Override protected NameNodeRpcServer createRpcServer(Configuration conf) throws IOException { return new BackupNodeRpcServer(conf, this); } @Override // NameNode public void stop() { if(checkpointManager != null) { // Prevent from starting a new checkpoint. // Checkpoints that has already been started may proceed until // the error reporting to the name-node is complete. // Checkpoint manager should not be interrupted yet because it will // close storage file channels and the checkpoint may fail with // ClosedByInterruptException. checkpointManager.shouldRun = false; } if(namenode != null && getRegistration() != null) { // Exclude this node from the list of backup streams on the name-node try { namenode.errorReport(getRegistration(), NamenodeProtocol.FATAL, "Shutting down."); } catch(IOException e) { LOG.error("Failed to report to name-node.", e); } } // Stop the RPC client if (namenode != null) { RPC.stopProxy(namenode); } namenode = null; // Stop the checkpoint manager if(checkpointManager != null) { checkpointManager.interrupt(); checkpointManager = null; } // Abort current log segment - otherwise the NN shutdown code // will close it gracefully, which is incorrect. 
getFSImage().getEditLog().abortCurrentLogSegment(); // Stop name-node threads super.stop(); } /* @Override */// NameNode public boolean setSafeMode(SafeModeAction action) throws IOException { throw new UnsupportedActionException("setSafeMode"); } static class BackupNodeRpcServer extends NameNodeRpcServer implements JournalProtocol { private BackupNodeRpcServer(Configuration conf, BackupNode nn) throws IOException { super(conf, nn); JournalProtocolServerSideTranslatorPB journalProtocolTranslator = new JournalProtocolServerSideTranslatorPB(this); BlockingService service = JournalProtocolService .newReflectiveBlockingService(journalProtocolTranslator); DFSUtil.addPBProtocol(conf, JournalProtocolPB.class, service, this.clientRpcServer); } /** * Verifies a journal request */ private void verifyJournalRequest(JournalInfo journalInfo) throws IOException { verifyLayoutVersion(journalInfo.getLayoutVersion()); String errorMsg = null; int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID(); if (journalInfo.getNamespaceId() != expectedNamespaceID) { errorMsg = "Invalid namespaceID in journal request - expected " + expectedNamespaceID + " actual " + journalInfo.getNamespaceId(); LOG.warn(errorMsg); throw new UnregisteredNodeException(journalInfo); } if (!journalInfo.getClusterId().equals(namesystem.getClusterId())) { errorMsg = "Invalid clusterId in journal request - expected " + journalInfo.getClusterId() + " actual " + namesystem.getClusterId(); LOG.warn(errorMsg); throw new UnregisteredNodeException(journalInfo); } } ///////////////////////////////////////////////////// // BackupNodeProtocol implementation for backup node. ///////////////////////////////////////////////////// @Override public void startLogSegment(JournalInfo journalInfo, long epoch, long txid) throws IOException { namesystem.checkOperation(OperationCategory.JOURNAL); verifyJournalRequest(journalInfo); getBNImage().namenodeStartedLogSegment(txid); } @Override public void journal(JournalInfo journalInfo, long epoch, long firstTxId, int numTxns, byte[] records) throws IOException { namesystem.checkOperation(OperationCategory.JOURNAL); verifyJournalRequest(journalInfo); getBNImage().journal(firstTxId, numTxns, records); } private BackupImage getBNImage() { return (BackupImage)nn.getFSImage(); } @Override public FenceResponse fence(JournalInfo journalInfo, long epoch, String fencerInfo) throws IOException { LOG.info("Fenced by " + fencerInfo + " with epoch " + epoch); throw new UnsupportedOperationException( "BackupNode does not support fence"); } } ////////////////////////////////////////////////////// boolean shouldCheckpointAtStartup() { FSImage fsImage = getFSImage(); if(isRole(NamenodeRole.CHECKPOINT)) { assert fsImage.getStorage().getNumStorageDirs() > 0; return ! 
fsImage.getStorage().getStorageDir(0).getVersionFile().exists(); } // BN always checkpoints on startup in order to get in sync with namespace return true; } private NamespaceInfo handshake(Configuration conf) throws IOException { // connect to name node InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true); this.namenode = NameNodeProxies.createNonHAProxy(conf, nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy(); this.nnRpcAddress = NetUtils.getHostPortString(nnAddress); this.nnHttpAddress = DFSUtil.getInfoServer(nnAddress, conf, DFSUtil.getHttpClientScheme(conf)).toURL(); // get version and id info from the name-node NamespaceInfo nsInfo = null; while(!isStopRequested()) { try { nsInfo = handshake(namenode); break; } catch(SocketTimeoutException e) { // name-node is busy LOG.info("Problem connecting to server: " + nnAddress); try { Thread.sleep(1000); } catch (InterruptedException ie) { LOG.warn("Encountered exception ", e); } } } return nsInfo; } /** * Start a backup node daemon. */ private void runCheckpointDaemon(Configuration conf) throws IOException { checkpointManager = new Checkpointer(conf, this); checkpointManager.start(); } /** * Checkpoint.<br> * Tests may use it to initiate a checkpoint process. * @throws IOException */ void doCheckpoint() throws IOException { checkpointManager.doCheckpoint(); } /** * Register this backup node with the active name-node. * @param nsInfo namespace information * @throws IOException */ private void registerWith(NamespaceInfo nsInfo) throws IOException { BackupImage bnImage = (BackupImage)getFSImage(); NNStorage storage = bnImage.getStorage(); // verify namespaceID if (storage.getNamespaceID() == 0) { // new backup storage storage.setStorageInfo(nsInfo); storage.setBlockPoolID(nsInfo.getBlockPoolID()); storage.setClusterID(nsInfo.getClusterID()); } else { nsInfo.validateStorage(storage); } bnImage.initEditLog(StartupOption.REGULAR); setRegistration(); NamenodeRegistration nnReg = null; while(!isStopRequested()) { try { nnReg = namenode.registerSubordinateNamenode(getRegistration()); break; } catch(SocketTimeoutException e) { // name-node is busy LOG.info("Problem connecting to name-node: " + nnRpcAddress); try { Thread.sleep(1000); } catch (InterruptedException ie) { LOG.warn("Encountered exception ", e); } } } String msg = null; if(nnReg == null) // consider as a rejection msg = "Registration rejected by " + nnRpcAddress; else if(!nnReg.isRole(NamenodeRole.NAMENODE)) { msg = "Name-node " + nnRpcAddress + " is not active"; } if(msg != null) { msg += ". Shutting down."; LOG.error(msg); throw new IOException(msg); // stop the node } nnRpcAddress = nnReg.getAddress(); } // TODO: move to a common with DataNode util class private static NamespaceInfo handshake(NamenodeProtocol namenode) throws IOException, SocketTimeoutException { NamespaceInfo nsInfo; nsInfo = namenode.versionRequest(); // throws SocketTimeoutException String errorMsg = null; // verify build version if( ! nsInfo.getBuildVersion().equals( Storage.getBuildVersion())) { errorMsg = "Incompatible build versions: active name-node BV = " + nsInfo.getBuildVersion() + "; backup node BV = " + Storage.getBuildVersion(); LOG.error(errorMsg); throw new IOException(errorMsg); } assert HdfsServerConstants.NAMENODE_LAYOUT_VERSION == nsInfo.getLayoutVersion() : "Active and backup node layout versions must be the same. 
Expected: " + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + " actual "+ nsInfo.getLayoutVersion(); return nsInfo; } @Override protected String getNameServiceId(Configuration conf) { return DFSUtil.getBackupNameServiceId(conf); } @Override protected HAState createHAState(StartupOption startOpt) { return new BackupState(); } @Override // NameNode protected NameNodeHAContext createHAContext() { return new BNHAContext(); } private class BNHAContext extends NameNodeHAContext { @Override // NameNodeHAContext public void checkOperation(OperationCategory op) throws StandbyException { if (op == OperationCategory.UNCHECKED || op == OperationCategory.CHECKPOINT) { return; } if (OperationCategory.JOURNAL != op && !(OperationCategory.READ == op && !isRole(NamenodeRole.CHECKPOINT))) { String msg = "Operation category " + op + " is not supported at " + getRole(); throw new StandbyException(msg); } } @Override // NameNodeHAContext public void prepareToStopStandbyServices() throws ServiceFailedException { } /** * Start services for BackupNode. * <p> * The following services should be muted * (not run or not pass any control commands to DataNodes) * on BackupNode: * {@link LeaseManager.Monitor} protected by SafeMode. * {@link BlockManager.ReplicationMonitor} protected by SafeMode. * {@link HeartbeatManager.Monitor} protected by SafeMode. * {@link DecommissionManager.Monitor} need to prohibit refreshNodes(). * {@link PendingReplicationBlocks.PendingReplicationMonitor} harmless, * because ReplicationMonitor is muted. */ @Override public void startActiveServices() throws IOException { try { namesystem.startActiveServices(); } catch (Throwable t) { doImmediateShutdown(t); } } @Override public void stopActiveServices() throws IOException { try { if (namesystem != null) { namesystem.stopActiveServices(); } } catch (Throwable t) { doImmediateShutdown(t); } } } }
18,184
36.572314
117
java
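Both handshake() and registerWith() in BackupNode follow the same pattern: retry an RPC that may time out while the active NameNode is busy, sleep briefly between attempts, and give up only when the node is asked to stop. A simplified standalone version of that retry loop is sketched below; the Callable/BooleanSupplier wiring is an assumption of the sketch, not the BackupNode API.

import java.net.SocketTimeoutException;
import java.util.concurrent.Callable;
import java.util.function.BooleanSupplier;

final class BusyPeerRetry {
  private BusyPeerRetry() {}

  // Retries 'call' while it throws SocketTimeoutException (peer busy),
  // sleeping between attempts, until it succeeds or 'stopRequested' is true.
  // Returns null if the node was asked to stop before the call succeeded.
  static <T> T retryUntilStopped(Callable<T> call,
      BooleanSupplier stopRequested, long sleepMillis) throws Exception {
    while (!stopRequested.getAsBoolean()) {
      try {
        return call.call();
      } catch (SocketTimeoutException e) {
        // Peer is busy; wait and try again.
        try {
          Thread.sleep(sleepMillis);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          return null;
        }
      }
    }
    return null;
  }
}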
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.EnumSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; import java.util.TreeSet; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; class FSImageTransactionalStorageInspector extends FSImageStorageInspector { public static final Log LOG = LogFactory.getLog( FSImageTransactionalStorageInspector.class); private boolean needToSave = false; private boolean isUpgradeFinalized = true; final List<FSImageFile> foundImages = new ArrayList<FSImageFile>(); private long maxSeenTxId = 0; private final List<Pattern> namePatterns = Lists.newArrayList(); FSImageTransactionalStorageInspector() { this(EnumSet.of(NameNodeFile.IMAGE)); } FSImageTransactionalStorageInspector(EnumSet<NameNodeFile> nnfs) { for (NameNodeFile nnf : nnfs) { Pattern pattern = Pattern.compile(nnf.getName() + "_(\\d+)"); namePatterns.add(pattern); } } private Matcher matchPattern(String name) { for (Pattern p : namePatterns) { Matcher m = p.matcher(name); if (m.matches()) { return m; } } return null; } @Override public void inspectDirectory(StorageDirectory sd) throws IOException { // Was the directory just formatted? if (!sd.getVersionFile().exists()) { LOG.info("No version file in " + sd.getRoot()); needToSave |= true; return; } // Check for a seen_txid file, which marks a minimum transaction ID that // must be included in our load plan. 
try { maxSeenTxId = Math.max(maxSeenTxId, NNStorage.readTransactionIdFile(sd)); } catch (IOException ioe) { LOG.warn("Unable to determine the max transaction ID seen by " + sd, ioe); return; } File currentDir = sd.getCurrentDir(); File filesInStorage[]; try { filesInStorage = FileUtil.listFiles(currentDir); } catch (IOException ioe) { LOG.warn("Unable to inspect storage directory " + currentDir, ioe); return; } for (File f : filesInStorage) { LOG.debug("Checking file " + f); String name = f.getName(); // Check for fsimage_* Matcher imageMatch = this.matchPattern(name); if (imageMatch != null) { if (sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) { try { long txid = Long.parseLong(imageMatch.group(1)); foundImages.add(new FSImageFile(sd, f, txid)); } catch (NumberFormatException nfe) { LOG.error("Image file " + f + " has improperly formatted " + "transaction ID"); // skip } } else { LOG.warn("Found image file at " + f + " but storage directory is " + "not configured to contain images."); } } } // set finalized flag isUpgradeFinalized = isUpgradeFinalized && !sd.getPreviousDir().exists(); } @Override public boolean isUpgradeFinalized() { return isUpgradeFinalized; } /** * @return the image files that have the most recent associated * transaction IDs. If there are multiple storage directories which * contain equal images, we'll return them all. * * @throws FileNotFoundException if not images are found. */ @Override List<FSImageFile> getLatestImages() throws IOException { LinkedList<FSImageFile> ret = new LinkedList<FSImageFile>(); for (FSImageFile img : foundImages) { if (ret.isEmpty()) { ret.add(img); } else { FSImageFile cur = ret.getFirst(); if (cur.txId == img.txId) { ret.add(img); } else if (cur.txId < img.txId) { ret.clear(); ret.add(img); } } } if (ret.isEmpty()) { throw new FileNotFoundException("No valid image files found"); } return ret; } public List<FSImageFile> getFoundImages() { return ImmutableList.copyOf(foundImages); } @Override public boolean needToSave() { return needToSave; } @Override long getMaxSeenTxId() { return maxSeenTxId; } }
5,685
29.902174
80
java
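getLatestImages() above keeps every image file that ties for the highest transaction ID found in its name. The same selection can be sketched independently of NNStorage: parse the txid out of names like fsimage_0000042 and keep all names whose txid equals the maximum. The file names in main() are made up for the example.

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class LatestImagePicker {
  private static final Pattern IMAGE_NAME = Pattern.compile("fsimage_(\\d+)");

  // Returns every name carrying the highest txid; ties are kept, mirroring
  // the "equal images in multiple storage directories" case.
  static List<String> pickLatest(List<String> fileNames) {
    long maxTxId = -1;
    List<String> latest = new ArrayList<>();
    for (String name : fileNames) {
      Matcher m = IMAGE_NAME.matcher(name);
      if (!m.matches()) {
        continue; // not an image file
      }
      long txId = Long.parseLong(m.group(1));
      if (txId > maxTxId) {
        maxTxId = txId;
        latest.clear();
        latest.add(name);
      } else if (txId == maxTxId) {
        latest.add(name);
      }
    }
    return latest;
  }

  public static void main(String[] args) {
    System.out.println(pickLatest(List.of(
        "fsimage_0000042", "fsimage_0000042", "fsimage_0000017", "edits_1-5")));
  }
}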
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.io.RandomAccessFile; import java.util.Arrays; import org.apache.commons.io.Charsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature; import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.Loader; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary; import org.apache.hadoop.io.compress.CompressionCodec; @InterfaceAudience.Private public final class FSImageUtil { public static final byte[] MAGIC_HEADER = "HDFSIMG1".getBytes(Charsets.UTF_8); public static final int FILE_VERSION = 1; public static boolean checkFileFormat(RandomAccessFile file) throws IOException { if (file.length() < Loader.MINIMUM_FILE_LENGTH) return false; byte[] magic = new byte[MAGIC_HEADER.length]; file.readFully(magic); if (!Arrays.equals(MAGIC_HEADER, magic)) return false; return true; } public static FileSummary loadSummary(RandomAccessFile file) throws IOException { final int FILE_LENGTH_FIELD_SIZE = 4; long fileLength = file.length(); file.seek(fileLength - FILE_LENGTH_FIELD_SIZE); int summaryLength = file.readInt(); if (summaryLength <= 0) { throw new IOException("Negative length of the file"); } file.seek(fileLength - FILE_LENGTH_FIELD_SIZE - summaryLength); byte[] summaryBytes = new byte[summaryLength]; file.readFully(summaryBytes); FileSummary summary = FileSummary .parseDelimitedFrom(new ByteArrayInputStream(summaryBytes)); if (summary.getOndiskVersion() != FILE_VERSION) { throw new IOException("Unsupported file version " + summary.getOndiskVersion()); } if (!NameNodeLayoutVersion.supports(Feature.PROTOBUF_FORMAT, summary.getLayoutVersion())) { throw new IOException("Unsupported layout version " + summary.getLayoutVersion()); } return summary; } public static InputStream wrapInputStreamForCompression( Configuration conf, String codec, InputStream in) throws IOException { if (codec.isEmpty()) return in; FSImageCompression compression = FSImageCompression.createCompression( conf, codec); CompressionCodec imageCodec = compression.getImageCodec(); return imageCodec.createInputStream(in); } }
3,340
34.168421
76
java
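loadSummary() above relies on a simple trailer layout: the last 4 bytes of the image file hold the length of a summary record that sits immediately before them. The generic helper below sketches reading such a length-suffixed trailer with plain java.io and no protobuf; the layout description comes from the method, but the helper itself is not part of FSImageUtil.

import java.io.IOException;
import java.io.RandomAccessFile;

final class TrailerReader {
  private static final int LENGTH_FIELD_SIZE = 4;

  // Reads a trailer of the form: [trailer bytes][4-byte big-endian length].
  static byte[] readTrailer(RandomAccessFile file) throws IOException {
    long fileLength = file.length();
    if (fileLength < LENGTH_FIELD_SIZE) {
      throw new IOException("File too short to contain a trailer");
    }
    file.seek(fileLength - LENGTH_FIELD_SIZE);
    int trailerLength = file.readInt();
    if (trailerLength <= 0
        || trailerLength > fileLength - LENGTH_FIELD_SIZE) {
      throw new IOException("Corrupt trailer length: " + trailerLength);
    }
    file.seek(fileLength - LENGTH_FIELD_SIZE - trailerLength);
    byte[] trailer = new byte[trailerLength];
    file.readFully(trailer);
    return trailer;
  }

  private TrailerReader() {}
}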
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RetryStartFileException.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;

@InterfaceAudience.Private
public class RetryStartFileException extends IOException {
  private static final long serialVersionUID = 1L;

  public RetryStartFileException() {
    super("Preconditions for creating a file failed because of a "
        + "transient error, retry create later.");
  }

  public RetryStartFileException(String s) {
    super(s);
  }
}
1,308
34.378378
75
java
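RetryStartFileException tells the caller that file creation hit a transient precondition failure and should simply be retried later. A minimal client-side retry loop for that situation might look like the sketch below; the Creator interface, the TransientCreateException marker and the backoff values are assumptions of the sketch, not DFSClient behaviour.

import java.io.IOException;

final class CreateWithRetry {
  // Stand-in for the call that may fail transiently.
  interface Creator {
    void create() throws IOException;
  }

  // Hypothetical marker for "transient, please retry" failures.
  static class TransientCreateException extends IOException {}

  static void createWithRetry(Creator creator, int maxAttempts)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        creator.create();
        return;
      } catch (TransientCreateException e) {
        if (attempt >= maxAttempts) {
          throw e; // give up after a bounded number of retries
        }
        Thread.sleep(100L * attempt); // simple linear backoff
      }
    }
  }

  private CreateWithRetry() {}
}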
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.RecoverLeaseOp; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import com.google.common.annotations.VisibleForTesting; /** * Helper class to perform truncate operation. */ final class FSDirTruncateOp { /** * Private constructor for preventing FSDirTruncateOp object creation. * Static-only class. */ private FSDirTruncateOp() {} /** * Truncate a file to a given size. 
* * @param fsn namespace * @param srcArg path name * @param newLength the target file size * @param clientName client name * @param clientMachine client machine info * @param mtime modified time * @param toRemoveBlocks to be removed blocks * @param pc permission checker to check fs permission * @return tuncate result * @throws IOException */ static TruncateResult truncate(final FSNamesystem fsn, final String srcArg, final long newLength, final String clientName, final String clientMachine, final long mtime, final BlocksMapUpdateInfo toRemoveBlocks, final FSPermissionChecker pc) throws IOException, UnresolvedLinkException { assert fsn.hasWriteLock(); FSDirectory fsd = fsn.getFSDirectory(); byte[][] pathComponents = FSDirectory .getPathComponentsForReservedPath(srcArg); final String src; final INodesInPath iip; final boolean onBlockBoundary; Block truncateBlock = null; fsd.writeLock(); try { src = fsd.resolvePath(pc, srcArg, pathComponents); iip = fsd.getINodesInPath4Write(src, true); if (fsd.isPermissionEnabled()) { fsd.checkPathAccess(pc, iip, FsAction.WRITE); } INodeFile file = INodeFile.valueOf(iip.getLastINode(), src); final BlockStoragePolicy lpPolicy = fsd.getBlockManager() .getStoragePolicy("LAZY_PERSIST"); if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) { throw new UnsupportedOperationException( "Cannot truncate lazy persist file " + src); } // Check if the file is already being truncated with the same length final BlockInfo last = file.getLastBlock(); if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) { final Block truncatedBlock = ((BlockInfoContiguousUnderConstruction) last) .getTruncateBlock(); if (truncatedBlock != null) { final long truncateLength = file.computeFileSize(false, false) + truncatedBlock.getNumBytes(); if (newLength == truncateLength) { return new TruncateResult(false, fsd.getAuditFileInfo(iip)); } } } // Opening an existing file for truncate. May need lease recovery. fsn.recoverLeaseInternal(RecoverLeaseOp.TRUNCATE_FILE, iip, src, clientName, clientMachine, false); // Truncate length check. long oldLength = file.computeFileSize(); if (oldLength == newLength) { return new TruncateResult(true, fsd.getAuditFileInfo(iip)); } if (oldLength < newLength) { throw new HadoopIllegalArgumentException( "Cannot truncate to a larger file size. Current size: " + oldLength + ", truncate size: " + newLength + "."); } // Perform INodeFile truncation. final QuotaCounts delta = new QuotaCounts.Builder().build(); onBlockBoundary = unprotectedTruncate(fsn, iip, newLength, toRemoveBlocks, mtime, delta); if (!onBlockBoundary) { // Open file for write, but don't log into edits long lastBlockDelta = file.computeFileSize() - newLength; assert lastBlockDelta > 0 : "delta is 0 only if on block bounday"; truncateBlock = prepareFileForTruncate(fsn, iip, clientName, clientMachine, lastBlockDelta, null); } // update the quota: use the preferred block size for UC block fsd.updateCountNoQuotaCheck(iip, iip.length() - 1, delta); } finally { fsd.writeUnlock(); } fsn.getEditLog().logTruncate(src, clientName, clientMachine, newLength, mtime, truncateBlock); return new TruncateResult(onBlockBoundary, fsd.getAuditFileInfo(iip)); } /** * Unprotected truncate implementation. Unlike * {@link FSDirTruncateOp#truncate}, this will not schedule block recovery. 
* * @param fsn namespace * @param src path name * @param clientName client name * @param clientMachine client machine info * @param newLength the target file size * @param mtime modified time * @param truncateBlock truncate block * @throws IOException */ static void unprotectedTruncate(final FSNamesystem fsn, final String src, final String clientName, final String clientMachine, final long newLength, final long mtime, final Block truncateBlock) throws UnresolvedLinkException, QuotaExceededException, SnapshotAccessControlException, IOException { assert fsn.hasWriteLock(); FSDirectory fsd = fsn.getFSDirectory(); INodesInPath iip = fsd.getINodesInPath(src, true); INodeFile file = iip.getLastINode().asFile(); BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); boolean onBlockBoundary = unprotectedTruncate(fsn, iip, newLength, collectedBlocks, mtime, null); if (!onBlockBoundary) { BlockInfo oldBlock = file.getLastBlock(); Block tBlk = prepareFileForTruncate(fsn, iip, clientName, clientMachine, file.computeFileSize() - newLength, truncateBlock); assert Block.matchingIdAndGenStamp(tBlk, truncateBlock) && tBlk.getNumBytes() == truncateBlock.getNumBytes() : "Should be the same block."; if (oldBlock.getBlockId() != tBlk.getBlockId() && !file.isBlockInLatestSnapshot(oldBlock)) { fsd.getBlockManager().removeBlockFromMap(oldBlock); } } assert onBlockBoundary == (truncateBlock == null) : "truncateBlock is null iff on block boundary: " + truncateBlock; fsn.removeBlocksAndUpdateSafemodeTotal(collectedBlocks); } /** * Convert current INode to UnderConstruction. Recreate lease. Create new * block for the truncated copy. Schedule truncation of the replicas. * * @param fsn namespace * @param iip inodes in the path containing the file * @param leaseHolder lease holder * @param clientMachine client machine info * @param lastBlockDelta last block delta size * @param newBlock new block * @return the returned block will be written to editLog and passed back * into this method upon loading. * @throws IOException */ @VisibleForTesting static Block prepareFileForTruncate(FSNamesystem fsn, INodesInPath iip, String leaseHolder, String clientMachine, long lastBlockDelta, Block newBlock) throws IOException { assert fsn.hasWriteLock(); INodeFile file = iip.getLastINode().asFile(); file.recordModification(iip.getLatestSnapshotId()); file.toUnderConstruction(leaseHolder, clientMachine); assert file.isUnderConstruction() : "inode should be under construction."; fsn.getLeaseManager().addLease( file.getFileUnderConstructionFeature().getClientName(), file.getId()); boolean shouldRecoverNow = (newBlock == null); BlockInfo oldBlock = file.getLastBlock(); boolean shouldCopyOnTruncate = shouldCopyOnTruncate(fsn, file, oldBlock); if (newBlock == null) { newBlock = (shouldCopyOnTruncate) ? 
fsn.createNewBlock() : new Block( oldBlock.getBlockId(), oldBlock.getNumBytes(), fsn.nextGenerationStamp(fsn.getBlockIdManager().isLegacyBlock( oldBlock))); } BlockInfoContiguousUnderConstruction truncatedBlockUC; BlockManager blockManager = fsn.getFSDirectory().getBlockManager(); if (shouldCopyOnTruncate) { // Add new truncateBlock into blocksMap and // use oldBlock as a source for copy-on-truncate recovery truncatedBlockUC = new BlockInfoContiguousUnderConstruction(newBlock, file.getPreferredBlockReplication()); truncatedBlockUC.setNumBytes(oldBlock.getNumBytes() - lastBlockDelta); truncatedBlockUC.setTruncateBlock(oldBlock); file.setLastBlock(truncatedBlockUC, blockManager.getStorages(oldBlock)); blockManager.addBlockCollection(truncatedBlockUC, file); NameNode.stateChangeLog.debug( "BLOCK* prepareFileForTruncate: Scheduling copy-on-truncate to new" + " size {} new block {} old block {}", truncatedBlockUC.getNumBytes(), newBlock, truncatedBlockUC.getTruncateBlock()); } else { // Use new generation stamp for in-place truncate recovery blockManager.convertLastBlockToUnderConstruction(file, lastBlockDelta); oldBlock = file.getLastBlock(); assert !oldBlock.isComplete() : "oldBlock should be under construction"; truncatedBlockUC = (BlockInfoContiguousUnderConstruction) oldBlock; truncatedBlockUC.setTruncateBlock(new Block(oldBlock)); truncatedBlockUC.getTruncateBlock().setNumBytes( oldBlock.getNumBytes() - lastBlockDelta); truncatedBlockUC.getTruncateBlock().setGenerationStamp( newBlock.getGenerationStamp()); NameNode.stateChangeLog.debug( "BLOCK* prepareFileForTruncate: {} Scheduling in-place block " + "truncate to new size {}", truncatedBlockUC.getTruncateBlock() .getNumBytes(), truncatedBlockUC); } if (shouldRecoverNow) { truncatedBlockUC.initializeBlockRecovery(newBlock.getGenerationStamp()); } return newBlock; } /** * Truncate has the following properties: * 1.) Any block deletions occur now. * 2.) INode length is truncated now - new clients can only read up to * the truncated length. * 3.) INode will be set to UC and lastBlock set to UNDER_RECOVERY. * 4.) NN will trigger DN truncation recovery and waits for DNs to report. * 5.) File is considered UNDER_RECOVERY until truncation recovery * completes. * 6.) Soft and hard Lease expiration require truncation recovery to * complete. 
* * @return true if on the block boundary or false if recovery is need */ private static boolean unprotectedTruncate(FSNamesystem fsn, INodesInPath iip, long newLength, BlocksMapUpdateInfo collectedBlocks, long mtime, QuotaCounts delta) throws IOException { assert fsn.hasWriteLock(); INodeFile file = iip.getLastINode().asFile(); int latestSnapshot = iip.getLatestSnapshotId(); file.recordModification(latestSnapshot, true); verifyQuotaForTruncate(fsn, iip, file, newLength, delta); long remainingLength = file.collectBlocksBeyondMax(newLength, collectedBlocks); file.excludeSnapshotBlocks(latestSnapshot, collectedBlocks); file.setModificationTime(mtime); // return whether on a block boundary return (remainingLength - newLength) == 0; } private static void verifyQuotaForTruncate(FSNamesystem fsn, INodesInPath iip, INodeFile file, long newLength, QuotaCounts delta) throws QuotaExceededException { FSDirectory fsd = fsn.getFSDirectory(); if (!fsn.isImageLoaded() || fsd.shouldSkipQuotaChecks()) { // Do not check quota if edit log is still being processed return; } final BlockStoragePolicy policy = fsd.getBlockStoragePolicySuite() .getPolicy(file.getStoragePolicyID()); file.computeQuotaDeltaForTruncate(newLength, policy, delta); fsd.readLock(); try { FSDirectory.verifyQuota(iip, iip.length() - 1, delta, null); } finally { fsd.readUnlock(); } } /** * Defines if a replica needs to be copied on truncate or * can be truncated in place. */ private static boolean shouldCopyOnTruncate(FSNamesystem fsn, INodeFile file, BlockInfo blk) { if (!fsn.isUpgradeFinalized()) { return true; } if (fsn.isRollingUpgrade()) { return true; } return file.isBlockInLatestSnapshot(blk); } /** * Result of truncate operation. */ static class TruncateResult { private final boolean result; private final HdfsFileStatus stat; public TruncateResult(boolean result, HdfsFileStatus stat) { this.result = result; this.stat = stat; } /** * @return true if client does not need to wait for block recovery, * false if client needs to wait for block recovery. */ boolean getResult() { return result; } /** * @return file information. */ HdfsFileStatus getFileStatus() { return stat; } } }
14,310
38.642659
90
java
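Whether a truncate needs block recovery comes down to whether newLength lands exactly on a block boundary; if not, the last kept block must be shortened by a lastBlockDelta. The standalone sketch below shows that arithmetic over a plain list of block lengths; it is an illustration of the boundary check, not the INodeFile API.

import java.util.List;

final class TruncateBoundary {
  // Result of planning a truncate over a file's block lengths.
  static final class Plan {
    final boolean onBlockBoundary; // true: no block recovery needed
    final long lastBlockDelta;     // bytes to cut from the last kept block
    Plan(boolean onBlockBoundary, long lastBlockDelta) {
      this.onBlockBoundary = onBlockBoundary;
      this.lastBlockDelta = lastBlockDelta;
    }
  }

  static Plan plan(List<Long> blockLengths, long newLength) {
    long offset = 0;
    for (long len : blockLengths) {
      long end = offset + len;
      if (newLength == offset || newLength == end) {
        // Cut falls exactly between blocks: just drop the tail blocks.
        return new Plan(true, 0);
      }
      if (newLength < end) {
        // Cut falls inside this block: it must be shortened via recovery.
        return new Plan(false, end - newLength);
      }
      offset = end;
    }
    // newLength >= current size: nothing to cut (callers reject growth).
    return new Plan(true, 0);
  }

  public static void main(String[] args) {
    Plan p = plan(List.of(128L, 128L, 64L), 200L);
    System.out.println(p.onBlockBoundary + " delta=" + p.lastBlockDelta);
  }
}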
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.FileNotFoundException; import java.io.IOException; import java.io.OutputStream; import java.io.PrintWriter; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Socket; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.TreeSet; import java.util.concurrent.ThreadLocalRandom; import org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.hdfs.BlockReader; import org.apache.hadoop.hdfs.BlockReaderFactory; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.RemotePeerFactory; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.net.TcpPeerServer; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.util.LightWeightLinkedSet; 
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.NodeBase; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; /** * This class provides rudimentary checking of DFS volumes for errors and * sub-optimal conditions. * <p>The tool scans all files and directories, starting from an indicated * root path. The following abnormal conditions are detected and handled:</p> * <ul> * <li>files with blocks that are completely missing from all datanodes.<br/> * In this case the tool can perform one of the following actions: * <ul> * <li>none ({@link #FIXING_NONE})</li> * <li>move corrupted files to /lost+found directory on DFS * ({@link #FIXING_MOVE}). Remaining data blocks are saved as a * block chains, representing longest consecutive series of valid blocks.</li> * <li>delete corrupted files ({@link #FIXING_DELETE})</li> * </ul> * </li> * <li>detect files with under-replicated or over-replicated blocks</li> * </ul> * Additionally, the tool collects a detailed overall DFS statistics, and * optionally can print detailed statistics on block locations and replication * factors of each file. */ @InterfaceAudience.Private public class NamenodeFsck implements DataEncryptionKeyFactory { public static final Log LOG = LogFactory.getLog(NameNode.class.getName()); // return string marking fsck status public static final String CORRUPT_STATUS = "is CORRUPT"; public static final String HEALTHY_STATUS = "is HEALTHY"; public static final String DECOMMISSIONING_STATUS = "is DECOMMISSIONING"; public static final String DECOMMISSIONED_STATUS = "is DECOMMISSIONED"; public static final String NONEXISTENT_STATUS = "does not exist"; public static final String FAILURE_STATUS = "FAILED"; private final NameNode namenode; private final NetworkTopology networktopology; private final int totalDatanodes; private final InetAddress remoteAddress; private String lostFound = null; private boolean lfInited = false; private boolean lfInitedOk = false; private boolean showFiles = false; private boolean showOpenFiles = false; private boolean showBlocks = false; private boolean showLocations = false; private boolean showRacks = false; private boolean showStoragePolcies = false; private boolean showCorruptFileBlocks = false; private boolean showReplicaDetails = false; private long staleInterval; /** * True if we encountered an internal error during FSCK, such as not being * able to delete a corrupt file. */ private boolean internalError = false; /** * True if the user specified the -move option. * * Whe this option is in effect, we will copy salvaged blocks into the lost * and found. */ private boolean doMove = false; /** * True if the user specified the -delete option. * * Whe this option is in effect, we will delete corrupted files. 
*/ private boolean doDelete = false; String path = "/"; private String blockIds = null; // We return back N files that are corrupt; the list of files returned is // ordered by block id; to allow continuation support, pass in the last block // # from previous call private final String[] currentCookie = new String[] { null }; private final Configuration conf; private final PrintWriter out; private List<String> snapshottableDirs = null; private final BlockPlacementPolicy bpPolicy; private StoragePolicySummary storageTypeSummary = null; /** * Filesystem checker. * @param conf configuration (namenode config) * @param namenode namenode that this fsck is going to use * @param pmap key=value[] map passed to the http servlet as url parameters * @param out output stream to write the fsck output * @param totalDatanodes number of live datanodes * @param remoteAddress source address of the fsck request */ NamenodeFsck(Configuration conf, NameNode namenode, NetworkTopology networktopology, Map<String,String[]> pmap, PrintWriter out, int totalDatanodes, InetAddress remoteAddress) { this.conf = conf; this.namenode = namenode; this.networktopology = networktopology; this.out = out; this.totalDatanodes = totalDatanodes; this.remoteAddress = remoteAddress; this.bpPolicy = BlockPlacementPolicy.getInstance(conf, null, networktopology, namenode.getNamesystem().getBlockManager().getDatanodeManager() .getHost2DatanodeMap()); this.staleInterval = conf.getLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT); for (Iterator<String> it = pmap.keySet().iterator(); it.hasNext();) { String key = it.next(); if (key.equals("path")) { this.path = pmap.get("path")[0]; } else if (key.equals("move")) { this.doMove = true; } else if (key.equals("delete")) { this.doDelete = true; } else if (key.equals("files")) { this.showFiles = true; } else if (key.equals("blocks")) { this.showBlocks = true; } else if (key.equals("locations")) { this.showLocations = true; } else if (key.equals("racks")) { this.showRacks = true; } else if (key.equals("replicadetails")) { this.showReplicaDetails = true; } else if (key.equals("storagepolicies")) { this.showStoragePolcies = true; } else if (key.equals("openforwrite")) {this.showOpenFiles = true; } else if (key.equals("listcorruptfileblocks")) { this.showCorruptFileBlocks = true; } else if (key.equals("startblockafter")) { this.currentCookie[0] = pmap.get("startblockafter")[0]; } else if (key.equals("includeSnapshots")) { this.snapshottableDirs = new ArrayList<String>(); } else if (key.equals("blockId")) { this.blockIds = pmap.get("blockId")[0]; } } } /** * Check block information given a blockId number * */ public void blockIdCK(String blockId) { if(blockId == null) { out.println("Please provide valid blockId!"); return; } BlockManager bm = namenode.getNamesystem().getBlockManager(); try { //get blockInfo Block block = new Block(Block.getBlockId(blockId)); //find which file this block belongs to BlockInfo blockInfo = bm.getStoredBlock(block); if(blockInfo == null) { out.println("Block "+ blockId +" " + NONEXISTENT_STATUS); LOG.warn("Block "+ blockId + " " + NONEXISTENT_STATUS); return; } BlockCollection bc = bm.getBlockCollection(blockInfo); INode iNode = (INode) bc; NumberReplicas numberReplicas= bm.countNodes(blockInfo); out.println("Block Id: " + blockId); out.println("Block belongs to: "+iNode.getFullPathName()); out.println("No. of Expected Replica: " + bc.getPreferredBlockReplication()); out.println("No. 
of live Replica: " + numberReplicas.liveReplicas()); out.println("No. of excess Replica: " + numberReplicas.excessReplicas()); out.println("No. of stale Replica: " + numberReplicas.replicasOnStaleNodes()); out.println("No. of decommissioned Replica: " + numberReplicas.decommissioned()); out.println("No. of decommissioning Replica: " + numberReplicas.decommissioning()); out.println("No. of corrupted Replica: " + numberReplicas.corruptReplicas()); //record datanodes that have corrupted block replica Collection<DatanodeDescriptor> corruptionRecord = null; if (bm.getCorruptReplicas(block) != null) { corruptionRecord = bm.getCorruptReplicas(block); } //report block replicas status on datanodes for(int idx = (blockInfo.numNodes()-1); idx >= 0; idx--) { DatanodeDescriptor dn = blockInfo.getDatanode(idx); out.print("Block replica on datanode/rack: " + dn.getHostName() + dn.getNetworkLocation() + " "); if (corruptionRecord != null && corruptionRecord.contains(dn)) { out.print(CORRUPT_STATUS+"\t ReasonCode: "+ bm.getCorruptReason(block,dn)); } else if (dn.isDecommissioned() ){ out.print(DECOMMISSIONED_STATUS); } else if (dn.isDecommissionInProgress()) { out.print(DECOMMISSIONING_STATUS); } else { out.print(HEALTHY_STATUS); } out.print("\n"); } } catch (Exception e){ String errMsg = "Fsck on blockId '" + blockId; LOG.warn(errMsg, e); out.println(e.getMessage()); out.print("\n\n" + errMsg); LOG.warn("Error in looking up block", e); } } /** * Check files on DFS, starting from the indicated path. */ public void fsck() { final long startTime = Time.monotonicNow(); try { if(blockIds != null) { String[] blocks = blockIds.split(" "); StringBuilder sb = new StringBuilder(); sb.append("FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + remoteAddress + " at " + new Date()); out.println(sb.toString()); sb.append(" for blockIds: \n"); for (String blk: blocks) { if(blk == null || !blk.contains(Block.BLOCK_FILE_PREFIX)) { out.println("Incorrect blockId format: " + blk); continue; } out.print("\n"); blockIdCK(blk); sb.append(blk + "\n"); } LOG.info(sb.toString()); namenode.getNamesystem().logFsckEvent("/", remoteAddress); out.flush(); return; } String msg = "FSCK started by " + UserGroupInformation.getCurrentUser() + " from " + remoteAddress + " for path " + path + " at " + new Date(); LOG.info(msg); out.println(msg); namenode.getNamesystem().logFsckEvent(path, remoteAddress); if (snapshottableDirs != null) { SnapshottableDirectoryStatus[] snapshotDirs = namenode.getRpcServer() .getSnapshottableDirListing(); if (snapshotDirs != null) { for (SnapshottableDirectoryStatus dir : snapshotDirs) { snapshottableDirs.add(dir.getFullPath().toString()); } } } final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path); if (file != null) { if (showCorruptFileBlocks) { listCorruptFileBlocks(); return; } if (this.showStoragePolcies) { storageTypeSummary = new StoragePolicySummary( namenode.getNamesystem().getBlockManager().getStoragePolicies()); } Result res = new Result(conf); check(path, file, res); out.println(res); out.println(" Number of data-nodes:\t\t" + totalDatanodes); out.println(" Number of racks:\t\t" + networktopology.getNumOfRacks()); if (this.showStoragePolcies) { out.print(storageTypeSummary.toString()); } out.println("FSCK ended at " + new Date() + " in " + (Time.monotonicNow() - startTime + " milliseconds")); // If there were internal errors during the fsck operation, we want to // return FAILURE_STATUS, even if those errors were not immediately // fatal. 
Otherwise many unit tests will pass even when there are bugs. if (internalError) { throw new IOException("fsck encountered internal errors!"); } // DFSck client scans for the string HEALTHY/CORRUPT to check the status // of file system and return appropriate code. Changing the output // string might break testcases. Also note this must be the last line // of the report. if (res.isHealthy()) { out.print("\n\nThe filesystem under path '" + path + "' " + HEALTHY_STATUS); } else { out.print("\n\nThe filesystem under path '" + path + "' " + CORRUPT_STATUS); } } else { out.print("\n\nPath '" + path + "' " + NONEXISTENT_STATUS); } } catch (Exception e) { String errMsg = "Fsck on path '" + path + "' " + FAILURE_STATUS; LOG.warn(errMsg, e); out.println("FSCK ended at " + new Date() + " in " + (Time.monotonicNow() - startTime + " milliseconds")); out.println(e.getMessage()); out.print("\n\n" + errMsg); } finally { out.close(); } } private void listCorruptFileBlocks() throws IOException { Collection<FSNamesystem.CorruptFileBlockInfo> corruptFiles = namenode. getNamesystem().listCorruptFileBlocks(path, currentCookie); int numCorruptFiles = corruptFiles.size(); String filler; if (numCorruptFiles > 0) { filler = Integer.toString(numCorruptFiles); } else if (currentCookie[0].equals("0")) { filler = "no"; } else { filler = "no more"; } out.println("Cookie:\t" + currentCookie[0]); for (FSNamesystem.CorruptFileBlockInfo c : corruptFiles) { out.println(c.toString()); } out.println("\n\nThe filesystem under path '" + path + "' has " + filler + " CORRUPT files"); out.println(); } @VisibleForTesting void check(String parent, HdfsFileStatus file, Result res) throws IOException { String path = file.getFullName(parent); if (file.isDir()) { checkDir(path, res); return; } if (file.isSymlink()) { if (showFiles) { out.println(path + " <symlink>"); } res.totalSymlinks++; return; } LocatedBlocks blocks = getBlockLocations(path, file); if (blocks == null) { // the file is deleted return; } collectFileSummary(path, file, res, blocks); collectBlocksSummary(parent, file, res, blocks); } private void checkDir(String path, Result res) throws IOException { if (snapshottableDirs != null && snapshottableDirs.contains(path)) { String snapshotPath = (path.endsWith(Path.SEPARATOR) ? 
path : path + Path.SEPARATOR) + HdfsConstants.DOT_SNAPSHOT_DIR; HdfsFileStatus snapshotFileInfo = namenode.getRpcServer().getFileInfo( snapshotPath); check(snapshotPath, snapshotFileInfo, res); } byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME; DirectoryListing thisListing; if (showFiles) { out.println(path + " <dir>"); } res.totalDirs++; do { assert lastReturnedName != null; thisListing = namenode.getRpcServer().getListing( path, lastReturnedName, false); if (thisListing == null) { return; } HdfsFileStatus[] files = thisListing.getPartialListing(); for (int i = 0; i < files.length; i++) { check(path, files[i], res); } lastReturnedName = thisListing.getLastName(); } while (thisListing.hasMore()); } private LocatedBlocks getBlockLocations(String path, HdfsFileStatus file) throws IOException { long fileLen = file.getLen(); LocatedBlocks blocks = null; final FSNamesystem fsn = namenode.getNamesystem(); fsn.readLock(); try { blocks = FSDirStatAndListingOp.getBlockLocations( fsn.getFSDirectory(), fsn.getPermissionChecker(), path, 0, fileLen, false) .blocks; } catch (FileNotFoundException fnfe) { blocks = null; } finally { fsn.readUnlock(); } return blocks; } private void collectFileSummary(String path, HdfsFileStatus file, Result res, LocatedBlocks blocks) throws IOException { long fileLen = file.getLen(); boolean isOpen = blocks.isUnderConstruction(); if (isOpen && !showOpenFiles) { // We collect these stats about open files to report with default options res.totalOpenFilesSize += fileLen; res.totalOpenFilesBlocks += blocks.locatedBlockCount(); res.totalOpenFiles++; return; } res.totalFiles++; res.totalSize += fileLen; res.totalBlocks += blocks.locatedBlockCount(); if (showOpenFiles && isOpen) { out.print(path + " " + fileLen + " bytes, " + blocks.locatedBlockCount() + " block(s), OPENFORWRITE: "); } else if (showFiles) { out.print(path + " " + fileLen + " bytes, " + blocks.locatedBlockCount() + " block(s): "); } else { out.print('.'); } if (res.totalFiles % 100 == 0) { out.println(); out.flush(); } } private void collectBlocksSummary(String parent, HdfsFileStatus file, Result res, LocatedBlocks blocks) throws IOException { String path = file.getFullName(parent); boolean isOpen = blocks.isUnderConstruction(); int missing = 0; int corrupt = 0; long missize = 0; int underReplicatedPerFile = 0; int misReplicatedPerFile = 0; StringBuilder report = new StringBuilder(); int blockNumber = 0; for (LocatedBlock lBlk : blocks.getLocatedBlocks()) { ExtendedBlock block = lBlk.getBlock(); BlockManager bm = namenode.getNamesystem().getBlockManager(); final BlockInfo storedBlock = bm.getStoredBlock( block.getLocalBlock()); // count decommissionedReplicas / decommissioningReplicas NumberReplicas numberReplicas = bm.countNodes(storedBlock); int decommissionedReplicas = numberReplicas.decommissioned();; int decommissioningReplicas = numberReplicas.decommissioning(); res.decommissionedReplicas += decommissionedReplicas; res.decommissioningReplicas += decommissioningReplicas; // count total replicas int liveReplicas = numberReplicas.liveReplicas(); int totalReplicasPerBlock = liveReplicas + decommissionedReplicas + decommissioningReplicas; res.totalReplicas += totalReplicasPerBlock; // count expected replicas short targetFileReplication = file.getReplication(); res.numExpectedReplicas += targetFileReplication; // count under min repl'd blocks if(totalReplicasPerBlock < res.minReplication){ res.numUnderMinReplicatedBlocks++; } // count excessive Replicas / over replicated blocks if (liveReplicas > 
targetFileReplication) { res.excessiveReplicas += (liveReplicas - targetFileReplication); res.numOverReplicatedBlocks += 1; } // count corrupt blocks boolean isCorrupt = lBlk.isCorrupt(); if (isCorrupt) { corrupt++; res.corruptBlocks++; out.print("\n" + path + ": CORRUPT blockpool " + block.getBlockPoolId() + " block " + block.getBlockName()+"\n"); } // count minimally replicated blocks if (totalReplicasPerBlock >= res.minReplication) res.numMinReplicatedBlocks++; // count missing replicas / under replicated blocks if (totalReplicasPerBlock < targetFileReplication && totalReplicasPerBlock > 0) { res.missingReplicas += (targetFileReplication - totalReplicasPerBlock); res.numUnderReplicatedBlocks += 1; underReplicatedPerFile++; if (!showFiles) { out.print("\n" + path + ": "); } out.println(" Under replicated " + block + ". Target Replicas is " + targetFileReplication + " but found " + liveReplicas + " live replica(s), " + decommissionedReplicas + " decommissioned replica(s) and " + decommissioningReplicas + " decommissioning replica(s)."); } // count mis replicated blocks BlockPlacementStatus blockPlacementStatus = bpPolicy .verifyBlockPlacement(path, lBlk, targetFileReplication); if (!blockPlacementStatus.isPlacementPolicySatisfied()) { res.numMisReplicatedBlocks++; misReplicatedPerFile++; if (!showFiles) { if(underReplicatedPerFile == 0) out.println(); out.print(path + ": "); } out.println(" Replica placement policy is violated for " + block + ". " + blockPlacementStatus.getErrorDescription()); } // count storage summary if (this.showStoragePolcies && lBlk.getStorageTypes() != null) { countStorageTypeSummary(file, lBlk); } // report String blkName = block.toString(); report.append(blockNumber + ". " + blkName + " len=" + block.getNumBytes()); if (totalReplicasPerBlock == 0) { report.append(" MISSING!"); res.addMissing(block.toString(), block.getNumBytes()); missing++; missize += block.getNumBytes(); } else { report.append(" Live_repl=" + liveReplicas); if (showLocations || showRacks || showReplicaDetails) { StringBuilder sb = new StringBuilder("["); Iterable<DatanodeStorageInfo> storages = bm.getStorages(block.getLocalBlock()); for (Iterator<DatanodeStorageInfo> iterator = storages.iterator(); iterator.hasNext();) { DatanodeStorageInfo storage = iterator.next(); DatanodeDescriptor dnDesc = storage.getDatanodeDescriptor(); if (showRacks) { sb.append(NodeBase.getPath(dnDesc)); } else { sb.append(new DatanodeInfoWithStorage(dnDesc, storage.getStorageID(), storage .getStorageType())); } if (showReplicaDetails) { LightWeightLinkedSet<Block> blocksExcess = bm.excessReplicateMap.get(dnDesc.getDatanodeUuid()); Collection<DatanodeDescriptor> corruptReplicas = bm.getCorruptReplicas(block.getLocalBlock()); sb.append("("); if (dnDesc.isDecommissioned()) { sb.append("DECOMMISSIONED)"); } else if (dnDesc.isDecommissionInProgress()) { sb.append("DECOMMISSIONING)"); } else if (corruptReplicas != null && corruptReplicas.contains(dnDesc)) { sb.append("CORRUPT)"); } else if (blocksExcess != null && blocksExcess.contains(block.getLocalBlock())) { sb.append("EXCESS)"); } else if (dnDesc.isStale(this.staleInterval)) { sb.append("STALE_NODE)"); } else if (storage.areBlockContentsStale()) { sb.append("STALE_BLOCK_CONTENT)"); } else { sb.append("LIVE)"); } } if (iterator.hasNext()) { sb.append(", "); } } sb.append(']'); report.append(" " + sb.toString()); } } report.append('\n'); blockNumber++; } // count corrupt file & move or delete if necessary if ((missing > 0) || (corrupt > 0)) { if (!showFiles && (missing > 
0)) { out.print("\n" + path + ": MISSING " + missing + " blocks of total size " + missize + " B."); } res.corruptFiles++; if (isOpen) { LOG.info("Fsck: ignoring open file " + path); } else { if (doMove) copyBlocksToLostFound(parent, file, blocks); if (doDelete) deleteCorruptedFile(path); } } if (showFiles) { if (missing > 0) { out.print(" MISSING " + missing + " blocks of total size " + missize + " B\n"); } else if (underReplicatedPerFile == 0 && misReplicatedPerFile == 0) { out.print(" OK\n"); } if (showBlocks) { out.print(report.toString() + "\n"); } } } private void countStorageTypeSummary(HdfsFileStatus file, LocatedBlock lBlk) { StorageType[] storageTypes = lBlk.getStorageTypes(); storageTypeSummary.add(Arrays.copyOf(storageTypes, storageTypes.length), namenode.getNamesystem().getBlockManager() .getStoragePolicy(file.getStoragePolicy())); } private void deleteCorruptedFile(String path) { try { namenode.getRpcServer().delete(path, true); LOG.info("Fsck: deleted corrupt file " + path); } catch (Exception e) { LOG.error("Fsck: error deleting corrupted file " + path, e); internalError = true; } } boolean hdfsPathExists(String path) throws AccessControlException, UnresolvedLinkException, IOException { try { HdfsFileStatus hfs = namenode.getRpcServer().getFileInfo(path); return (hfs != null); } catch (FileNotFoundException e) { return false; } } private void copyBlocksToLostFound(String parent, HdfsFileStatus file, LocatedBlocks blocks) throws IOException { final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf); final String fullName = file.getFullName(parent); OutputStream fos = null; try { if (!lfInited) { lostFoundInit(dfs); } if (!lfInitedOk) { throw new IOException("failed to initialize lost+found"); } String target = lostFound + fullName; if (hdfsPathExists(target)) { LOG.warn("Fsck: can't copy the remains of " + fullName + " to " + "lost+found, because " + target + " already exists."); return; } if (!namenode.getRpcServer().mkdirs( target, file.getPermission(), true)) { throw new IOException("failed to create directory " + target); } // create chains int chain = 0; boolean copyError = false; for (LocatedBlock lBlk : blocks.getLocatedBlocks()) { LocatedBlock lblock = lBlk; DatanodeInfo[] locs = lblock.getLocations(); if (locs == null || locs.length == 0) { if (fos != null) { fos.flush(); fos.close(); fos = null; } continue; } if (fos == null) { fos = dfs.create(target + "/" + chain, true); chain++; } // copy the block. It's a pity it's not abstracted from DFSInputStream ... try { copyBlock(dfs, lblock, fos); } catch (Exception e) { LOG.error("Fsck: could not copy block " + lblock.getBlock() + " to " + target, e); fos.flush(); fos.close(); fos = null; internalError = true; copyError = true; } } if (copyError) { LOG.warn("Fsck: there were errors copying the remains of the " + "corrupted file " + fullName + " to /lost+found"); } else { LOG.info("Fsck: copied the remains of the corrupted file " + fullName + " to /lost+found"); } } catch (Exception e) { LOG.error("copyBlocksToLostFound: error processing " + fullName, e); internalError = true; } finally { if (fos != null) fos.close(); dfs.close(); } } /* * XXX (ab) Bulk of this method is copied verbatim from {@link DFSClient}, which is * bad. Both places should be refactored to provide a method to copy blocks * around. 
*/ private void copyBlock(final DFSClient dfs, LocatedBlock lblock, OutputStream fos) throws Exception { int failures = 0; InetSocketAddress targetAddr = null; TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>(); BlockReader blockReader = null; ExtendedBlock block = lblock.getBlock(); while (blockReader == null) { DatanodeInfo chosenNode; try { chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes); targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr()); } catch (IOException ie) { if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) { throw new IOException("Could not obtain block " + lblock, ie); } LOG.info("Could not obtain block from any node: " + ie); try { Thread.sleep(10000); } catch (InterruptedException iex) { } deadNodes.clear(); failures++; continue; } try { String file = BlockReaderFactory.getFileName(targetAddr, block.getBlockPoolId(), block.getBlockId()); blockReader = new BlockReaderFactory(dfs.getConf()). setFileName(file). setBlock(block). setBlockToken(lblock.getBlockToken()). setStartOffset(0). setLength(-1). setVerifyChecksum(true). setClientName("fsck"). setDatanodeInfo(chosenNode). setInetSocketAddress(targetAddr). setCachingStrategy(CachingStrategy.newDropBehind()). setClientCacheContext(dfs.getClientContext()). setConfiguration(namenode.conf). setRemotePeerFactory(new RemotePeerFactory() { @Override public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException { Peer peer = null; Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket(); try { s.connect(addr, HdfsServerConstants.READ_TIMEOUT); s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); peer = TcpPeerServer.peerFromSocketAndKey( dfs.getSaslDataTransferClient(), s, NamenodeFsck.this, blockToken, datanodeId); } finally { if (peer == null) { IOUtils.closeQuietly(s); } } return peer; } }). build(); } catch (IOException ex) { // Put chosen node into dead list, continue LOG.info("Failed to connect to " + targetAddr + ":" + ex); deadNodes.add(chosenNode); } } byte[] buf = new byte[1024]; int cnt = 0; boolean success = true; long bytesRead = 0; try { while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) { fos.write(buf, 0, cnt); bytesRead += cnt; } if ( bytesRead != block.getNumBytes() ) { throw new IOException("Recorded block size is " + block.getNumBytes() + ", but datanode returned " +bytesRead+" bytes"); } } catch (Exception e) { LOG.error("Error reading block", e); success = false; } finally { blockReader.close(); } if (!success) { throw new Exception("Could not copy block data for " + lblock.getBlock()); } } @Override public DataEncryptionKey newDataEncryptionKey() throws IOException { return namenode.getRpcServer().getDataEncryptionKey(); } /* * XXX (ab) See comment above for copyBlock(). * * Pick the best node from which to stream the data. * That's the local one, if available. 
*/ private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes, TreeSet<DatanodeInfo> deadNodes) throws IOException { if ((nodes == null) || (nodes.length - deadNodes.size() < 1)) { throw new IOException("No live nodes contain current block"); } DatanodeInfo chosenNode; do { chosenNode = nodes[ThreadLocalRandom.current().nextInt(nodes.length)]; } while (deadNodes.contains(chosenNode)); return chosenNode; } private void lostFoundInit(DFSClient dfs) { lfInited = true; try { String lfName = "/lost+found"; final HdfsFileStatus lfStatus = dfs.getFileInfo(lfName); if (lfStatus == null) { // not exists lfInitedOk = dfs.mkdirs(lfName, null, true); lostFound = lfName; } else if (!lfStatus.isDir()) { // exists but not a directory LOG.warn("Cannot use /lost+found : a regular file with this name exists."); lfInitedOk = false; } else { // exists and is a directory lostFound = lfName; lfInitedOk = true; } } catch (Exception e) { e.printStackTrace(); lfInitedOk = false; } if (lostFound == null) { LOG.warn("Cannot initialize /lost+found ."); lfInitedOk = false; internalError = true; } } /** * FsckResult of checking, plus overall DFS statistics. */ @VisibleForTesting static class Result { final List<String> missingIds = new ArrayList<String>(); long missingSize = 0L; long corruptFiles = 0L; long corruptBlocks = 0L; long excessiveReplicas = 0L; long missingReplicas = 0L; long decommissionedReplicas = 0L; long decommissioningReplicas = 0L; long numUnderMinReplicatedBlocks=0L; long numOverReplicatedBlocks = 0L; long numUnderReplicatedBlocks = 0L; long numMisReplicatedBlocks = 0L; // blocks that do not satisfy block placement policy long numMinReplicatedBlocks = 0L; // minimally replicatedblocks long totalBlocks = 0L; long numExpectedReplicas = 0L; long totalOpenFilesBlocks = 0L; long totalFiles = 0L; long totalOpenFiles = 0L; long totalDirs = 0L; long totalSymlinks = 0L; long totalSize = 0L; long totalOpenFilesSize = 0L; long totalReplicas = 0L; final short replication; final int minReplication; Result(Configuration conf) { this.replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, DFSConfigKeys.DFS_REPLICATION_DEFAULT); this.minReplication = (short)conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT); } /** * DFS is considered healthy if there are no missing blocks. */ boolean isHealthy() { return ((missingIds.size() == 0) && (corruptBlocks == 0)); } /** Add a missing block name, plus its size. */ void addMissing(String id, long size) { missingIds.add(id); missingSize += size; } /** Return the actual replication factor. */ float getReplicationFactor() { if (totalBlocks == 0) return 0.0f; return (float) (totalReplicas) / (float) totalBlocks; } @Override public String toString() { StringBuilder res = new StringBuilder(); res.append("Status: ").append((isHealthy() ? "HEALTHY" : "CORRUPT")) .append("\n Total size:\t").append(totalSize).append(" B"); if (totalOpenFilesSize != 0) { res.append(" (Total open files size: ").append(totalOpenFilesSize) .append(" B)"); } res.append("\n Total dirs:\t").append(totalDirs).append( "\n Total files:\t").append(totalFiles); res.append("\n Total symlinks:\t\t").append(totalSymlinks); if (totalOpenFiles != 0) { res.append(" (Files currently being written: ").append(totalOpenFiles) .append(")"); } res.append("\n Total blocks (validated):\t").append(totalBlocks); if (totalBlocks > 0) { res.append(" (avg. 
block size ").append((totalSize / totalBlocks)) .append(" B)"); } if (totalOpenFilesBlocks != 0) { res.append(" (Total open file blocks (not validated): ").append( totalOpenFilesBlocks).append(")"); } if (corruptFiles > 0 || numUnderMinReplicatedBlocks > 0) { res.append("\n ********************************"); if(numUnderMinReplicatedBlocks>0){ res.append("\n UNDER MIN REPL'D BLOCKS:\t").append(numUnderMinReplicatedBlocks); if(totalBlocks>0){ res.append(" (").append( ((float) (numUnderMinReplicatedBlocks * 100) / (float) totalBlocks)) .append(" %)"); } res.append("\n ").append(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY + ":\t") .append(minReplication); } if(corruptFiles>0) { res.append( "\n CORRUPT FILES:\t").append(corruptFiles); if (missingSize > 0) { res.append("\n MISSING BLOCKS:\t").append(missingIds.size()).append( "\n MISSING SIZE:\t\t").append(missingSize).append(" B"); } if (corruptBlocks > 0) { res.append("\n CORRUPT BLOCKS: \t").append(corruptBlocks); } } res.append("\n ********************************"); } res.append("\n Minimally replicated blocks:\t").append( numMinReplicatedBlocks); if (totalBlocks > 0) { res.append(" (").append( ((float) (numMinReplicatedBlocks * 100) / (float) totalBlocks)) .append(" %)"); } res.append("\n Over-replicated blocks:\t") .append(numOverReplicatedBlocks); if (totalBlocks > 0) { res.append(" (").append( ((float) (numOverReplicatedBlocks * 100) / (float) totalBlocks)) .append(" %)"); } res.append("\n Under-replicated blocks:\t").append( numUnderReplicatedBlocks); if (totalBlocks > 0) { res.append(" (").append( ((float) (numUnderReplicatedBlocks * 100) / (float) totalBlocks)) .append(" %)"); } res.append("\n Mis-replicated blocks:\t\t") .append(numMisReplicatedBlocks); if (totalBlocks > 0) { res.append(" (").append( ((float) (numMisReplicatedBlocks * 100) / (float) totalBlocks)) .append(" %)"); } res.append("\n Default replication factor:\t").append(replication) .append("\n Average block replication:\t").append( getReplicationFactor()).append("\n Corrupt blocks:\t\t").append( corruptBlocks).append("\n Missing replicas:\t\t").append( missingReplicas); if (totalReplicas > 0) { res.append(" (").append( ((float) (missingReplicas * 100) / (float) numExpectedReplicas)).append( " %)"); } if (decommissionedReplicas > 0) { res.append("\n DecommissionedReplicas:\t").append( decommissionedReplicas); } if (decommissioningReplicas > 0) { res.append("\n DecommissioningReplicas:\t").append( decommissioningReplicas); } return res.toString(); } } }
41,921
36.904159
99
java
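The Result counters above drive both the HEALTHY/CORRUPT verdict and the averages printed by toString(). The following standalone sketch mirrors that bookkeeping (health depends only on missing and corrupt blocks; the replication factor is total replicas divided by total blocks) so the arithmetic can be checked in isolation; the FsckTally class and its field names are illustrative only, not part of HDFS.

import java.util.ArrayList;
import java.util.List;

// Illustrative stand-in for the per-run counters kept by NamenodeFsck.Result.
class FsckTally {
  final List<String> missingIds = new ArrayList<>();
  long missingSize;
  long corruptBlocks;
  long totalBlocks;
  long totalReplicas;

  // Mirrors Result.addMissing(): record the block id and accumulate its size.
  void addMissing(String id, long size) {
    missingIds.add(id);
    missingSize += size;
  }

  // Mirrors Result.isHealthy(): healthy only if nothing is missing or corrupt.
  boolean isHealthy() {
    return missingIds.isEmpty() && corruptBlocks == 0;
  }

  // Mirrors Result.getReplicationFactor(): average replicas per block.
  float getReplicationFactor() {
    return totalBlocks == 0 ? 0.0f : (float) totalReplicas / totalBlocks;
  }

  public static void main(String[] args) {
    FsckTally tally = new FsckTally();
    tally.totalBlocks = 4;
    tally.totalReplicas = 11;                    // e.g. 3 + 3 + 3 + 2 live replicas
    tally.addMissing("blk_1073741825", 134217728L);
    System.out.println("healthy: " + tally.isHealthy());                 // false
    System.out.println("avg replication: " + tally.getReplicationFactor()); // 2.75
  }
}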
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamLimiter.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; /** * An object that allows you to set a limit on a stream. This limit * represents the number of bytes that can be read without getting an * exception. */ interface StreamLimiter { /** * Set a limit. Calling this function clears any existing limit. */ public void setLimit(long limit); /** * Disable limit. */ public void clearLimit(); }
1,223
33.971429
75
java
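A minimal sketch of one way to honor the StreamLimiter contract above with a wrapping input stream. The LimitedInputStream class is hypothetical, and because StreamLimiter is package-private it is assumed to sit in org.apache.hadoop.hdfs.server.namenode.

import java.io.IOException;
import java.io.InputStream;

// Hypothetical wrapper; assumed to live in the same package because the
// StreamLimiter interface is package-private.
class LimitedInputStream extends InputStream implements StreamLimiter {
  private final InputStream in;
  private long remaining = Long.MAX_VALUE;   // effectively "no limit" until setLimit()

  LimitedInputStream(InputStream in) {
    this.in = in;
  }

  @Override
  public void setLimit(long limit) {
    remaining = limit;            // replaces any previously set limit
  }

  @Override
  public void clearLimit() {
    remaining = Long.MAX_VALUE;
  }

  @Override
  public int read() throws IOException {
    if (remaining <= 0) {
      throw new IOException("Tried to read past the configured limit");
    }
    int b = in.read();
    if (b != -1) {
      remaining--;
    }
    return b;
  }
}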
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaByStorageTypeEntry.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import com.google.common.base.Objects; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.util.StringUtils; public class QuotaByStorageTypeEntry { private StorageType type; private long quota; public StorageType getStorageType() { return type; } public long getQuota() { return quota; } @Override public boolean equals(Object o){ if (o == null) { return false; } if (getClass() != o.getClass()) { return false; } QuotaByStorageTypeEntry other = (QuotaByStorageTypeEntry)o; return Objects.equal(type, other.type) && Objects.equal(quota, other.quota); } @Override public int hashCode() { return Objects.hashCode(type, quota); } @Override public String toString() { StringBuilder sb = new StringBuilder(); assert (type != null); sb.append(StringUtils.toLowerCase(type.toString())); sb.append(':'); sb.append(quota); return sb.toString(); } public static class Builder { private StorageType type; private long quota; public Builder setStorageType(StorageType type) { this.type = type; return this; } public Builder setQuota(long quota) { this.quota = quota; return this; } public QuotaByStorageTypeEntry build() { return new QuotaByStorageTypeEntry(type, quota); } } private QuotaByStorageTypeEntry(StorageType type, long quota) { this.type = type; this.quota = quota; } }
2,384
26.413793
81
java
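A brief usage sketch of the public builder above; it relies only on methods visible in the class itself and assumes org.apache.hadoop.fs.StorageType.DISK is on the classpath.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.namenode.QuotaByStorageTypeEntry;

public class QuotaEntryExample {
  public static void main(String[] args) {
    // Build an entry for a 1 GB quota on DISK storage.
    QuotaByStorageTypeEntry entry = new QuotaByStorageTypeEntry.Builder()
        .setStorageType(StorageType.DISK)
        .setQuota(1024L * 1024 * 1024)
        .build();

    // toString() lower-cases the type and joins it with the quota, e.g. "disk:1073741824".
    System.out.println(entry);

    // equals()/hashCode() are value-based, so an identical builder output is equal.
    QuotaByStorageTypeEntry same = new QuotaByStorageTypeEntry.Builder()
        .setStorageType(StorageType.DISK)
        .setQuota(1024L * 1024 * 1024)
        .build();
    System.out.println(entry.equals(same));   // true
  }
}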
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; /** Manage name-to-serial-number maps for users and groups. */ class SerialNumberManager { /** This is the only instance of {@link SerialNumberManager}.*/ static final SerialNumberManager INSTANCE = new SerialNumberManager(); private final SerialNumberMap<String> usermap = new SerialNumberMap<String>(); private final SerialNumberMap<String> groupmap = new SerialNumberMap<String>(); private SerialNumberManager() {} int getUserSerialNumber(String u) {return usermap.get(u);} int getGroupSerialNumber(String g) {return groupmap.get(g);} String getUser(int n) {return usermap.get(n);} String getGroup(int n) {return groupmap.get(n);} { getUserSerialNumber(null); getGroupSerialNumber(null); } private static class SerialNumberMap<T> { private final AtomicInteger max = new AtomicInteger(1); private final ConcurrentMap<T, Integer> t2i = new ConcurrentHashMap<T, Integer>(); private final ConcurrentMap<Integer, T> i2t = new ConcurrentHashMap<Integer, T>(); int get(T t) { if (t == null) { return 0; } Integer sn = t2i.get(t); if (sn == null) { sn = max.getAndIncrement(); Integer old = t2i.putIfAbsent(t, sn); if (old != null) { return old; } i2t.put(sn, t); } return sn; } T get(int i) { if (i == 0) { return null; } T t = i2t.get(i); if (t == null) { throw new IllegalStateException("!i2t.containsKey(" + i + "), this=" + this); } return t; } @Override public String toString() { return "max=" + max + ",\n t2i=" + t2i + ",\n i2t=" + i2t; } } }
2,692
31.445783
86
java
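A short round-trip sketch of the serial-number maps above (name to small integer and back). SerialNumberManager and its accessors are package-private, so the snippet is assumed to run from the same package, for example inside a test.

// Assumed to live in org.apache.hadoop.hdfs.server.namenode (the class is package-private).
public class SerialNumberExample {
  public static void main(String[] args) {
    SerialNumberManager sm = SerialNumberManager.INSTANCE;

    // The first lookup assigns the next serial number; later lookups reuse it.
    int hdfsId = sm.getUserSerialNumber("hdfs");
    int sameId = sm.getUserSerialNumber("hdfs");
    System.out.println(hdfsId == sameId);              // true

    // The reverse map recovers the original name.
    System.out.println(sm.getUser(hdfsId));            // hdfs

    // null is mapped to serial number 0 in both directions.
    System.out.println(sm.getUserSerialNumber(null));  // 0
  }
}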
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.fs.permission.AclEntryScope.*; import static org.apache.hadoop.fs.permission.AclEntryType.*; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.EnumMap; import java.util.EnumSet; import java.util.Iterator; import java.util.List; import com.google.common.base.Objects; import com.google.common.collect.ComparisonChain; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Ordering; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclEntryType; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.ScopedAclEntries; import org.apache.hadoop.hdfs.protocol.AclException; /** * AclTransformation defines the operations that can modify an ACL. All ACL * modifications take as input an existing ACL and apply logic to add new * entries, modify existing entries or remove old entries. Some operations also * accept an ACL spec: a list of entries that further describes the requested * change. Different operations interpret the ACL spec differently. In the * case of adding an ACL to an inode that previously did not have one, the * existing ACL can be a "minimal ACL" containing exactly 3 entries for owner, * group and other, all derived from the {@link FsPermission} bits. * * The algorithms implemented here require sorted lists of ACL entries. For any * existing ACL, it is assumed that the entries are sorted. This is because all * ACL creation and modification is intended to go through these methods, and * they all guarantee correct sort order in their outputs. However, an ACL spec * is considered untrusted user input, so all operations pre-sort the ACL spec as * the first step. */ @InterfaceAudience.Private final class AclTransformation { private static final int MAX_ENTRIES = 32; /** * Filters (discards) any existing ACL entries that have the same scope, type * and name of any entry in the ACL spec. If necessary, recalculates the mask * entries. If necessary, default entries may be inferred by copying the * permissions of the corresponding access entries. It is invalid to request * removal of the mask entry from an ACL that would otherwise require a mask * entry, due to existing named entries or an unnamed group entry. 
* * @param existingAcl List<AclEntry> existing ACL * @param inAclSpec List<AclEntry> ACL spec describing entries to filter * @return List<AclEntry> new ACL * @throws AclException if validation fails */ public static List<AclEntry> filterAclEntriesByAclSpec( List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException { ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec); ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES); EnumMap<AclEntryScope, AclEntry> providedMask = Maps.newEnumMap(AclEntryScope.class); EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class); EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class); for (AclEntry existingEntry: existingAcl) { if (aclSpec.containsKey(existingEntry)) { scopeDirty.add(existingEntry.getScope()); if (existingEntry.getType() == MASK) { maskDirty.add(existingEntry.getScope()); } } else { if (existingEntry.getType() == MASK) { providedMask.put(existingEntry.getScope(), existingEntry); } else { aclBuilder.add(existingEntry); } } } copyDefaultsIfNeeded(aclBuilder); calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty); return buildAndValidateAcl(aclBuilder); } /** * Filters (discards) any existing default ACL entries. The new ACL retains * only the access ACL entries. * * @param existingAcl List<AclEntry> existing ACL * @return List<AclEntry> new ACL * @throws AclException if validation fails */ public static List<AclEntry> filterDefaultAclEntries( List<AclEntry> existingAcl) throws AclException { ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES); for (AclEntry existingEntry: existingAcl) { if (existingEntry.getScope() == DEFAULT) { // Default entries sort after access entries, so we can exit early. break; } aclBuilder.add(existingEntry); } return buildAndValidateAcl(aclBuilder); } /** * Merges the entries of the ACL spec into the existing ACL. If necessary, * recalculates the mask entries. If necessary, default entries may be * inferred by copying the permissions of the corresponding access entries. * * @param existingAcl List<AclEntry> existing ACL * @param inAclSpec List<AclEntry> ACL spec containing entries to merge * @return List<AclEntry> new ACL * @throws AclException if validation fails */ public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException { ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec); ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES); List<AclEntry> foundAclSpecEntries = Lists.newArrayListWithCapacity(MAX_ENTRIES); EnumMap<AclEntryScope, AclEntry> providedMask = Maps.newEnumMap(AclEntryScope.class); EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class); EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class); for (AclEntry existingEntry: existingAcl) { AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry); if (aclSpecEntry != null) { foundAclSpecEntries.add(aclSpecEntry); scopeDirty.add(aclSpecEntry.getScope()); if (aclSpecEntry.getType() == MASK) { providedMask.put(aclSpecEntry.getScope(), aclSpecEntry); maskDirty.add(aclSpecEntry.getScope()); } else { aclBuilder.add(aclSpecEntry); } } else { if (existingEntry.getType() == MASK) { providedMask.put(existingEntry.getScope(), existingEntry); } else { aclBuilder.add(existingEntry); } } } // ACL spec entries that were not replacements are new additions. 
for (AclEntry newEntry: aclSpec) { if (Collections.binarySearch(foundAclSpecEntries, newEntry, ACL_ENTRY_COMPARATOR) < 0) { scopeDirty.add(newEntry.getScope()); if (newEntry.getType() == MASK) { providedMask.put(newEntry.getScope(), newEntry); maskDirty.add(newEntry.getScope()); } else { aclBuilder.add(newEntry); } } } copyDefaultsIfNeeded(aclBuilder); calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty); return buildAndValidateAcl(aclBuilder); } /** * Completely replaces the ACL with the entries of the ACL spec. If * necessary, recalculates the mask entries. If necessary, default entries * are inferred by copying the permissions of the corresponding access * entries. Replacement occurs separately for each of the access ACL and the * default ACL. If the ACL spec contains only access entries, then the * existing default entries are retained. If the ACL spec contains only * default entries, then the existing access entries are retained. If the ACL * spec contains both access and default entries, then both are replaced. * * @param existingAcl List<AclEntry> existing ACL * @param inAclSpec List<AclEntry> ACL spec containing replacement entries * @return List<AclEntry> new ACL * @throws AclException if validation fails */ public static List<AclEntry> replaceAclEntries(List<AclEntry> existingAcl, List<AclEntry> inAclSpec) throws AclException { ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec); ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES); // Replacement is done separately for each scope: access and default. EnumMap<AclEntryScope, AclEntry> providedMask = Maps.newEnumMap(AclEntryScope.class); EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class); EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class); for (AclEntry aclSpecEntry: aclSpec) { scopeDirty.add(aclSpecEntry.getScope()); if (aclSpecEntry.getType() == MASK) { providedMask.put(aclSpecEntry.getScope(), aclSpecEntry); maskDirty.add(aclSpecEntry.getScope()); } else { aclBuilder.add(aclSpecEntry); } } // Copy existing entries if the scope was not replaced. for (AclEntry existingEntry: existingAcl) { if (!scopeDirty.contains(existingEntry.getScope())) { if (existingEntry.getType() == MASK) { providedMask.put(existingEntry.getScope(), existingEntry); } else { aclBuilder.add(existingEntry); } } } copyDefaultsIfNeeded(aclBuilder); calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty); return buildAndValidateAcl(aclBuilder); } /** * There is no reason to instantiate this class. */ private AclTransformation() { } /** * Comparator that enforces required ordering for entries within an ACL: * -owner entry (unnamed user) * -all named user entries (internal ordering undefined) * -owning group entry (unnamed group) * -all named group entries (internal ordering undefined) * -mask entry * -other entry * All access ACL entries sort ahead of all default ACL entries. */ static final Comparator<AclEntry> ACL_ENTRY_COMPARATOR = new Comparator<AclEntry>() { @Override public int compare(AclEntry entry1, AclEntry entry2) { return ComparisonChain.start() .compare(entry1.getScope(), entry2.getScope(), Ordering.explicit(ACCESS, DEFAULT)) .compare(entry1.getType(), entry2.getType(), Ordering.explicit(USER, GROUP, MASK, OTHER)) .compare(entry1.getName(), entry2.getName(), Ordering.natural().nullsFirst()) .result(); } }; /** * Builds the final list of ACL entries to return by trimming, sorting and * validating the ACL entries that have been added. 
* * @param aclBuilder ArrayList<AclEntry> containing entries to build * @return List<AclEntry> unmodifiable, sorted list of ACL entries * @throws AclException if validation fails */ private static List<AclEntry> buildAndValidateAcl( ArrayList<AclEntry> aclBuilder) throws AclException { aclBuilder.trimToSize(); Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR); // Full iteration to check for duplicates and invalid named entries. AclEntry prevEntry = null; for (AclEntry entry: aclBuilder) { if (prevEntry != null && ACL_ENTRY_COMPARATOR.compare(prevEntry, entry) == 0) { throw new AclException( "Invalid ACL: multiple entries with same scope, type and name."); } if (entry.getName() != null && (entry.getType() == MASK || entry.getType() == OTHER)) { throw new AclException( "Invalid ACL: this entry type must not have a name: " + entry + "."); } prevEntry = entry; } ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder); checkMaxEntries(scopedEntries); // Search for the required base access entries. If there is a default ACL, // then do the same check on the default entries. for (AclEntryType type: EnumSet.of(USER, GROUP, OTHER)) { AclEntry accessEntryKey = new AclEntry.Builder().setScope(ACCESS) .setType(type).build(); if (Collections.binarySearch(scopedEntries.getAccessEntries(), accessEntryKey, ACL_ENTRY_COMPARATOR) < 0) { throw new AclException( "Invalid ACL: the user, group and other entries are required."); } if (!scopedEntries.getDefaultEntries().isEmpty()) { AclEntry defaultEntryKey = new AclEntry.Builder().setScope(DEFAULT) .setType(type).build(); if (Collections.binarySearch(scopedEntries.getDefaultEntries(), defaultEntryKey, ACL_ENTRY_COMPARATOR) < 0) { throw new AclException( "Invalid default ACL: the user, group and other entries are required."); } } } return Collections.unmodifiableList(aclBuilder); } // Check the max entries separately on access and default entries // HDFS-7582 private static void checkMaxEntries(ScopedAclEntries scopedEntries) throws AclException { List<AclEntry> accessEntries = scopedEntries.getAccessEntries(); List<AclEntry> defaultEntries = scopedEntries.getDefaultEntries(); if (accessEntries.size() > MAX_ENTRIES) { throw new AclException("Invalid ACL: ACL has " + accessEntries.size() + " access entries, which exceeds maximum of " + MAX_ENTRIES + "."); } if (defaultEntries.size() > MAX_ENTRIES) { throw new AclException("Invalid ACL: ACL has " + defaultEntries.size() + " default entries, which exceeds maximum of " + MAX_ENTRIES + "."); } } /** * Calculates mask entries required for the ACL. Mask calculation is performed * separately for each scope: access and default. This method is responsible * for handling the following cases of mask calculation: * 1. Throws an exception if the caller attempts to remove the mask entry of an * existing ACL that requires it. If the ACL has any named entries, then a * mask entry is required. * 2. If the caller supplied a mask in the ACL spec, use it. * 3. If the caller did not supply a mask, but there are ACL entry changes in * this scope, then automatically calculate a new mask. The permissions of * the new mask are the union of the permissions on the group entry and all * named entries. 
* * @param aclBuilder ArrayList<AclEntry> containing entries to build * @param providedMask EnumMap<AclEntryScope, AclEntry> mapping each scope to * the mask entry that was provided for that scope (if provided) * @param maskDirty EnumSet<AclEntryScope> which contains a scope if the mask * entry is dirty (added or deleted) in that scope * @param scopeDirty EnumSet<AclEntryScope> which contains a scope if any entry * is dirty (added or deleted) in that scope * @throws AclException if validation fails */ private static void calculateMasks(List<AclEntry> aclBuilder, EnumMap<AclEntryScope, AclEntry> providedMask, EnumSet<AclEntryScope> maskDirty, EnumSet<AclEntryScope> scopeDirty) throws AclException { EnumSet<AclEntryScope> scopeFound = EnumSet.noneOf(AclEntryScope.class); EnumMap<AclEntryScope, FsAction> unionPerms = Maps.newEnumMap(AclEntryScope.class); EnumSet<AclEntryScope> maskNeeded = EnumSet.noneOf(AclEntryScope.class); // Determine which scopes are present, which scopes need a mask, and the // union of group class permissions in each scope. for (AclEntry entry: aclBuilder) { scopeFound.add(entry.getScope()); if (entry.getType() == GROUP || entry.getName() != null) { FsAction scopeUnionPerms = Objects.firstNonNull( unionPerms.get(entry.getScope()), FsAction.NONE); unionPerms.put(entry.getScope(), scopeUnionPerms.or(entry.getPermission())); } if (entry.getName() != null) { maskNeeded.add(entry.getScope()); } } // Add mask entry if needed in each scope. for (AclEntryScope scope: scopeFound) { if (!providedMask.containsKey(scope) && maskNeeded.contains(scope) && maskDirty.contains(scope)) { // Caller explicitly removed mask entry, but it's required. throw new AclException( "Invalid ACL: mask is required and cannot be deleted."); } else if (providedMask.containsKey(scope) && (!scopeDirty.contains(scope) || maskDirty.contains(scope))) { // Caller explicitly provided new mask, or we are preserving the existing // mask in an unchanged scope. aclBuilder.add(providedMask.get(scope)); } else if (maskNeeded.contains(scope) || providedMask.containsKey(scope)) { // Otherwise, if there are maskable entries present, or the ACL // previously had a mask, then recalculate a mask automatically. aclBuilder.add(new AclEntry.Builder() .setScope(scope) .setType(MASK) .setPermission(unionPerms.get(scope)) .build()); } } } /** * Adds unspecified default entries by copying permissions from the * corresponding access entries. 
* * @param aclBuilder ArrayList<AclEntry> containing entries to build */ private static void copyDefaultsIfNeeded(List<AclEntry> aclBuilder) { Collections.sort(aclBuilder, ACL_ENTRY_COMPARATOR); ScopedAclEntries scopedEntries = new ScopedAclEntries(aclBuilder); if (!scopedEntries.getDefaultEntries().isEmpty()) { List<AclEntry> accessEntries = scopedEntries.getAccessEntries(); List<AclEntry> defaultEntries = scopedEntries.getDefaultEntries(); List<AclEntry> copiedEntries = Lists.newArrayListWithCapacity(3); for (AclEntryType type: EnumSet.of(USER, GROUP, OTHER)) { AclEntry defaultEntryKey = new AclEntry.Builder().setScope(DEFAULT) .setType(type).build(); int defaultEntryIndex = Collections.binarySearch(defaultEntries, defaultEntryKey, ACL_ENTRY_COMPARATOR); if (defaultEntryIndex < 0) { AclEntry accessEntryKey = new AclEntry.Builder().setScope(ACCESS) .setType(type).build(); int accessEntryIndex = Collections.binarySearch(accessEntries, accessEntryKey, ACL_ENTRY_COMPARATOR); if (accessEntryIndex >= 0) { copiedEntries.add(new AclEntry.Builder() .setScope(DEFAULT) .setType(type) .setPermission(accessEntries.get(accessEntryIndex).getPermission()) .build()); } } } // Add all copied entries when done to prevent potential issues with binary // search on a modified aclBulider during the main loop. aclBuilder.addAll(copiedEntries); } } /** * An ACL spec that has been pre-validated and sorted. */ private static final class ValidatedAclSpec implements Iterable<AclEntry> { private final List<AclEntry> aclSpec; /** * Creates a ValidatedAclSpec by pre-validating and sorting the given ACL * entries. Pre-validation checks that it does not exceed the maximum * entries. This check is performed before modifying the ACL, and it's * actually insufficient for enforcing the maximum number of entries. * Transformation logic can create additional entries automatically,such as * the mask and some of the default entries, so we also need additional * checks during transformation. The up-front check is still valuable here * so that we don't run a lot of expensive transformation logic while * holding the namesystem lock for an attacker who intentionally sent a huge * ACL spec. * * @param aclSpec List<AclEntry> containing unvalidated input ACL spec * @throws AclException if validation fails */ public ValidatedAclSpec(List<AclEntry> aclSpec) throws AclException { Collections.sort(aclSpec, ACL_ENTRY_COMPARATOR); checkMaxEntries(new ScopedAclEntries(aclSpec)); this.aclSpec = aclSpec; } /** * Returns true if this contains an entry matching the given key. An ACL * entry's key consists of scope, type and name (but not permission). * * @param key AclEntry search key * @return boolean true if found */ public boolean containsKey(AclEntry key) { return Collections.binarySearch(aclSpec, key, ACL_ENTRY_COMPARATOR) >= 0; } /** * Returns the entry matching the given key or null if not found. An ACL * entry's key consists of scope, type and name (but not permission). * * @param key AclEntry search key * @return AclEntry entry matching the given key or null if not found */ public AclEntry findByKey(AclEntry key) { int index = Collections.binarySearch(aclSpec, key, ACL_ENTRY_COMPARATOR); if (index >= 0) { return aclSpec.get(index); } return null; } @Override public Iterator<AclEntry> iterator() { return aclSpec.iterator(); } } }
21,702
42.492986
84
java
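A hedged sketch of merging an ACL spec into a minimal ACL with the operations above. AclTransformation is package-private, so the snippet assumes it compiles in org.apache.hadoop.hdfs.server.namenode; the user name "bruce" and the permissions are arbitrary illustration values.

import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryType.*;

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;

// Assumed to run from org.apache.hadoop.hdfs.server.namenode, since
// AclTransformation is package-private.
public class AclMergeExample {
  public static void main(String[] args) throws Exception {
    // A "minimal ACL": exactly the owner, group and other entries.
    List<AclEntry> existing = Arrays.asList(
        new AclEntry.Builder().setScope(ACCESS).setType(USER)
            .setPermission(FsAction.ALL).build(),
        new AclEntry.Builder().setScope(ACCESS).setType(GROUP)
            .setPermission(FsAction.READ_EXECUTE).build(),
        new AclEntry.Builder().setScope(ACCESS).setType(OTHER)
            .setPermission(FsAction.NONE).build());

    // ACL spec adding one named user.
    List<AclEntry> spec = Arrays.asList(
        new AclEntry.Builder().setScope(ACCESS).setType(USER)
            .setName("bruce").setPermission(FsAction.READ_WRITE).build());

    // Merging adds the named entry; because a named entry now exists,
    // the mask calculation above also synthesizes a mask entry automatically.
    List<AclEntry> merged = AclTransformation.mergeAclEntries(existing, spec);
    for (AclEntry e : merged) {
      System.out.println(e);
    }
  }
}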
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNodeInfoMXBean.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * JMX information of the secondary NameNode */ @InterfaceAudience.Private @InterfaceStability.Evolving public interface SecondaryNameNodeInfoMXBean extends VersionInfoMXBean { /** * Gets the host and port colon separated. */ public String getHostAndPort(); /** * @return the timestamp of when the SNN starts */ public long getStartTime(); /** * @return the timestamp of the last checkpoint */ public long getLastCheckpointTime(); /** * @return the number of msec since the last checkpoint, or -1 if no * checkpoint has been done yet. */ public long getLastCheckpointDeltaMs(); /** * @return the directories that store the checkpoint images */ public String[] getCheckpointDirectories(); /** * @return the directories that store the edit logs */ public String[] getCheckpointEditlogDirectories(); }
1,847
30.322034
75
java
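Because this interface is registered as a JMX bean, each getter surfaces as a read-only attribute. The sketch below reads two of them with the standard javax.management client API; the RMI port and the ObjectName Hadoop:service=SecondaryNameNode,name=SecondaryNameNodeInfo follow the usual Hadoop bean-naming convention but are assumptions, not taken from this file.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class SecondaryNameNodeJmxClient {
  public static void main(String[] args) throws Exception {
    // Hypothetical JMX endpoint of the SecondaryNameNode process.
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://snn.example.com:8004/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      // Assumed bean name, following the Hadoop:service=...,name=... convention.
      ObjectName snnInfo =
          new ObjectName("Hadoop:service=SecondaryNameNode,name=SecondaryNameNodeInfo");

      String hostAndPort = (String) mbs.getAttribute(snnInfo, "HostAndPort");
      long lastCheckpointDeltaMs =
          (Long) mbs.getAttribute(snnInfo, "LastCheckpointDeltaMs");

      System.out.println("SNN at " + hostAndPort);
      System.out.println(lastCheckpointDeltaMs < 0
          ? "no checkpoint yet"
          : "last checkpoint " + lastCheckpointDeltaMs + " ms ago");
    }
  }
}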
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeStatusMXBean.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * This is the JMX management interface for NameNode status information */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface NameNodeStatusMXBean { /** * Gets the NameNode role. * * @return the NameNode role. */ public String getNNRole(); /** * Gets the NameNode state. * * @return the NameNode state. */ public String getState(); /** * Gets the host and port colon separated. * * @return host and port colon separated. */ public String getHostAndPort(); /** * Gets if security is enabled. * * @return true, if security is enabled. */ public boolean isSecurityEnabled(); /** * Gets the most recent HA transition time in milliseconds from the epoch. * * @return the most recent HA transition time in milliseconds from the epoch. */ public long getLastHATransitionTime(); }
1,860
27.630769
79
java
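An in-process variant for the NameNode status bean above, for example from a test sharing the JVM with a MiniDFSCluster: it queries the platform MBeanServer directly instead of opening a remote connector. The ObjectName Hadoop:service=NameNode,name=NameNodeStatus and the lower-case "active" state string are assumptions based on common Hadoop conventions.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class NameNodeStatusCheck {
  // Reads the status attributes of a NameNode registered in this JVM.
  static boolean isLocalNameNodeActive() throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Assumed bean name, following the Hadoop:service=...,name=... convention.
    ObjectName nnStatus =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeStatus");

    String role = (String) mbs.getAttribute(nnStatus, "NNRole");
    String state = (String) mbs.getAttribute(nnStatus, "State");
    boolean secure = (Boolean) mbs.getAttribute(nnStatus, "SecurityEnabled");
    System.out.println(role + " state=" + state + " secure=" + secure);

    // Assumes the active HA state is reported as the lower-case string "active".
    return "active".equals(state);
  }
}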
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.util.Canceler; import com.google.common.base.Preconditions; /** * Context for an ongoing SaveNamespace operation. This class * allows cancellation, and also is responsible for accumulating * failed storage directories. */ @InterfaceAudience.Private public class SaveNamespaceContext { private final FSNamesystem sourceNamesystem; private final long txid; private final List<StorageDirectory> errorSDs = Collections.synchronizedList(new ArrayList<StorageDirectory>()); private final Canceler canceller; private final CountDownLatch completionLatch = new CountDownLatch(1); SaveNamespaceContext( FSNamesystem sourceNamesystem, long txid, Canceler canceller) { this.sourceNamesystem = sourceNamesystem; this.txid = txid; this.canceller = canceller; } FSNamesystem getSourceNamesystem() { return sourceNamesystem; } long getTxId() { return txid; } void reportErrorOnStorageDirectory(StorageDirectory sd) { errorSDs.add(sd); } List<StorageDirectory> getErrorSDs() { return errorSDs; } void markComplete() { Preconditions.checkState(completionLatch.getCount() == 1, "Context already completed!"); completionLatch.countDown(); } public void checkCancelled() throws SaveNamespaceCancelledException { if (canceller.isCancelled()) { throw new SaveNamespaceCancelledException( canceller.getCancellationReason()); } } }
2,593
29.880952
75
java
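A sketch of the lifecycle the context above supports: check for cancellation before each unit of work, record failed storage directories instead of aborting, and signal completion at the end. The saveOneDirectory helper is hypothetical, and the package-private methods require the snippet to live in org.apache.hadoop.hdfs.server.namenode.

// Assumed to live in org.apache.hadoop.hdfs.server.namenode, since the
// context's constructor and most accessors are package-private.
import java.util.List;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

class SaveNamespaceSketch {
  // Writes the image to every directory, honoring cancellation and
  // collecting (rather than propagating) per-directory failures.
  static void saveToAll(SaveNamespaceContext context, List<StorageDirectory> dirs)
      throws Exception {
    try {
      for (StorageDirectory sd : dirs) {
        // Throws SaveNamespaceCancelledException once the Canceler is triggered.
        context.checkCancelled();
        try {
          saveOneDirectory(context.getTxId(), sd);   // hypothetical helper
        } catch (Exception e) {
          // A failed directory is recorded; the overall save continues.
          context.reportErrorOnStorageDirectory(sd);
        }
      }
    } finally {
      // Counts down the completion latch exactly once.
      context.markComplete();
    }
  }

  private static void saveOneDirectory(long txid, StorageDirectory sd) {
    // Placeholder for the real image-writing logic.
    System.out.println("would write fsimage_" + txid + " under " + sd);
  }
}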
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.PrintStream; import java.io.PrintWriter; import java.io.StringWriter; import java.util.List; import java.util.Map; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference; import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.util.Diff; import org.apache.hadoop.util.ChunkedArrayList; import org.apache.hadoop.util.StringUtils; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; /** * We keep an in-memory representation of the file/block hierarchy. * This is a base INode class containing common fields for file and * directory inodes. */ @InterfaceAudience.Private public abstract class INode implements INodeAttributes, Diff.Element<byte[]> { public static final Log LOG = LogFactory.getLog(INode.class); /** parent is either an {@link INodeDirectory} or an {@link INodeReference}.*/ private INode parent = null; INode(INode parent) { this.parent = parent; } /** Get inode id */ public abstract long getId(); /** * Check whether this is the root inode. */ final boolean isRoot() { return getLocalNameBytes().length == 0; } /** Get the {@link PermissionStatus} */ abstract PermissionStatus getPermissionStatus(int snapshotId); /** The same as getPermissionStatus(null). */ final PermissionStatus getPermissionStatus() { return getPermissionStatus(Snapshot.CURRENT_STATE_ID); } /** * @param snapshotId * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result * from the given snapshot; otherwise, get the result from the * current inode. * @return user name */ abstract String getUserName(int snapshotId); /** The same as getUserName(Snapshot.CURRENT_STATE_ID). 
*/ @Override public final String getUserName() { return getUserName(Snapshot.CURRENT_STATE_ID); } /** Set user */ abstract void setUser(String user); /** Set user */ final INode setUser(String user, int latestSnapshotId) { recordModification(latestSnapshotId); setUser(user); return this; } /** * @param snapshotId * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result * from the given snapshot; otherwise, get the result from the * current inode. * @return group name */ abstract String getGroupName(int snapshotId); /** The same as getGroupName(Snapshot.CURRENT_STATE_ID). */ @Override public final String getGroupName() { return getGroupName(Snapshot.CURRENT_STATE_ID); } /** Set group */ abstract void setGroup(String group); /** Set group */ final INode setGroup(String group, int latestSnapshotId) { recordModification(latestSnapshotId); setGroup(group); return this; } /** * @param snapshotId * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result * from the given snapshot; otherwise, get the result from the * current inode. * @return permission. */ abstract FsPermission getFsPermission(int snapshotId); /** The same as getFsPermission(Snapshot.CURRENT_STATE_ID). */ @Override public final FsPermission getFsPermission() { return getFsPermission(Snapshot.CURRENT_STATE_ID); } /** Set the {@link FsPermission} of this {@link INode} */ abstract void setPermission(FsPermission permission); /** Set the {@link FsPermission} of this {@link INode} */ INode setPermission(FsPermission permission, int latestSnapshotId) { recordModification(latestSnapshotId); setPermission(permission); return this; } abstract AclFeature getAclFeature(int snapshotId); @Override public final AclFeature getAclFeature() { return getAclFeature(Snapshot.CURRENT_STATE_ID); } abstract void addAclFeature(AclFeature aclFeature); final INode addAclFeature(AclFeature aclFeature, int latestSnapshotId) { recordModification(latestSnapshotId); addAclFeature(aclFeature); return this; } abstract void removeAclFeature(); final INode removeAclFeature(int latestSnapshotId) { recordModification(latestSnapshotId); removeAclFeature(); return this; } /** * @param snapshotId * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result * from the given snapshot; otherwise, get the result from the * current inode. * @return XAttrFeature */ abstract XAttrFeature getXAttrFeature(int snapshotId); @Override public final XAttrFeature getXAttrFeature() { return getXAttrFeature(Snapshot.CURRENT_STATE_ID); } /** * Set <code>XAttrFeature</code> */ abstract void addXAttrFeature(XAttrFeature xAttrFeature); final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId) { recordModification(latestSnapshotId); addXAttrFeature(xAttrFeature); return this; } /** * Remove <code>XAttrFeature</code> */ abstract void removeXAttrFeature(); final INode removeXAttrFeature(int lastestSnapshotId) { recordModification(lastestSnapshotId); removeXAttrFeature(); return this; } /** * @return if the given snapshot id is {@link Snapshot#CURRENT_STATE_ID}, * return this; otherwise return the corresponding snapshot inode. */ public INodeAttributes getSnapshotINode(final int snapshotId) { return this; } /** Is this inode in the latest snapshot? */ public final boolean isInLatestSnapshot(final int latestSnapshotId) { if (latestSnapshotId == Snapshot.CURRENT_STATE_ID || latestSnapshotId == Snapshot.NO_SNAPSHOT_ID) { return false; } // if parent is a reference node, parent must be a renamed node. We can // stop the check at the reference node. 
if (parent != null && parent.isReference()) { return true; } final INodeDirectory parentDir = getParent(); if (parentDir == null) { // root return true; } if (!parentDir.isInLatestSnapshot(latestSnapshotId)) { return false; } final INode child = parentDir.getChild(getLocalNameBytes(), latestSnapshotId); if (this == child) { return true; } return child != null && child.isReference() && this == child.asReference().getReferredINode(); } /** @return true if the given inode is an ancestor directory of this inode. */ public final boolean isAncestorDirectory(final INodeDirectory dir) { for(INodeDirectory p = getParent(); p != null; p = p.getParent()) { if (p == dir) { return true; } } return false; } /** * When {@link #recordModification} is called on a referred node, * this method tells which snapshot the modification should be * associated with: the snapshot that belongs to the SRC tree of the rename * operation, or the snapshot belonging to the DST tree. * * @param latestInDst * id of the latest snapshot in the DST tree above the reference node * @return True: the modification should be recorded in the snapshot that * belongs to the SRC tree. False: the modification should be * recorded in the snapshot that belongs to the DST tree. */ public final boolean shouldRecordInSrcSnapshot(final int latestInDst) { Preconditions.checkState(!isReference()); if (latestInDst == Snapshot.CURRENT_STATE_ID) { return true; } INodeReference withCount = getParentReference(); if (withCount != null) { int dstSnapshotId = withCount.getParentReference().getDstSnapshotId(); if (dstSnapshotId != Snapshot.CURRENT_STATE_ID && dstSnapshotId >= latestInDst) { return true; } } return false; } /** * This inode is being modified. The previous version of the inode needs to * be recorded in the latest snapshot. * * @param latestSnapshotId The id of the latest snapshot that has been taken. * Note that it is {@link Snapshot#CURRENT_STATE_ID} * if no snapshots have been taken. */ abstract void recordModification(final int latestSnapshotId); /** Check whether it's a reference. */ public boolean isReference() { return false; } /** Cast this inode to an {@link INodeReference}. */ public INodeReference asReference() { throw new IllegalStateException("Current inode is not a reference: " + this.toDetailString()); } /** * Check whether it's a file. */ public boolean isFile() { return false; } /** Cast this inode to an {@link INodeFile}. */ public INodeFile asFile() { throw new IllegalStateException("Current inode is not a file: " + this.toDetailString()); } /** * Check whether it's a directory */ public boolean isDirectory() { return false; } /** Cast this inode to an {@link INodeDirectory}. */ public INodeDirectory asDirectory() { throw new IllegalStateException("Current inode is not a directory: " + this.toDetailString()); } /** * Check whether it's a symlink */ public boolean isSymlink() { return false; } /** Cast this inode to an {@link INodeSymlink}. */ public INodeSymlink asSymlink() { throw new IllegalStateException("Current inode is not a symlink: " + this.toDetailString()); } /** * Clean the subtree under this inode and collect the blocks from the descents * for further block deletion/update. The current inode can either resides in * the current tree or be stored as a snapshot copy. * * <pre> * In general, we have the following rules. * 1. When deleting a file/directory in the current tree, we have different * actions according to the type of the node to delete. * * 1.1 The current inode (this) is an {@link INodeFile}. 
* 1.1.1 If {@code prior} is null, there is no snapshot taken on ancestors * before. Thus we simply destroy (i.e., to delete completely, no need to save * snapshot copy) the current INode and collect its blocks for further * cleansing. * 1.1.2 Else do nothing since the current INode will be stored as a snapshot * copy. * * 1.2 The current inode is an {@link INodeDirectory}. * 1.2.1 If {@code prior} is null, there is no snapshot taken on ancestors * before. Similarly, we destroy the whole subtree and collect blocks. * 1.2.2 Else do nothing with the current INode. Recursively clean its * children. * * 1.3 The current inode is a file with snapshot. * Call recordModification(..) to capture the current states. * Mark the INode as deleted. * * 1.4 The current inode is an {@link INodeDirectory} with snapshot feature. * Call recordModification(..) to capture the current states. * Destroy files/directories created after the latest snapshot * (i.e., the inodes stored in the created list of the latest snapshot). * Recursively clean remaining children. * * 2. When deleting a snapshot. * 2.1 To clean {@link INodeFile}: do nothing. * 2.2 To clean {@link INodeDirectory}: recursively clean its children. * 2.3 To clean INodeFile with snapshot: delete the corresponding snapshot in * its diff list. * 2.4 To clean {@link INodeDirectory} with snapshot: delete the corresponding * snapshot in its diff list. Recursively clean its children. * </pre> * * @param reclaimContext * Record blocks and inodes that need to be reclaimed. * @param snapshotId * The id of the snapshot to delete. * {@link Snapshot#CURRENT_STATE_ID} means to delete the current * file/directory. * @param priorSnapshotId * The id of the latest snapshot before the to-be-deleted snapshot. * When deleting a current inode, this parameter captures the latest * snapshot. */ public abstract void cleanSubtree(ReclaimContext reclaimContext, final int snapshotId, int priorSnapshotId); /** * Destroy self and clear everything! If the INode is a file, this method * collects its blocks for further block deletion. If the INode is a * directory, the method goes down the subtree and collects blocks from the * descents, and clears its parent/children references as well. The method * also clears the diff list if the INode contains snapshot diff list. * * @param reclaimContext * Record blocks and inodes that need to be reclaimed. */ public abstract void destroyAndCollectBlocks(ReclaimContext reclaimContext); /** Compute {@link ContentSummary}. Blocking call */ public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) { return computeAndConvertContentSummary( new ContentSummaryComputationContext(bsps)); } /** * Compute {@link ContentSummary}. */ public final ContentSummary computeAndConvertContentSummary( ContentSummaryComputationContext summary) { ContentCounts counts = computeContentSummary(summary).getCounts(); final QuotaCounts q = getQuotaCounts(); return new ContentSummary.Builder(). length(counts.getLength()). fileCount(counts.getFileCount() + counts.getSymlinkCount()). directoryCount(counts.getDirectoryCount()). quota(q.getNameSpace()). spaceConsumed(counts.getStoragespace()). spaceQuota(q.getStorageSpace()). typeConsumed(counts.getTypeSpaces()). typeQuota(q.getTypeSpaces().asArray()). build(); } /** * Count subtree content summary with a {@link ContentCounts}. * * @param summary the context object holding counts for the subtree. * @return The same objects as summary. 
*/ public abstract ContentSummaryComputationContext computeContentSummary( ContentSummaryComputationContext summary); /** * Check and add namespace/storagespace/storagetype consumed to itself and the ancestors. * @throws QuotaExceededException if quote is violated. */ public void addSpaceConsumed(QuotaCounts counts, boolean verify) throws QuotaExceededException { addSpaceConsumed2Parent(counts, verify); } /** * Check and add namespace/storagespace/storagetype consumed to itself and the ancestors. * @throws QuotaExceededException if quote is violated. */ void addSpaceConsumed2Parent(QuotaCounts counts, boolean verify) throws QuotaExceededException { if (parent != null) { parent.addSpaceConsumed(counts, verify); } } /** * Get the quota set for this inode * @return the quota counts. The count is -1 if it is not set. */ public QuotaCounts getQuotaCounts() { return new QuotaCounts.Builder(). nameSpace(HdfsConstants.QUOTA_RESET). storageSpace(HdfsConstants.QUOTA_RESET). typeSpaces(HdfsConstants.QUOTA_RESET). build(); } public final boolean isQuotaSet() { final QuotaCounts qc = getQuotaCounts(); return qc.anyNsSsCountGreaterOrEqual(0) || qc.anyTypeSpaceCountGreaterOrEqual(0); } /** * Count subtree {@link Quota#NAMESPACE} and {@link Quota#STORAGESPACE} usages. * Entry point for FSDirectory where blockStoragePolicyId is given its initial * value. */ public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps) { final byte storagePolicyId = isSymlink() ? HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID(); return computeQuotaUsage(bsps, storagePolicyId, true, Snapshot.CURRENT_STATE_ID); } /** * Count subtree {@link Quota#NAMESPACE} and {@link Quota#STORAGESPACE} usages. * * With the existence of {@link INodeReference}, the same inode and its * subtree may be referred by multiple {@link WithName} nodes and a * {@link DstReference} node. To avoid circles while quota usage computation, * we have the following rules: * * <pre> * 1. For a {@link DstReference} node, since the node must be in the current * tree (or has been deleted as the end point of a series of rename * operations), we compute the quota usage of the referred node (and its * subtree) in the regular manner, i.e., including every inode in the current * tree and in snapshot copies, as well as the size of diff list. * * 2. For a {@link WithName} node, since the node must be in a snapshot, we * only count the quota usage for those nodes that still existed at the * creation time of the snapshot associated with the {@link WithName} node. * We do not count in the size of the diff list. * <pre> * * @param bsps Block storage policy suite to calculate intended storage type usage * @param blockStoragePolicyId block storage policy id of the current INode * @param useCache Whether to use cached quota usage. Note that * {@link WithName} node never uses cache for its subtree. * @param lastSnapshotId {@link Snapshot#CURRENT_STATE_ID} indicates the * computation is in the current tree. Otherwise the id * indicates the computation range for a * {@link WithName} node. * @return The subtree quota counts. */ public abstract QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps, byte blockStoragePolicyId, boolean useCache, int lastSnapshotId); public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps, boolean useCache) { final byte storagePolicyId = isSymlink() ? 
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getStoragePolicyID(); return computeQuotaUsage(bsps, storagePolicyId, useCache, Snapshot.CURRENT_STATE_ID); } /** * @return null if the local name is null; otherwise, return the local name. */ public final String getLocalName() { final byte[] name = getLocalNameBytes(); return name == null? null: DFSUtil.bytes2String(name); } @Override public final byte[] getKey() { return getLocalNameBytes(); } /** * Set local file name */ public abstract void setLocalName(byte[] name); public String getFullPathName() { // Get the full path name of this inode. return FSDirectory.getFullPathName(this); } @Override public String toString() { return getLocalName(); } @VisibleForTesting public final String getObjectString() { return getClass().getSimpleName() + "@" + Integer.toHexString(super.hashCode()); } /** @return a string description of the parent. */ @VisibleForTesting public final String getParentString() { final INodeReference parentRef = getParentReference(); if (parentRef != null) { return "parentRef=" + parentRef.getLocalName() + "->"; } else { final INodeDirectory parentDir = getParent(); if (parentDir != null) { return "parentDir=" + parentDir.getLocalName() + "/"; } else { return "parent=null"; } } } @VisibleForTesting public String toDetailString() { return toString() + "(" + getObjectString() + "), " + getParentString(); } /** @return the parent directory */ public final INodeDirectory getParent() { return parent == null? null : parent.isReference()? getParentReference().getParent(): parent.asDirectory(); } /** * @return the parent as a reference if this is a referred inode; * otherwise, return null. */ public INodeReference getParentReference() { return parent == null || !parent.isReference()? null: (INodeReference)parent; } /** Set parent directory */ public final void setParent(INodeDirectory parent) { this.parent = parent; } /** Set container. */ public final void setParentReference(INodeReference parent) { this.parent = parent; } /** Clear references to other objects. */ public void clear() { setParent(null); } /** * @param snapshotId * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result * from the given snapshot; otherwise, get the result from the * current inode. * @return modification time. */ abstract long getModificationTime(int snapshotId); /** The same as getModificationTime(Snapshot.CURRENT_STATE_ID). */ @Override public final long getModificationTime() { return getModificationTime(Snapshot.CURRENT_STATE_ID); } /** Update modification time if it is larger than the current value. */ public abstract INode updateModificationTime(long mtime, int latestSnapshotId); /** Set the last modification time of inode. */ public abstract void setModificationTime(long modificationTime); /** Set the last modification time of inode. */ public final INode setModificationTime(long modificationTime, int latestSnapshotId) { recordModification(latestSnapshotId); setModificationTime(modificationTime); return this; } /** * @param snapshotId * if it is not {@link Snapshot#CURRENT_STATE_ID}, get the result * from the given snapshot; otherwise, get the result from the * current inode. * @return access time */ abstract long getAccessTime(int snapshotId); /** The same as getAccessTime(Snapshot.CURRENT_STATE_ID). */ @Override public final long getAccessTime() { return getAccessTime(Snapshot.CURRENT_STATE_ID); } /** * Set last access time of inode. */ public abstract void setAccessTime(long accessTime); /** * Set last access time of inode. 
*/ public final INode setAccessTime(long accessTime, int latestSnapshotId) { recordModification(latestSnapshotId); setAccessTime(accessTime); return this; } /** * @return the latest block storage policy id of the INode. Specifically, * if a storage policy is directly specified on the INode then return the ID * of that policy. Otherwise follow the latest parental path and return the * ID of the first specified storage policy. */ public abstract byte getStoragePolicyID(); /** * @return the storage policy directly specified on the INode. Return * {@link HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED} if no policy has * been specified. */ public abstract byte getLocalStoragePolicyID(); /** * Get the storage policy ID while computing quota usage * @param parentStoragePolicyId the storage policy ID of the parent directory * @return the storage policy ID of this INode. Note that for an * {@link INodeSymlink} we return {@link HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED} * instead of throwing Exception */ public byte getStoragePolicyIDForQuota(byte parentStoragePolicyId) { byte localId = isSymlink() ? HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED : getLocalStoragePolicyID(); return localId != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ? localId : parentStoragePolicyId; } /** * Breaks {@code path} into components. * @return array of byte arrays each of which represents * a single path component. */ @VisibleForTesting public static byte[][] getPathComponents(String path) { return getPathComponents(getPathNames(path)); } /** Convert strings to byte arrays for path components. */ static byte[][] getPathComponents(String[] strings) { if (strings.length == 0) { return new byte[][]{null}; } byte[][] bytes = new byte[strings.length][]; for (int i = 0; i < strings.length; i++) bytes[i] = DFSUtil.string2Bytes(strings[i]); return bytes; } /** * Splits an absolute {@code path} into an array of path components. * @throws AssertionError if the given path is invalid. * @return array of path components. */ public static String[] getPathNames(String path) { if (path == null || !path.startsWith(Path.SEPARATOR)) { throw new AssertionError("Absolute path required"); } return StringUtils.split(path, Path.SEPARATOR_CHAR); } @Override public final int compareTo(byte[] bytes) { return DFSUtilClient.compareBytes(getLocalNameBytes(), bytes); } @Override public final boolean equals(Object that) { if (this == that) { return true; } if (that == null || !(that instanceof INode)) { return false; } return getId() == ((INode) that).getId(); } @Override public final int hashCode() { long id = getId(); return (int)(id^(id>>>32)); } /** * Dump the subtree starting from this inode. * @return a text representation of the tree. */ @VisibleForTesting public final StringBuffer dumpTreeRecursively() { final StringWriter out = new StringWriter(); dumpTreeRecursively(new PrintWriter(out, true), new StringBuilder(), Snapshot.CURRENT_STATE_ID); return out.getBuffer(); } @VisibleForTesting public final void dumpTreeRecursively(PrintStream out) { out.println(dumpTreeRecursively().toString()); } /** * Dump tree recursively. * @param prefix The prefix string that each line should print. */ @VisibleForTesting public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, int snapshotId) { out.print(prefix); out.print(" "); final String name = getLocalName(); out.print(name.isEmpty()? 
"/": name); out.print(" ("); out.print(getObjectString()); out.print("), "); out.print(getParentString()); out.print(", " + getPermissionStatus(snapshotId)); } /** * Information used to record quota usage delta. This data structure is * usually passed along with an operation like {@link #cleanSubtree}. Note * that after the operation the delta counts should be decremented from the * ancestral directories' quota usage. */ public static class QuotaDelta { private final QuotaCounts counts; /** * The main usage of this map is to track the quota delta that should be * applied to another path. This usually happens when we reclaim INodes and * blocks while deleting snapshots, and hit an INodeReference. Because the * quota usage for a renamed+snapshotted file/directory is counted in both * the current and historical parents, any change of its quota usage may * need to be propagated along its parent paths both before and after the * rename. */ private final Map<INode, QuotaCounts> updateMap; /** * When deleting a snapshot we may need to update the quota for directories * with quota feature. This map is used to capture these directories and * their quota usage updates. */ private final Map<INodeDirectory, QuotaCounts> quotaDirMap; public QuotaDelta() { counts = new QuotaCounts.Builder().build(); updateMap = Maps.newHashMap(); quotaDirMap = Maps.newHashMap(); } public void add(QuotaCounts update) { counts.add(update); } public void addUpdatePath(INodeReference inode, QuotaCounts update) { QuotaCounts c = updateMap.get(inode); if (c == null) { c = new QuotaCounts.Builder().build(); updateMap.put(inode, c); } c.add(update); } public void addQuotaDirUpdate(INodeDirectory dir, QuotaCounts update) { Preconditions.checkState(dir.isQuotaSet()); QuotaCounts c = quotaDirMap.get(dir); if (c == null) { quotaDirMap.put(dir, update); } else { c.add(update); } } public QuotaCounts getCountsCopy() { final QuotaCounts copy = new QuotaCounts.Builder().build(); copy.add(counts); return copy; } public void setCounts(QuotaCounts c) { this.counts.setNameSpace(c.getNameSpace()); this.counts.setStorageSpace(c.getStorageSpace()); this.counts.setTypeSpaces(c.getTypeSpaces()); } public long getNsDelta() { long nsDelta = counts.getNameSpace(); for (Map.Entry<INode, QuotaCounts> entry : updateMap.entrySet()) { nsDelta += entry.getValue().getNameSpace(); } return nsDelta; } public Map<INode, QuotaCounts> getUpdateMap() { return ImmutableMap.copyOf(updateMap); } public Map<INodeDirectory, QuotaCounts> getQuotaDirMap() { return ImmutableMap.copyOf(quotaDirMap); } } /** * Context object to record blocks and inodes that need to be reclaimed */ public static class ReclaimContext { protected final BlockStoragePolicySuite bsps; protected final BlocksMapUpdateInfo collectedBlocks; protected final List<INode> removedINodes; protected final List<Long> removedUCFiles; /** Used to collect quota usage delta */ private final QuotaDelta quotaDelta; /** * @param bsps * block storage policy suite to calculate intended storage type * usage * @param collectedBlocks * blocks collected from the descents for further block * deletion/update will be added to the given map. 
* @param removedINodes * INodes collected from the descents for further cleaning up of * @param removedUCFiles * files that the NN need to remove the leases */ public ReclaimContext( BlockStoragePolicySuite bsps, BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes, List<Long> removedUCFiles) { this.bsps = bsps; this.collectedBlocks = collectedBlocks; this.removedINodes = removedINodes; this.removedUCFiles = removedUCFiles; this.quotaDelta = new QuotaDelta(); } public BlockStoragePolicySuite storagePolicySuite() { return bsps; } public BlocksMapUpdateInfo collectedBlocks() { return collectedBlocks; } public QuotaDelta quotaDelta() { return quotaDelta; } /** * make a copy with the same collectedBlocks, removedINodes, and * removedUCFiles but a new quotaDelta. */ public ReclaimContext getCopy() { return new ReclaimContext(bsps, collectedBlocks, removedINodes, removedUCFiles); } } /** * Information used for updating the blocksMap when deleting files. */ public static class BlocksMapUpdateInfo { /** * The list of blocks that need to be removed from blocksMap */ private final List<BlockInfo> toDeleteList; public BlocksMapUpdateInfo() { toDeleteList = new ChunkedArrayList<>(); } /** * @return The list of blocks that need to be removed from blocksMap */ public List<BlockInfo> getToDeleteList() { return toDeleteList; } /** * Add a to-be-deleted block into the * {@link BlocksMapUpdateInfo#toDeleteList} * @param toDelete the to-be-deleted block */ public void addDeleteBlock(BlockInfo toDelete) { assert toDelete != null : "toDelete is null"; toDeleteList.add(toDelete); } public void removeDeleteBlock(BlockInfo block) { assert block != null : "block is null"; toDeleteList.remove(block); } /** * Clear {@link BlocksMapUpdateInfo#toDeleteList} */ public void clear() { toDeleteList.clear(); } } /** * INode feature such as {@link FileUnderConstructionFeature} * and {@link DirectoryWithQuotaFeature}. */ public interface Feature { } }
32,988
32.121486
93
java
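The INode code above documents how an absolute path is split into components: getPathNames() rejects anything that does not start with '/', and getPathComponents() converts each name to UTF-8 bytes, with a single null component standing for the root. The standalone sketch below mirrors that documented behaviour using only the JDK; the class name PathComponentsSketch and the use of java.util streams are illustrative assumptions, not part of the upstream file.

// Standalone sketch mirroring the documented behaviour of
// INode.getPathNames()/getPathComponents(); names below are illustrative.
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class PathComponentsSketch {

  /** Reject relative paths and split an absolute path on '/'. */
  static String[] getPathNames(String path) {
    if (path == null || !path.startsWith("/")) {
      throw new AssertionError("Absolute path required");
    }
    return Arrays.stream(path.split("/"))
        .filter(s -> !s.isEmpty())          // drop empty strings, as StringUtils.split does
        .toArray(String[]::new);
  }

  /** Convert each component to UTF-8 bytes; the root is a single null component. */
  static byte[][] getPathComponents(String[] names) {
    if (names.length == 0) {
      return new byte[][] { null };
    }
    byte[][] bytes = new byte[names.length][];
    for (int i = 0; i < names.length; i++) {
      bytes[i] = names[i].getBytes(StandardCharsets.UTF_8);
    }
    return bytes;
  }

  public static void main(String[] args) {
    for (byte[] c : getPathComponents(getPathNames("/abc/foo/bar"))) {
      System.out.println(c == null ? "<root>" : new String(c, StandardCharsets.UTF_8));
    }
  }
}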
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.File; import java.io.FilenameFilter; import java.io.IOException; import java.nio.file.Files; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.StorageInfo; import com.google.common.base.Preconditions; import org.apache.hadoop.io.IOUtils; public abstract class NNUpgradeUtil { private static final Log LOG = LogFactory.getLog(NNUpgradeUtil.class); /** * Return true if this storage dir can roll back to the previous storage * state, false otherwise. The NN will refuse to run the rollback operation * unless at least one JM or fsimage storage directory can roll back. * * @param storage the storage info for the current state * @param prevStorage the storage info for the previous (unupgraded) state * @param targetLayoutVersion the layout version we intend to roll back to * @return true if this JM can roll back, false otherwise. * @throws IOException in the event of error */ static boolean canRollBack(StorageDirectory sd, StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion) throws IOException { File prevDir = sd.getPreviousDir(); if (!prevDir.exists()) { // use current directory then LOG.info("Storage directory " + sd.getRoot() + " does not contain previous fs state."); // read and verify consistency with other directories storage.readProperties(sd); return false; } // read and verify consistency of the prev dir prevStorage.readPreviousVersionProperties(sd); if (prevStorage.getLayoutVersion() != targetLayoutVersion) { throw new IOException( "Cannot rollback to storage version " + prevStorage.getLayoutVersion() + " using this version of the NameNode, which uses storage version " + targetLayoutVersion + ". " + "Please use the previous version of HDFS to perform the rollback."); } return true; } /** * Finalize the upgrade. The previous dir, if any, will be renamed and * removed. After this is completed, rollback is no longer allowed. 
* * @param sd the storage directory to finalize * @throws IOException in the event of error */ static void doFinalize(StorageDirectory sd) throws IOException { File prevDir = sd.getPreviousDir(); if (!prevDir.exists()) { // already discarded LOG.info("Directory " + prevDir + " does not exist."); LOG.info("Finalize upgrade for " + sd.getRoot()+ " is not required."); return; } LOG.info("Finalizing upgrade of storage directory " + sd.getRoot()); Preconditions.checkState(sd.getCurrentDir().exists(), "Current directory must exist."); final File tmpDir = sd.getFinalizedTmp(); // rename previous to tmp and remove NNStorage.rename(prevDir, tmpDir); NNStorage.deleteDir(tmpDir); LOG.info("Finalize upgrade for " + sd.getRoot()+ " is complete."); } /** * Perform any steps that must succeed across all storage dirs/JournalManagers * involved in an upgrade before proceeding onto the actual upgrade stage. If * a call to any JM's or local storage dir's doPreUpgrade method fails, then * doUpgrade will not be called for any JM. The existing current dir is * renamed to previous.tmp, and then a new, empty current dir is created. * * @param conf configuration for creating {@link EditLogFileOutputStream} * @param sd the storage directory to perform the pre-upgrade procedure. * @throws IOException in the event of error */ static void doPreUpgrade(Configuration conf, StorageDirectory sd) throws IOException { LOG.info("Starting upgrade of storage directory " + sd.getRoot()); // rename current to tmp renameCurToTmp(sd); final File curDir = sd.getCurrentDir(); final File tmpDir = sd.getPreviousTmp(); List<String> fileNameList = IOUtils.listDirectory(tmpDir, new FilenameFilter() { @Override public boolean accept(File dir, String name) { return dir.equals(tmpDir) && name.startsWith(NNStorage.NameNodeFile.EDITS.getName()); } }); for (String s : fileNameList) { File prevFile = new File(tmpDir, s); File newFile = new File(curDir, prevFile.getName()); Files.createLink(newFile.toPath(), prevFile.toPath()); } } /** * Rename the existing current dir to previous.tmp, and create a new empty * current dir. */ public static void renameCurToTmp(StorageDirectory sd) throws IOException { File curDir = sd.getCurrentDir(); File prevDir = sd.getPreviousDir(); final File tmpDir = sd.getPreviousTmp(); Preconditions.checkState(curDir.exists(), "Current directory must exist for preupgrade."); Preconditions.checkState(!prevDir.exists(), "Previous directory must not exist for preupgrade."); Preconditions.checkState(!tmpDir.exists(), "Previous.tmp directory must not exist for preupgrade." + "Consider restarting for recovery."); // rename current to tmp NNStorage.rename(curDir, tmpDir); if (!curDir.mkdir()) { throw new IOException("Cannot create directory " + curDir); } } /** * Perform the upgrade of the storage dir to the given storage info. The new * storage info is written into the current directory, and the previous.tmp * directory is renamed to previous. * * @param sd the storage directory to upgrade * @param storage info about the new upgraded versions. * @throws IOException in the event of error */ public static void doUpgrade(StorageDirectory sd, Storage storage) throws IOException { LOG.info("Performing upgrade of storage directory " + sd.getRoot()); try { // Write the version file, since saveFsImage only makes the // fsimage_<txid>, and the directory is otherwise empty. 
storage.writeProperties(sd); File prevDir = sd.getPreviousDir(); File tmpDir = sd.getPreviousTmp(); Preconditions.checkState(!prevDir.exists(), "previous directory must not exist for upgrade."); Preconditions.checkState(tmpDir.exists(), "previous.tmp directory must exist for upgrade."); // rename tmp to previous NNStorage.rename(tmpDir, prevDir); } catch (IOException ioe) { LOG.error("Unable to rename temp to previous for " + sd.getRoot(), ioe); throw ioe; } } /** * Perform rollback of the storage dir to the previous state. The existing * current dir is removed, and the previous dir is renamed to current. * * @param sd the storage directory to roll back. * @throws IOException in the event of error */ static void doRollBack(StorageDirectory sd) throws IOException { File prevDir = sd.getPreviousDir(); if (!prevDir.exists()) { return; } File tmpDir = sd.getRemovedTmp(); Preconditions.checkState(!tmpDir.exists(), "removed.tmp directory must not exist for rollback." + "Consider restarting for recovery."); // rename current to tmp File curDir = sd.getCurrentDir(); Preconditions.checkState(curDir.exists(), "Current directory must exist for rollback."); NNStorage.rename(curDir, tmpDir); // rename previous to current NNStorage.rename(prevDir, curDir); // delete tmp dir NNStorage.deleteDir(tmpDir); LOG.info("Rollback of " + sd.getRoot() + " is complete."); } }
8,557
36.867257
84
java
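NNUpgradeUtil.java above describes a three-step directory choreography: doPreUpgrade() renames current to previous.tmp and recreates an empty current, doUpgrade() renames previous.tmp to previous once the new metadata is written, and doRollBack() swaps previous back into place via removed.tmp. The sketch below replays that rename sequence with plain java.nio.file calls so the ordering is easy to follow; the class UpgradeLayoutSketch is hypothetical and omits the property writing, edit-file hard-linking, and recursive deletes the real code performs.

// Standalone sketch of the current/previous.tmp/previous rename choreography
// documented in NNUpgradeUtil; class name and directory handling are simplified.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class UpgradeLayoutSketch {

  /** Pre-upgrade: current becomes previous.tmp and an empty current is recreated. */
  static void preUpgrade(Path storageRoot) throws IOException {
    Path current = storageRoot.resolve("current");
    Path previousTmp = storageRoot.resolve("previous.tmp");
    if (Files.exists(previousTmp)) {
      throw new IOException("previous.tmp must not exist before an upgrade");
    }
    Files.move(current, previousTmp);
    Files.createDirectory(current);
  }

  /** Upgrade: once new metadata is written into current, previous.tmp becomes previous. */
  static void finishUpgrade(Path storageRoot) throws IOException {
    Files.move(storageRoot.resolve("previous.tmp"), storageRoot.resolve("previous"));
  }

  /** Rollback: current is parked as removed.tmp and previous becomes current again. */
  static void rollback(Path storageRoot) throws IOException {
    Path removedTmp = storageRoot.resolve("removed.tmp");
    Files.move(storageRoot.resolve("current"), removedTmp);
    Files.move(storageRoot.resolve("previous"), storageRoot.resolve("current"));
    // The real code deletes removed.tmp recursively here; omitted in this sketch.
  }

  public static void main(String[] args) throws IOException {
    Path root = Files.createTempDirectory("storage-sketch");
    Files.createDirectory(root.resolve("current"));
    preUpgrade(root);
    finishUpgrade(root);
    System.out.println(Files.exists(root.resolve("previous")));  // prints: true
  }
}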
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import com.google.common.base.Preconditions; /** * An anonymous reference to an inode. * * This class and its subclasses are used to support multiple access paths. * A file/directory may have multiple access paths when it is stored in some * snapshots and it is renamed/moved to other locations. * * For example, * (1) Suppose we have /abc/foo, say the inode of foo is inode(id=1000,name=foo) * (2) create snapshot s0 for /abc * (3) mv /abc/foo /xyz/bar, i.e. inode(id=1000,name=...) is renamed from "foo" * to "bar" and its parent becomes /xyz. * * Then, /xyz/bar and /abc/.snapshot/s0/foo are two different access paths to * the same inode, inode(id=1000,name=bar). * * With references, we have the following * - /abc has a child ref(id=1001,name=foo). * - /xyz has a child ref(id=1002) * - Both ref(id=1001,name=foo) and ref(id=1002) point to another reference, * ref(id=1003,count=2). * - Finally, ref(id=1003,count=2) points to inode(id=1000,name=bar). * * Note 1: For a reference without name, e.g. ref(id=1002), it uses the name * of the referred inode. * Note 2: getParent() always returns the parent in the current state, e.g. * inode(id=1000,name=bar).getParent() returns /xyz but not /abc. */ public abstract class INodeReference extends INode { /** * Try to remove the given reference and then return the reference count. * If the given inode is not a reference, return -1; */ public static int tryRemoveReference(INode inode) { if (!inode.isReference()) { return -1; } return removeReference(inode.asReference()); } /** * Remove the given reference and then return the reference count. * If the referred inode is not a WithCount, return -1; */ private static int removeReference(INodeReference ref) { final INode referred = ref.getReferredINode(); if (!(referred instanceof WithCount)) { return -1; } WithCount wc = (WithCount) referred; wc.removeReference(ref); return wc.getReferenceCount(); } /** * When destroying a reference node (WithName or DstReference), we call this * method to identify the snapshot which is the latest snapshot before the * reference node's creation. 
*/ static int getPriorSnapshot(INodeReference ref) { WithCount wc = (WithCount) ref.getReferredINode(); WithName wn = null; if (ref instanceof DstReference) { wn = wc.getLastWithName(); } else if (ref instanceof WithName) { wn = wc.getPriorWithName((WithName) ref); } if (wn != null) { INode referred = wc.getReferredINode(); if (referred.isFile() && referred.asFile().isWithSnapshot()) { return referred.asFile().getDiffs().getPrior(wn.lastSnapshotId); } else if (referred.isDirectory()) { DirectoryWithSnapshotFeature sf = referred.asDirectory() .getDirectoryWithSnapshotFeature(); if (sf != null) { return sf.getDiffs().getPrior(wn.lastSnapshotId); } } } return Snapshot.NO_SNAPSHOT_ID; } private INode referred; public INodeReference(INode parent, INode referred) { super(parent); this.referred = referred; } public final INode getReferredINode() { return referred; } public final void setReferredINode(INode referred) { this.referred = referred; } @Override public final boolean isReference() { return true; } @Override public final INodeReference asReference() { return this; } @Override public final boolean isFile() { return referred.isFile(); } @Override public final INodeFile asFile() { return referred.asFile(); } @Override public final boolean isDirectory() { return referred.isDirectory(); } @Override public final INodeDirectory asDirectory() { return referred.asDirectory(); } @Override public final boolean isSymlink() { return referred.isSymlink(); } @Override public final INodeSymlink asSymlink() { return referred.asSymlink(); } @Override public byte[] getLocalNameBytes() { return referred.getLocalNameBytes(); } @Override public void setLocalName(byte[] name) { referred.setLocalName(name); } @Override public final long getId() { return referred.getId(); } @Override public final PermissionStatus getPermissionStatus(int snapshotId) { return referred.getPermissionStatus(snapshotId); } @Override public final String getUserName(int snapshotId) { return referred.getUserName(snapshotId); } @Override final void setUser(String user) { referred.setUser(user); } @Override public final String getGroupName(int snapshotId) { return referred.getGroupName(snapshotId); } @Override final void setGroup(String group) { referred.setGroup(group); } @Override public final FsPermission getFsPermission(int snapshotId) { return referred.getFsPermission(snapshotId); } @Override final AclFeature getAclFeature(int snapshotId) { return referred.getAclFeature(snapshotId); } @Override final void addAclFeature(AclFeature aclFeature) { referred.addAclFeature(aclFeature); } @Override final void removeAclFeature() { referred.removeAclFeature(); } @Override final XAttrFeature getXAttrFeature(int snapshotId) { return referred.getXAttrFeature(snapshotId); } @Override final void addXAttrFeature(XAttrFeature xAttrFeature) { referred.addXAttrFeature(xAttrFeature); } @Override final void removeXAttrFeature() { referred.removeXAttrFeature(); } @Override public final short getFsPermissionShort() { return referred.getFsPermissionShort(); } @Override void setPermission(FsPermission permission) { referred.setPermission(permission); } @Override public long getPermissionLong() { return referred.getPermissionLong(); } @Override public final long getModificationTime(int snapshotId) { return referred.getModificationTime(snapshotId); } @Override public final INode updateModificationTime(long mtime, int latestSnapshotId) { return referred.updateModificationTime(mtime, latestSnapshotId); } @Override public final void setModificationTime(long 
modificationTime) { referred.setModificationTime(modificationTime); } @Override public final long getAccessTime(int snapshotId) { return referred.getAccessTime(snapshotId); } @Override public final void setAccessTime(long accessTime) { referred.setAccessTime(accessTime); } @Override public final byte getStoragePolicyID() { return referred.getStoragePolicyID(); } @Override public final byte getLocalStoragePolicyID() { return referred.getLocalStoragePolicyID(); } @Override final void recordModification(int latestSnapshotId) { referred.recordModification(latestSnapshotId); } @Override // used by WithCount public void cleanSubtree( ReclaimContext reclaimContext, int snapshot, int prior) { referred.cleanSubtree(reclaimContext, snapshot, prior); } @Override // used by WithCount public void destroyAndCollectBlocks(ReclaimContext reclaimContext) { if (removeReference(this) <= 0) { referred.destroyAndCollectBlocks(reclaimContext); } } @Override public ContentSummaryComputationContext computeContentSummary( ContentSummaryComputationContext summary) { return referred.computeContentSummary(summary); } @Override public QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps, byte blockStoragePolicyId, boolean useCache, int lastSnapshotId) { return referred.computeQuotaUsage(bsps, blockStoragePolicyId, useCache, lastSnapshotId); } @Override public final INodeAttributes getSnapshotINode(int snapshotId) { return referred.getSnapshotINode(snapshotId); } @Override public QuotaCounts getQuotaCounts() { return referred.getQuotaCounts(); } @Override public final void clear() { super.clear(); referred = null; } @Override public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, final int snapshot) { super.dumpTreeRecursively(out, prefix, snapshot); if (this instanceof DstReference) { out.print(", dstSnapshotId=" + ((DstReference) this).dstSnapshotId); } if (this instanceof WithCount) { out.print(", count=" + ((WithCount)this).getReferenceCount()); } out.println(); final StringBuilder b = new StringBuilder(); for(int i = 0; i < prefix.length(); i++) { b.append(' '); } b.append("->"); getReferredINode().dumpTreeRecursively(out, b, snapshot); } public int getDstSnapshotId() { return Snapshot.CURRENT_STATE_ID; } /** An anonymous reference with reference count. */ public static class WithCount extends INodeReference { private final List<WithName> withNameList = new ArrayList<>(); /** * Compare snapshot with IDs, where null indicates the current status thus * is greater than any non-null snapshot. */ public static final Comparator<WithName> WITHNAME_COMPARATOR = new Comparator<WithName>() { @Override public int compare(WithName left, WithName right) { return left.lastSnapshotId - right.lastSnapshotId; } }; public WithCount(INodeReference parent, INode referred) { super(parent, referred); Preconditions.checkArgument(!referred.isReference()); referred.setParentReference(this); } public int getReferenceCount() { int count = withNameList.size(); if (getParentReference() != null) { count++; } return count; } /** Increment and then return the reference count. */ public void addReference(INodeReference ref) { if (ref instanceof WithName) { WithName refWithName = (WithName) ref; int i = Collections.binarySearch(withNameList, refWithName, WITHNAME_COMPARATOR); Preconditions.checkState(i < 0); withNameList.add(-i - 1, refWithName); } else if (ref instanceof DstReference) { setParentReference(ref); } } /** Decrement and then return the reference count. 
*/ public void removeReference(INodeReference ref) { if (ref instanceof WithName) { int i = Collections.binarySearch(withNameList, (WithName) ref, WITHNAME_COMPARATOR); if (i >= 0) { withNameList.remove(i); } } else if (ref == getParentReference()) { setParent(null); } } WithName getLastWithName() { return withNameList.size() > 0 ? withNameList.get(withNameList.size() - 1) : null; } WithName getPriorWithName(WithName post) { int i = Collections.binarySearch(withNameList, post, WITHNAME_COMPARATOR); if (i > 0) { return withNameList.get(i - 1); } else if (i == 0 || i == -1) { return null; } else { return withNameList.get(-i - 2); } } /** * @return the WithName/DstReference node contained in the given snapshot. */ public INodeReference getParentRef(int snapshotId) { int start = 0; int end = withNameList.size() - 1; while (start < end) { int mid = start + (end - start) / 2; int sid = withNameList.get(mid).lastSnapshotId; if (sid == snapshotId) { return withNameList.get(mid); } else if (sid < snapshotId) { start = mid + 1; } else { end = mid; } } if (start < withNameList.size() && withNameList.get(start).lastSnapshotId >= snapshotId) { return withNameList.get(start); } else { return this.getParentReference(); } } } /** A reference with a fixed name. */ public static class WithName extends INodeReference { private final byte[] name; /** * The id of the last snapshot in the src tree when this WithName node was * generated. When calculating the quota usage of the referred node, only * the files/dirs existing when this snapshot was taken will be counted for * this WithName node and propagated along its ancestor path. */ private final int lastSnapshotId; public WithName(INodeDirectory parent, WithCount referred, byte[] name, int lastSnapshotId) { super(parent, referred); this.name = name; this.lastSnapshotId = lastSnapshotId; referred.addReference(this); } @Override public final byte[] getLocalNameBytes() { return name; } @Override public final void setLocalName(byte[] name) { throw new UnsupportedOperationException("Cannot set name: " + getClass() + " is immutable."); } public int getLastSnapshotId() { return lastSnapshotId; } @Override public final ContentSummaryComputationContext computeContentSummary( ContentSummaryComputationContext summary) { // only count storagespace for WithName final QuotaCounts q = computeQuotaUsage( summary.getBlockStoragePolicySuite(), getStoragePolicyID(), false, lastSnapshotId); summary.getCounts().addContent(Content.DISKSPACE, q.getStorageSpace()); summary.getCounts().addTypeSpaces(q.getTypeSpaces()); return summary; } @Override public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps, byte blockStoragePolicyId, boolean useCache, int lastSnapshotId) { // if this.lastSnapshotId < lastSnapshotId, the rename of the referred // node happened before the rename of its ancestor. This should be // impossible since for WithName node we only count its children at the // time of the rename. Preconditions.checkState(lastSnapshotId == Snapshot.CURRENT_STATE_ID || this.lastSnapshotId >= lastSnapshotId); final INode referred = this.getReferredINode().asReference() .getReferredINode(); // We will continue the quota usage computation using the same snapshot id // as time line (if the given snapshot id is valid). Also, we cannot use // cache for the referred node since its cached quota may have already // been updated by changes in the current tree. int id = lastSnapshotId != Snapshot.CURRENT_STATE_ID ? 
lastSnapshotId : this.lastSnapshotId; return referred.computeQuotaUsage(bsps, blockStoragePolicyId, false, id); } @Override public void cleanSubtree(ReclaimContext reclaimContext, final int snapshot, int prior) { // since WithName node resides in deleted list acting as a snapshot copy, // the parameter snapshot must be non-null Preconditions.checkArgument(snapshot != Snapshot.CURRENT_STATE_ID); // if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to the // previous WithName instance if (prior == Snapshot.NO_SNAPSHOT_ID) { prior = getPriorSnapshot(this); } if (prior != Snapshot.NO_SNAPSHOT_ID && Snapshot.ID_INTEGER_COMPARATOR.compare(snapshot, prior) <= 0) { return; } // record the old quota delta QuotaCounts old = reclaimContext.quotaDelta().getCountsCopy(); getReferredINode().cleanSubtree(reclaimContext, snapshot, prior); INodeReference ref = getReferredINode().getParentReference(); if (ref != null) { QuotaCounts current = reclaimContext.quotaDelta().getCountsCopy(); current.subtract(old); // we need to update the quota usage along the parent path from ref reclaimContext.quotaDelta().addUpdatePath(ref, current); } if (snapshot < lastSnapshotId) { // for a WithName node, when we compute its quota usage, we only count // in all the nodes existing at the time of the corresponding rename op. // Thus if we are deleting a snapshot before/at the snapshot associated // with lastSnapshotId, we do not need to update the quota upwards. reclaimContext.quotaDelta().setCounts(old); } } @Override public void destroyAndCollectBlocks(ReclaimContext reclaimContext) { int snapshot = getSelfSnapshot(); reclaimContext.quotaDelta().add(computeQuotaUsage(reclaimContext.bsps)); if (removeReference(this) <= 0) { getReferredINode().destroyAndCollectBlocks(reclaimContext.getCopy()); } else { int prior = getPriorSnapshot(this); INode referred = getReferredINode().asReference().getReferredINode(); if (snapshot != Snapshot.NO_SNAPSHOT_ID) { if (prior != Snapshot.NO_SNAPSHOT_ID && snapshot <= prior) { // the snapshot to be deleted has been deleted while traversing // the src tree of the previous rename operation. This usually // happens when rename's src and dst are under the same // snapshottable directory. E.g., the following operation sequence: // 1. create snapshot s1 on /test // 2. rename /test/foo/bar to /test/foo2/bar // 3. create snapshot s2 on /test // 4. rename foo2 again // 5. delete snapshot s2 return; } ReclaimContext newCtx = reclaimContext.getCopy(); referred.cleanSubtree(newCtx, snapshot, prior); INodeReference ref = getReferredINode().getParentReference(); if (ref != null) { // we need to update the quota usage along the parent path from ref reclaimContext.quotaDelta().addUpdatePath(ref, newCtx.quotaDelta().getCountsCopy()); } } } } private int getSelfSnapshot() { INode referred = getReferredINode().asReference().getReferredINode(); int snapshot = Snapshot.NO_SNAPSHOT_ID; if (referred.isFile() && referred.asFile().isWithSnapshot()) { snapshot = referred.asFile().getDiffs().getPrior(lastSnapshotId); } else if (referred.isDirectory()) { DirectoryWithSnapshotFeature sf = referred.asDirectory() .getDirectoryWithSnapshotFeature(); if (sf != null) { snapshot = sf.getDiffs().getPrior(lastSnapshotId); } } return snapshot; } } public static class DstReference extends INodeReference { /** * Record the latest snapshot of the dst subtree before the rename. 
For * later operations on the moved/renamed files/directories, if the latest * snapshot is after this dstSnapshot, changes will be recorded to the * latest snapshot. Otherwise changes will be recorded to the snapshot * belonging to the src of the rename. * * {@link Snapshot#NO_SNAPSHOT_ID} means no dstSnapshot (e.g., src of the * first-time rename). */ private final int dstSnapshotId; @Override public final int getDstSnapshotId() { return dstSnapshotId; } public DstReference(INodeDirectory parent, WithCount referred, final int dstSnapshotId) { super(parent, referred); this.dstSnapshotId = dstSnapshotId; referred.addReference(this); } @Override public void cleanSubtree(ReclaimContext reclaimContext, int snapshot, int prior) { if (snapshot == Snapshot.CURRENT_STATE_ID && prior == Snapshot.NO_SNAPSHOT_ID) { destroyAndCollectBlocks(reclaimContext); } else { // if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to // the previous WithName instance if (prior == Snapshot.NO_SNAPSHOT_ID) { prior = getPriorSnapshot(this); } // if prior is not NO_SNAPSHOT_ID, and prior is not before the // to-be-deleted snapshot, we can quit here and leave the snapshot // deletion work to the src tree of rename if (snapshot != Snapshot.CURRENT_STATE_ID && prior != Snapshot.NO_SNAPSHOT_ID && Snapshot.ID_INTEGER_COMPARATOR.compare(snapshot, prior) <= 0) { return; } getReferredINode().cleanSubtree(reclaimContext, snapshot, prior); } } /** * {@inheritDoc} * <br/> * To destroy a DstReference node, we first remove its link with the * referred node. If the reference number of the referred node is <= 0, we * destroy the subtree of the referred node. Otherwise, we clean the * referred node's subtree and delete everything created after the last * rename operation, i.e., everything outside of the scope of the prior * WithName nodes. * @param reclaimContext */ @Override public void destroyAndCollectBlocks(ReclaimContext reclaimContext) { // since we count everything of the subtree for the quota usage of a // dst reference node, here we should just simply do a quota computation. // then to avoid double counting, we pass a different QuotaDelta to other // calls reclaimContext.quotaDelta().add(computeQuotaUsage(reclaimContext.bsps)); ReclaimContext newCtx = reclaimContext.getCopy(); if (removeReference(this) <= 0) { getReferredINode().destroyAndCollectBlocks(newCtx); } else { // we will clean everything, including files, directories, and // snapshots, that were created after this prior snapshot int prior = getPriorSnapshot(this); // prior must be non-null, otherwise we do not have any previous // WithName nodes, and the reference number will be 0. 
Preconditions.checkState(prior != Snapshot.NO_SNAPSHOT_ID); // identify the snapshot created after prior int snapshot = getSelfSnapshot(prior); INode referred = getReferredINode().asReference().getReferredINode(); if (referred.isFile()) { // if referred is a file, it must be a file with snapshot since we did // recordModification before the rename INodeFile file = referred.asFile(); Preconditions.checkState(file.isWithSnapshot()); // make sure we mark the file as deleted file.getFileWithSnapshotFeature().deleteCurrentFile(); // when calling cleanSubtree of the referred node, since we // compute quota usage updates before calling this destroy // function, we use true for countDiffChange referred.cleanSubtree(newCtx, snapshot, prior); } else if (referred.isDirectory()) { // similarly, if referred is a directory, it must be an // INodeDirectory with snapshot INodeDirectory dir = referred.asDirectory(); Preconditions.checkState(dir.isWithSnapshot()); DirectoryWithSnapshotFeature.destroyDstSubtree(newCtx, dir, snapshot, prior); } } } private int getSelfSnapshot(final int prior) { WithCount wc = (WithCount) getReferredINode().asReference(); INode referred = wc.getReferredINode(); int lastSnapshot = Snapshot.CURRENT_STATE_ID; if (referred.isFile() && referred.asFile().isWithSnapshot()) { lastSnapshot = referred.asFile().getDiffs().getLastSnapshotId(); } else if (referred.isDirectory()) { DirectoryWithSnapshotFeature sf = referred.asDirectory() .getDirectoryWithSnapshotFeature(); if (sf != null) { lastSnapshot = sf.getLastSnapshotId(); } } if (lastSnapshot != Snapshot.CURRENT_STATE_ID && lastSnapshot != prior) { return lastSnapshot; } else { return Snapshot.CURRENT_STATE_ID; } } } }
25,143
32.614973
84
java
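The class comment of INodeReference.java above explains the reference structure created by rename-with-snapshots: one WithName reference per snapshot-visible name, at most one DstReference for the current path, and a shared WithCount whose reference count is their sum. The minimal sketch below models only that counting rule; RefCountSketch and its method names are illustrative and deliberately ignore the sorting by snapshot id, quota propagation, and everything else the real WithCount does.

// Standalone sketch of the WithCount reference-count bookkeeping described in
// the INodeReference class comment; class names are illustrative only.
import java.util.ArrayList;
import java.util.List;

public class RefCountSketch {

  static class WithCount<T> {
    private final T referred;
    private final List<String> withNames = new ArrayList<>(); // one per snapshot-visible name
    private boolean hasDstReference;                          // the current-tree path, if any

    WithCount(T referred) { this.referred = referred; }

    void addWithName(String name) { withNames.add(name); }
    void setDstReference() { hasDstReference = true; }
    void removeDstReference() { hasDstReference = false; }

    int getReferenceCount() {
      return withNames.size() + (hasDstReference ? 1 : 0);
    }

    T getReferred() { return referred; }
  }

  public static void main(String[] args) {
    // Mirrors the example in the class comment: /abc/.snapshot/s0/foo and /xyz/bar
    // are two access paths to the same inode, so the count is 2.
    WithCount<String> wc = new WithCount<>("inode(id=1000,name=bar)");
    wc.addWithName("foo");   // kept alive by snapshot s0 under /abc
    wc.setDstReference();    // the renamed path /xyz/bar
    System.out.println(wc.getReferenceCount() + " -> " + wc.getReferred()); // 2 -> inode(id=1000,name=bar)
  }
}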
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.Set; import java.util.Stack; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclEntryType; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer; import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; /** * Class that helps in checking file system permission. * The state of this class need not be synchronized as it has data structures that * are read-only. * * Some of the helper methods are gaurded by {@link FSNamesystem#readLock()}. */ class FSPermissionChecker implements AccessControlEnforcer { static final Log LOG = LogFactory.getLog(UserGroupInformation.class); /** @return a string for throwing {@link AccessControlException} */ private String toAccessControlString(INodeAttributes inodeAttrib, String path, FsAction access, FsPermission mode) { return toAccessControlString(inodeAttrib, path, access, mode, false); } /** @return a string for throwing {@link AccessControlException} */ private String toAccessControlString(INodeAttributes inodeAttrib, String path, FsAction access, FsPermission mode, boolean deniedFromAcl) { StringBuilder sb = new StringBuilder("Permission denied: ") .append("user=").append(getUser()).append(", ") .append("access=").append(access).append(", ") .append("inode=\"").append(path).append("\":") .append(inodeAttrib.getUserName()).append(':') .append(inodeAttrib.getGroupName()).append(':') .append(inodeAttrib.isDirectory() ? 
'd' : '-') .append(mode); if (deniedFromAcl) { sb.append("+"); } return sb.toString(); } private final String fsOwner; private final String supergroup; private final UserGroupInformation callerUgi; private final String user; private final Set<String> groups; private final boolean isSuper; private final INodeAttributeProvider attributeProvider; FSPermissionChecker(String fsOwner, String supergroup, UserGroupInformation callerUgi, INodeAttributeProvider attributeProvider) { this.fsOwner = fsOwner; this.supergroup = supergroup; this.callerUgi = callerUgi; HashSet<String> s = new HashSet<String>(Arrays.asList(callerUgi.getGroupNames())); groups = Collections.unmodifiableSet(s); user = callerUgi.getShortUserName(); isSuper = user.equals(fsOwner) || groups.contains(supergroup); this.attributeProvider = attributeProvider; } public boolean containsGroup(String group) { return groups.contains(group); } public String getUser() { return user; } public Set<String> getGroups() { return groups; } public boolean isSuperUser() { return isSuper; } public INodeAttributeProvider getAttributesProvider() { return attributeProvider; } /** * Verify if the caller has the required permission. This will result into * an exception if the caller is not allowed to access the resource. */ public void checkSuperuserPrivilege() throws AccessControlException { if (!isSuperUser()) { throw new AccessControlException("Access denied for user " + getUser() + ". Superuser privilege is required"); } } /** * Check whether current user have permissions to access the path. * Traverse is always checked. * * Parent path means the parent directory for the path. * Ancestor path means the last (the closest) existing ancestor directory * of the path. * Note that if the parent path exists, * then the parent path and the ancestor path are the same. * * For example, suppose the path is "/foo/bar/baz". * No matter baz is a file or a directory, * the parent path is "/foo/bar". * If bar exists, then the ancestor path is also "/foo/bar". * If bar does not exist and foo exists, * then the ancestor path is "/foo". * Further, if both foo and bar do not exist, * then the ancestor path is "/". * * @param doCheckOwner Require user to be the owner of the path? * @param ancestorAccess The access required by the ancestor of the path. * @param parentAccess The access required by the parent of the path. * @param access The access required by the path. * @param subAccess If path is a directory, * it is the access required of the path and all the sub-directories. * If path is not a directory, there is no effect. * @param ignoreEmptyDir Ignore permission checking for empty directory? * @throws AccessControlException * * Guarded by {@link FSNamesystem#readLock()} * Caller of this method must hold that lock. */ void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess, FsAction access, FsAction subAccess, boolean ignoreEmptyDir) throws AccessControlException { if (LOG.isDebugEnabled()) { LOG.debug("ACCESS CHECK: " + this + ", doCheckOwner=" + doCheckOwner + ", ancestorAccess=" + ancestorAccess + ", parentAccess=" + parentAccess + ", access=" + access + ", subAccess=" + subAccess + ", ignoreEmptyDir=" + ignoreEmptyDir); } // check if (parentAccess != null) && file exists, then check sb // If resolveLink, the check is performed on the link target. 
final int snapshotId = inodesInPath.getPathSnapshotId(); final INode[] inodes = inodesInPath.getINodesArray(); final INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.length]; final byte[][] pathByNameArr = new byte[inodes.length][]; for (int i = 0; i < inodes.length && inodes[i] != null; i++) { if (inodes[i] != null) { pathByNameArr[i] = inodes[i].getLocalNameBytes(); inodeAttrs[i] = getINodeAttrs(pathByNameArr, i, inodes[i], snapshotId); } } String path = inodesInPath.getPath(); int ancestorIndex = inodes.length - 2; AccessControlEnforcer enforcer = getAttributesProvider().getExternalAccessControlEnforcer(this); enforcer.checkPermission(fsOwner, supergroup, callerUgi, inodeAttrs, inodes, pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner, ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir); } @Override public void checkPermission(String fsOwner, String supergroup, UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs, INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path, int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess, FsAction parentAccess, FsAction access, FsAction subAccess, boolean ignoreEmptyDir) throws AccessControlException { for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null; ancestorIndex--); checkTraverse(inodeAttrs, path, ancestorIndex); final INodeAttributes last = inodeAttrs[inodeAttrs.length - 1]; if (parentAccess != null && parentAccess.implies(FsAction.WRITE) && inodeAttrs.length > 1 && last != null) { checkStickyBit(inodeAttrs[inodeAttrs.length - 2], last); } if (ancestorAccess != null && inodeAttrs.length > 1) { check(inodeAttrs, path, ancestorIndex, ancestorAccess); } if (parentAccess != null && inodeAttrs.length > 1) { check(inodeAttrs, path, inodeAttrs.length - 2, parentAccess); } if (access != null) { check(last, path, access); } if (subAccess != null) { INode rawLast = inodes[inodeAttrs.length - 1]; checkSubAccess(pathByNameArr, inodeAttrs.length - 1, rawLast, snapshotId, subAccess, ignoreEmptyDir); } if (doCheckOwner) { checkOwner(last); } } private INodeAttributes getINodeAttrs(byte[][] pathByNameArr, int pathIdx, INode inode, int snapshotId) { INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId); if (getAttributesProvider() != null) { String[] elements = new String[pathIdx + 1]; for (int i = 0; i < elements.length; i++) { elements[i] = DFSUtil.bytes2String(pathByNameArr[i]); } inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs); } return inodeAttrs; } /** Guarded by {@link FSNamesystem#readLock()} */ private void checkOwner(INodeAttributes inode ) throws AccessControlException { if (getUser().equals(inode.getUserName())) { return; } throw new AccessControlException( "Permission denied. 
user=" + getUser() + " is not the owner of inode=" + inode); } /** Guarded by {@link FSNamesystem#readLock()} */ private void checkTraverse(INodeAttributes[] inodes, String path, int last ) throws AccessControlException { for(int j = 0; j <= last; j++) { check(inodes[j], path, FsAction.EXECUTE); } } /** Guarded by {@link FSNamesystem#readLock()} */ private void checkSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode, int snapshotId, FsAction access, boolean ignoreEmptyDir) throws AccessControlException { if (inode == null || !inode.isDirectory()) { return; } Stack<INodeDirectory> directories = new Stack<INodeDirectory>(); for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) { INodeDirectory d = directories.pop(); ReadOnlyList<INode> cList = d.getChildrenList(snapshotId); if (!(cList.isEmpty() && ignoreEmptyDir)) { //TODO have to figure this out with inodeattribute provider check(getINodeAttrs(pathByNameArr, pathIdx, d, snapshotId), inode.getFullPathName(), access); } for(INode child : cList) { if (child.isDirectory()) { directories.push(child.asDirectory()); } } } } /** Guarded by {@link FSNamesystem#readLock()} */ private void check(INodeAttributes[] inodes, String path, int i, FsAction access ) throws AccessControlException { check(i >= 0 ? inodes[i] : null, path, access); } private void check(INodeAttributes inode, String path, FsAction access ) throws AccessControlException { if (inode == null) { return; } final FsPermission mode = inode.getFsPermission(); final AclFeature aclFeature = inode.getAclFeature(); if (aclFeature != null) { // It's possible that the inode has a default ACL but no access ACL. int firstEntry = aclFeature.getEntryAt(0); if (AclEntryStatusFormat.getScope(firstEntry) == AclEntryScope.ACCESS) { checkAccessAcl(inode, path, access, mode, aclFeature); return; } } if (getUser().equals(inode.getUserName())) { //user class if (mode.getUserAction().implies(access)) { return; } } else if (getGroups().contains(inode.getGroupName())) { //group class if (mode.getGroupAction().implies(access)) { return; } } else { //other class if (mode.getOtherAction().implies(access)) { return; } } throw new AccessControlException( toAccessControlString(inode, path, access, mode)); } /** * Checks requested access against an Access Control List. This method relies * on finding the ACL data in the relevant portions of {@link FsPermission} and * {@link AclFeature} as implemented in the logic of {@link AclStorage}. This * method also relies on receiving the ACL entries in sorted order. This is * assumed to be true, because the ACL modification methods in * {@link AclTransformation} sort the resulting entries. * * More specifically, this method depends on these invariants in an ACL: * - The list must be sorted. * - Each entry in the list must be unique by scope + type + name. * - There is exactly one each of the unnamed user/group/other entries. * - The mask entry must not have a name. * - The other entry must not have a name. * - Default entries may be present, but they are ignored during enforcement. 
* * @param inode INodeAttributes accessed inode * @param snapshotId int snapshot ID * @param access FsAction requested permission * @param mode FsPermission mode from inode * @param aclFeature AclFeature of inode * @throws AccessControlException if the ACL denies permission */ private void checkAccessAcl(INodeAttributes inode, String path, FsAction access, FsPermission mode, AclFeature aclFeature) throws AccessControlException { boolean foundMatch = false; // Use owner entry from permission bits if user is owner. if (getUser().equals(inode.getUserName())) { if (mode.getUserAction().implies(access)) { return; } foundMatch = true; } // Check named user and group entries if user was not denied by owner entry. if (!foundMatch) { for (int pos = 0, entry; pos < aclFeature.getEntriesSize(); pos++) { entry = aclFeature.getEntryAt(pos); if (AclEntryStatusFormat.getScope(entry) == AclEntryScope.DEFAULT) { break; } AclEntryType type = AclEntryStatusFormat.getType(entry); String name = AclEntryStatusFormat.getName(entry); if (type == AclEntryType.USER) { // Use named user entry with mask from permission bits applied if user // matches name. if (getUser().equals(name)) { FsAction masked = AclEntryStatusFormat.getPermission(entry).and( mode.getGroupAction()); if (masked.implies(access)) { return; } foundMatch = true; break; } } else if (type == AclEntryType.GROUP) { // Use group entry (unnamed or named) with mask from permission bits // applied if user is a member and entry grants access. If user is a // member of multiple groups that have entries that grant access, then // it doesn't matter which is chosen, so exit early after first match. String group = name == null ? inode.getGroupName() : name; if (getGroups().contains(group)) { FsAction masked = AclEntryStatusFormat.getPermission(entry).and( mode.getGroupAction()); if (masked.implies(access)) { return; } foundMatch = true; } } } } // Use other entry if user was not denied by an earlier match. if (!foundMatch && mode.getOtherAction().implies(access)) { return; } throw new AccessControlException( toAccessControlString(inode, path, access, mode)); } /** Guarded by {@link FSNamesystem#readLock()} */ private void checkStickyBit(INodeAttributes parent, INodeAttributes inode ) throws AccessControlException { if (!parent.getFsPermission().getStickyBit()) { return; } // If this user is the directory owner, return if (parent.getUserName().equals(getUser())) { return; } // if this user is the file owner, return if (inode.getUserName().equals(getUser())) { return; } throw new AccessControlException("Permission denied by sticky bit setting:" + " user=" + getUser() + ", inode=" + inode); } /** * Whether a cache pool can be accessed by the current context * * @param pool CachePool being accessed * @param access type of action being performed on the cache pool * @throws AccessControlException if pool cannot be accessed */ public void checkPermission(CachePool pool, FsAction access) throws AccessControlException { FsPermission mode = pool.getMode(); if (isSuperUser()) { return; } if (getUser().equals(pool.getOwnerName()) && mode.getUserAction().implies(access)) { return; } if (getGroups().contains(pool.getGroupName()) && mode.getGroupAction().implies(access)) { return; } if (mode.getOtherAction().implies(access)) { return; } throw new AccessControlException("Permission denied while accessing pool " + pool.getPoolName() + ": user " + getUser() + " does not have " + access.toString() + " permissions."); } }
17,390
37.054705
91
java
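As a companion to the FSPermissionChecker source in the record above, here is a minimal, self-contained Java sketch of the owner/group/other evaluation order that its check() method applies. The Action and PermissionBits types below are hypothetical stand-ins for Hadoop's FsAction and FsPermission, and the superuser, sticky-bit, and ACL-mask paths are deliberately omitted; this is an illustration under those assumptions, not the HDFS implementation.

import java.util.Set;

public class PermissionSketch {

  /** Simplified rwx action; the bit mask mirrors FsAction's ordering. */
  enum Action {
    NONE(0), EXECUTE(1), WRITE(2), WRITE_EXECUTE(3),
    READ(4), READ_EXECUTE(5), READ_WRITE(6), ALL(7);

    final int mask;
    Action(int mask) { this.mask = mask; }

    /** True if this action grants everything 'that' requires. */
    boolean implies(Action that) {
      return (mask & that.mask) == that.mask;
    }
  }

  /** Hypothetical stand-in for FsPermission plus the inode's owner and group. */
  static final class PermissionBits {
    final String owner;
    final String group;
    final Action user, grp, other;

    PermissionBits(String owner, String group,
                   Action user, Action grp, Action other) {
      this.owner = owner; this.group = group;
      this.user = user; this.grp = grp; this.other = other;
    }
  }

  /** Exactly one class (owner, group, other) is selected and decides the outcome. */
  static boolean permitted(String userName, Set<String> groups,
                           PermissionBits p, Action access) {
    if (userName.equals(p.owner)) {        // owner class
      return p.user.implies(access);
    } else if (groups.contains(p.group)) { // group class
      return p.grp.implies(access);
    } else {                               // other class
      return p.other.implies(access);
    }
  }

  public static void main(String[] args) {
    PermissionBits bits = new PermissionBits("alice", "staff",
        Action.READ_WRITE, Action.READ, Action.NONE);
    System.out.println(permitted("alice", Set.of("staff"), bits, Action.WRITE)); // true
    System.out.println(permitted("bob",   Set.of("staff"), bits, Action.WRITE)); // false
    System.out.println(permitted("eve",   Set.of(),        bits, Action.READ));  // false
  }
}

An owner whose bits deny the request is rejected outright, even if the group or other bits would have allowed it; the same short-circuit is visible in check() and in the foundMatch logic of checkAccessAcl() above.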
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.zip.Checksum; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Storage.StorageState; import org.apache.hadoop.util.StringUtils; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; /** * Extension of FSImage for the backup node. * This class handles the setup of the journaling * spool on the backup namenode. */ @InterfaceAudience.Private public class BackupImage extends FSImage { /** Backup input stream for loading edits into memory */ private final EditLogBackupInputStream backupInputStream = new EditLogBackupInputStream("Data from remote NameNode"); /** * Current state of the BackupNode. The BackupNode's state * transitions are as follows: * * Initial: DROP_UNTIL_NEXT_ROLL * - Transitions to JOURNAL_ONLY the next time the log rolls * - Transitions to IN_SYNC in convergeJournalSpool * - Transitions back to JOURNAL_ONLY if the log rolls while * stopApplyingOnNextRoll is true. */ volatile BNState bnState; static enum BNState { /** * Edits from the NN should be dropped. On the next log roll, * transition to JOURNAL_ONLY state */ DROP_UNTIL_NEXT_ROLL, /** * Edits from the NN should be written to the local edits log * but not applied to the namespace. */ JOURNAL_ONLY, /** * Edits should be written to the local edits log and applied * to the local namespace. */ IN_SYNC; } /** * Flag to indicate that the next time the NN rolls, the BN * should transition from to JOURNAL_ONLY state. * {@see #freezeNamespaceAtNextRoll()} */ private boolean stopApplyingEditsOnNextRoll = false; private FSNamesystem namesystem; /** * Construct a backup image. * @param conf Configuration * @throws IOException if storage cannot be initialised. */ BackupImage(Configuration conf) throws IOException { super(conf); storage.setDisablePreUpgradableLayoutCheck(true); bnState = BNState.DROP_UNTIL_NEXT_ROLL; } synchronized FSNamesystem getNamesystem() { return namesystem; } synchronized void setNamesystem(FSNamesystem fsn) { // Avoids overriding this.namesystem object if (namesystem == null) { this.namesystem = fsn; } } /** * Analyze backup storage directories for consistency.<br> * Recover from incomplete checkpoints if required.<br> * Read VERSION and fstime files if exist.<br> * Do not load image or edits. 
* * @throws IOException if the node should shutdown. */ void recoverCreateRead() throws IOException { for (Iterator<StorageDirectory> it = storage.dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); StorageState curState; try { curState = sd.analyzeStorage(HdfsServerConstants.StartupOption.REGULAR, storage); // sd is locked but not opened switch(curState) { case NON_EXISTENT: // fail if any of the configured storage dirs are inaccessible throw new InconsistentFSStateException(sd.getRoot(), "checkpoint directory does not exist or is not accessible."); case NOT_FORMATTED: // for backup node all directories may be unformatted initially LOG.info("Storage directory " + sd.getRoot() + " is not formatted."); LOG.info("Formatting ..."); sd.clearDirectory(); // create empty current break; case NORMAL: break; default: // recovery is possible sd.doRecover(curState); } if(curState != StorageState.NOT_FORMATTED) { // read and verify consistency with other directories storage.readProperties(sd); } } catch(IOException ioe) { sd.unlock(); throw ioe; } } } /** * Receive a batch of edits from the NameNode. * * Depending on bnState, different actions are taken. See * {@link BackupImage.BNState} * * @param firstTxId first txid in batch * @param numTxns number of transactions * @param data serialized journal records. * @throws IOException * @see #convergeJournalSpool() */ synchronized void journal(long firstTxId, int numTxns, byte[] data) throws IOException { if (LOG.isTraceEnabled()) { LOG.trace("Got journal, " + "state = " + bnState + "; firstTxId = " + firstTxId + "; numTxns = " + numTxns); } switch(bnState) { case DROP_UNTIL_NEXT_ROLL: return; case IN_SYNC: // update NameSpace in memory applyEdits(firstTxId, numTxns, data); break; case JOURNAL_ONLY: break; default: throw new AssertionError("Unhandled state: " + bnState); } // write to BN's local edit log. logEditsLocally(firstTxId, numTxns, data); } /** * Write the batch of edits to the local copy of the edit logs. */ private void logEditsLocally(long firstTxId, int numTxns, byte[] data) { long expectedTxId = editLog.getLastWrittenTxId() + 1; Preconditions.checkState(firstTxId == expectedTxId, "received txid batch starting at %s but expected txn %s", firstTxId, expectedTxId); editLog.setNextTxId(firstTxId + numTxns - 1); editLog.logEdit(data.length, data); editLog.logSync(); } /** * Apply the batch of edits to the local namespace. */ private synchronized void applyEdits(long firstTxId, int numTxns, byte[] data) throws IOException { Preconditions.checkArgument(firstTxId == lastAppliedTxId + 1, "Received txn batch starting at %s but expected %s", firstTxId, lastAppliedTxId + 1); assert backupInputStream.length() == 0 : "backup input stream is not empty"; try { if (LOG.isTraceEnabled()) { LOG.debug("data:" + StringUtils.byteToHexString(data)); } FSEditLogLoader logLoader = new FSEditLogLoader(getNamesystem(), lastAppliedTxId); int logVersion = storage.getLayoutVersion(); backupInputStream.setBytes(data, logVersion); long numTxnsAdvanced = logLoader.loadEditRecords( backupInputStream, true, lastAppliedTxId + 1, null, null); if (numTxnsAdvanced != numTxns) { throw new IOException("Batch of txns starting at txnid " + firstTxId + " was supposed to contain " + numTxns + " transactions, but we were only able to advance by " + numTxnsAdvanced); } lastAppliedTxId = logLoader.getLastAppliedTxId(); FSImage.updateCountForQuota( getNamesystem().dir.getBlockStoragePolicySuite(), getNamesystem().dir.rootDir); // inefficient! 
} finally { backupInputStream.clear(); } } /** * Transition the BackupNode from JOURNAL_ONLY state to IN_SYNC state. * This is done by repeated invocations of tryConvergeJournalSpool until * we are caught up to the latest in-progress edits file. */ void convergeJournalSpool() throws IOException { Preconditions.checkState(bnState == BNState.JOURNAL_ONLY, "bad state: %s", bnState); while (!tryConvergeJournalSpool()) { ; } assert bnState == BNState.IN_SYNC; } private boolean tryConvergeJournalSpool() throws IOException { Preconditions.checkState(bnState == BNState.JOURNAL_ONLY, "bad state: %s", bnState); // This section is unsynchronized so we can continue to apply // ahead of where we're reading, concurrently. Since the state // is JOURNAL_ONLY at this point, we know that lastAppliedTxId // doesn't change, and curSegmentTxId only increases while (lastAppliedTxId < editLog.getCurSegmentTxId() - 1) { long target = editLog.getCurSegmentTxId(); LOG.info("Loading edits into backupnode to try to catch up from txid " + lastAppliedTxId + " to " + target); FSImageTransactionalStorageInspector inspector = new FSImageTransactionalStorageInspector(); storage.inspectStorageDirs(inspector); editLog.recoverUnclosedStreams(); Iterable<EditLogInputStream> editStreamsAll = editLog.selectInputStreams(lastAppliedTxId, target - 1); // remove inprogress List<EditLogInputStream> editStreams = Lists.newArrayList(); for (EditLogInputStream s : editStreamsAll) { if (s.getFirstTxId() != editLog.getCurSegmentTxId()) { editStreams.add(s); } } loadEdits(editStreams, getNamesystem()); } // now, need to load the in-progress file synchronized (this) { if (lastAppliedTxId != editLog.getCurSegmentTxId() - 1) { LOG.debug("Logs rolled while catching up to current segment"); return false; // drop lock and try again to load local logs } EditLogInputStream stream = null; Collection<EditLogInputStream> editStreams = getEditLog().selectInputStreams( getEditLog().getCurSegmentTxId(), getEditLog().getCurSegmentTxId()); for (EditLogInputStream s : editStreams) { if (s.getFirstTxId() == getEditLog().getCurSegmentTxId()) { stream = s; } break; } if (stream == null) { LOG.warn("Unable to find stream starting with " + editLog.getCurSegmentTxId() + ". This indicates that there is an error in synchronization in BackupImage"); return false; } try { long remainingTxns = getEditLog().getLastWrittenTxId() - lastAppliedTxId; LOG.info("Going to finish converging with remaining " + remainingTxns + " txns from in-progress stream " + stream); FSEditLogLoader loader = new FSEditLogLoader(getNamesystem(), lastAppliedTxId); loader.loadFSEdits(stream, lastAppliedTxId + 1); lastAppliedTxId = loader.getLastAppliedTxId(); assert lastAppliedTxId == getEditLog().getLastWrittenTxId(); } finally { FSEditLog.closeAllStreams(editStreams); } LOG.info("Successfully synced BackupNode with NameNode at txnid " + lastAppliedTxId); setState(BNState.IN_SYNC); } return true; } /** * Transition edit log to a new state, logging as necessary. */ private synchronized void setState(BNState newState) { if (LOG.isDebugEnabled()) { LOG.debug("State transition " + bnState + " -> " + newState); } bnState = newState; } /** * Receive a notification that the NameNode has begun a new edit log. * This causes the BN to also start the new edit log in its local * directories. 
*/ synchronized void namenodeStartedLogSegment(long txid) throws IOException { LOG.info("NameNode started a new log segment at txid " + txid); if (editLog.isSegmentOpen()) { if (editLog.getLastWrittenTxId() == txid - 1) { // We are in sync with the NN, so end and finalize the current segment editLog.endCurrentLogSegment(false); } else { // We appear to have missed some transactions -- the NN probably // lost contact with us temporarily. So, mark the current segment // as aborted. LOG.warn("NN started new log segment at txid " + txid + ", but BN had only written up to txid " + editLog.getLastWrittenTxId() + "in the log segment starting at " + editLog.getCurSegmentTxId() + ". Aborting this " + "log segment."); editLog.abortCurrentLogSegment(); } } editLog.setNextTxId(txid); editLog.startLogSegment(txid, false, namesystem.getEffectiveLayoutVersion()); if (bnState == BNState.DROP_UNTIL_NEXT_ROLL) { setState(BNState.JOURNAL_ONLY); } if (stopApplyingEditsOnNextRoll) { if (bnState == BNState.IN_SYNC) { LOG.info("Stopped applying edits to prepare for checkpoint."); setState(BNState.JOURNAL_ONLY); } stopApplyingEditsOnNextRoll = false; notifyAll(); } } /** * Request that the next time the BN receives a log roll, it should * stop applying the edits log to the local namespace. This is * typically followed on by a call to {@link #waitUntilNamespaceFrozen()} */ synchronized void freezeNamespaceAtNextRoll() { stopApplyingEditsOnNextRoll = true; } /** * After {@link #freezeNamespaceAtNextRoll()} has been called, wait until * the BN receives notification of the next log roll. */ synchronized void waitUntilNamespaceFrozen() throws IOException { if (bnState != BNState.IN_SYNC) return; LOG.info("Waiting until the NameNode rolls its edit logs in order " + "to freeze the BackupNode namespace."); while (bnState == BNState.IN_SYNC) { Preconditions.checkState(stopApplyingEditsOnNextRoll, "If still in sync, we should still have the flag set to " + "freeze at next roll"); try { wait(); } catch (InterruptedException ie) { LOG.warn("Interrupted waiting for namespace to freeze", ie); throw new IOException(ie); } } LOG.info("BackupNode namespace frozen."); } /** * Override close() so that we don't finalize edit logs. */ @Override public synchronized void close() throws IOException { editLog.abortCurrentLogSegment(); storage.close(); } }
14,700
33.50939
96
java
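The BackupImage record above documents a three-state lifecycle (DROP_UNTIL_NEXT_ROLL, JOURNAL_ONLY, IN_SYNC). The toy model below, with hypothetical method names, mirrors only the transition logic of journal(), namenodeStartedLogSegment(), convergeJournalSpool() and freezeNamespaceAtNextRoll(); it is a reading aid under those assumptions, not the Hadoop class.

public class BackupStateSketch {

  enum BNState { DROP_UNTIL_NEXT_ROLL, JOURNAL_ONLY, IN_SYNC }

  private BNState state = BNState.DROP_UNTIL_NEXT_ROLL;
  private boolean freezeOnNextRoll = false;

  /** Mirrors journal(): decide what to do with an incoming batch of edits. */
  synchronized String onEdits() {
    switch (state) {
      case DROP_UNTIL_NEXT_ROLL: return "drop";
      case JOURNAL_ONLY:         return "write to local edit log only";
      case IN_SYNC:              return "apply to namespace and write locally";
      default: throw new AssertionError("unhandled state " + state);
    }
  }

  /** Mirrors namenodeStartedLogSegment(): called when the NameNode rolls its log. */
  synchronized void onLogRoll() {
    if (state == BNState.DROP_UNTIL_NEXT_ROLL) {
      state = BNState.JOURNAL_ONLY;   // start journalling from a clean segment
    } else if (freezeOnNextRoll && state == BNState.IN_SYNC) {
      state = BNState.JOURNAL_ONLY;   // stop applying edits before a checkpoint
    }
    freezeOnNextRoll = false;
  }

  /** Mirrors convergeJournalSpool(): catch up from local logs, then stay in sync. */
  synchronized void onConverged() {
    if (state == BNState.JOURNAL_ONLY) {
      state = BNState.IN_SYNC;
    }
  }

  /** Mirrors freezeNamespaceAtNextRoll(). */
  synchronized void freezeAtNextRoll() { freezeOnNextRoll = true; }

  public static void main(String[] args) {
    BackupStateSketch bn = new BackupStateSketch();
    System.out.println(bn.onEdits());  // drop
    bn.onLogRoll();
    System.out.println(bn.onEdits());  // write to local edit log only
    bn.onConverged();
    System.out.println(bn.onEdits());  // apply to namespace and write locally
    bn.freezeAtNextRoll();
    bn.onLogRoll();
    System.out.println(bn.onEdits());  // write to local edit log only
  }
}

Calling onLogRoll() after freezeAtNextRoll() drops the node back to JOURNAL_ONLY, which is how the real BackupImage quiesces its namespace before a checkpoint.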
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Comparator; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.io.IOUtils; import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; /** * A merged input stream that handles failover between different edit logs. * * We will currently try each edit log stream exactly once. In other words, we * don't handle the "ping pong" scenario where different edit logs contain a * different subset of the available edits. */ class RedundantEditLogInputStream extends EditLogInputStream { public static final Log LOG = LogFactory.getLog(EditLogInputStream.class.getName()); private int curIdx; private long prevTxId; private final EditLogInputStream[] streams; /** * States that the RedundantEditLogInputStream can be in. * * <pre> * start (if no streams) * | * V * PrematureEOFException +----------------+ * +-------------->| EOF |<--------------+ * | +----------------+ | * | | * | start (if there are streams) | * | | | * | V | EOF * | resync +----------------+ skipUntil +---------+ * | +---------->| SKIP_UNTIL |----------->| OK | * | | +----------------+ +---------+ * | | | IOE ^ fail over to | IOE * | | V | next stream | * +----------------------+ +----------------+ | * | STREAM_FAILED_RESYNC | | STREAM_FAILED |<----------+ * +----------------------+ +----------------+ * ^ Recovery mode | * +--------------------+ * </pre> */ static private enum State { /** We need to skip until prevTxId + 1 */ SKIP_UNTIL, /** We're ready to read opcodes out of the current stream */ OK, /** The current stream has failed. */ STREAM_FAILED, /** The current stream has failed, and resync() was called. */ STREAM_FAILED_RESYNC, /** There are no more opcodes to read from this * RedundantEditLogInputStream */ EOF; } private State state; private IOException prevException; RedundantEditLogInputStream(Collection<EditLogInputStream> streams, long startTxId) { this.curIdx = 0; this.prevTxId = (startTxId == HdfsServerConstants.INVALID_TXID) ? HdfsServerConstants.INVALID_TXID : (startTxId - 1); this.state = (streams.isEmpty()) ? State.EOF : State.SKIP_UNTIL; this.prevException = null; // EditLogInputStreams in a RedundantEditLogInputStream must be finalized, // and can't be pre-transactional. 
EditLogInputStream first = null; for (EditLogInputStream s : streams) { Preconditions.checkArgument(s.getFirstTxId() != HdfsServerConstants.INVALID_TXID, "invalid first txid in stream: %s", s); Preconditions.checkArgument(s.getLastTxId() != HdfsServerConstants.INVALID_TXID, "invalid last txid in stream: %s", s); if (first == null) { first = s; } else { Preconditions.checkArgument(s.getFirstTxId() == first.getFirstTxId(), "All streams in the RedundantEditLogInputStream must have the same " + "start transaction ID! " + first + " had start txId " + first.getFirstTxId() + ", but " + s + " had start txId " + s.getFirstTxId()); } } this.streams = streams.toArray(new EditLogInputStream[0]); // We sort the streams here so that the streams that end later come first. Arrays.sort(this.streams, new Comparator<EditLogInputStream>() { @Override public int compare(EditLogInputStream a, EditLogInputStream b) { return Longs.compare(b.getLastTxId(), a.getLastTxId()); } }); } @Override public String getCurrentStreamName() { return streams[curIdx].getCurrentStreamName(); } @Override public String getName() { StringBuilder bld = new StringBuilder(); String prefix = ""; for (EditLogInputStream elis : streams) { bld.append(prefix); bld.append(elis.getName()); prefix = ", "; } return bld.toString(); } @Override public long getFirstTxId() { return streams[curIdx].getFirstTxId(); } @Override public long getLastTxId() { return streams[curIdx].getLastTxId(); } @Override public void close() throws IOException { IOUtils.cleanup(LOG, streams); } @Override protected FSEditLogOp nextValidOp() { try { if (state == State.STREAM_FAILED) { state = State.STREAM_FAILED_RESYNC; } return nextOp(); } catch (IOException e) { return null; } } @Override protected FSEditLogOp nextOp() throws IOException { while (true) { switch (state) { case SKIP_UNTIL: try { if (prevTxId != HdfsServerConstants.INVALID_TXID) { LOG.info("Fast-forwarding stream '" + streams[curIdx].getName() + "' to transaction ID " + (prevTxId + 1)); streams[curIdx].skipUntil(prevTxId + 1); } } catch (IOException e) { prevException = e; state = State.STREAM_FAILED; } state = State.OK; break; case OK: try { FSEditLogOp op = streams[curIdx].readOp(); if (op == null) { state = State.EOF; if (streams[curIdx].getLastTxId() == prevTxId) { return null; } else { throw new PrematureEOFException("got premature end-of-file " + "at txid " + prevTxId + "; expected file to go up to " + streams[curIdx].getLastTxId()); } } prevTxId = op.getTransactionId(); return op; } catch (IOException e) { prevException = e; state = State.STREAM_FAILED; } break; case STREAM_FAILED: if (curIdx + 1 == streams.length) { throw prevException; } long oldLast = streams[curIdx].getLastTxId(); long newLast = streams[curIdx + 1].getLastTxId(); if (newLast < oldLast) { throw new IOException("We encountered an error reading " + streams[curIdx].getName() + ". During automatic edit log " + "failover, we noticed that all of the remaining edit log " + "streams are shorter than the current one! The best " + "remaining edit log ends at transaction " + newLast + ", but we thought we could read up to transaction " + oldLast + ". 
If you continue, metadata will be lost forever!"); } LOG.error("Got error reading edit log input stream " + streams[curIdx].getName() + "; failing over to edit log " + streams[curIdx + 1].getName(), prevException); curIdx++; state = State.SKIP_UNTIL; break; case STREAM_FAILED_RESYNC: if (curIdx + 1 == streams.length) { if (prevException instanceof PrematureEOFException) { // bypass early EOF check state = State.EOF; } else { streams[curIdx].resync(); state = State.SKIP_UNTIL; } } else { LOG.error("failing over to edit log " + streams[curIdx + 1].getName()); curIdx++; state = State.SKIP_UNTIL; } break; case EOF: return null; } } } @Override public int getVersion(boolean verifyVersion) throws IOException { return streams[curIdx].getVersion(verifyVersion); } @Override public long getPosition() { return streams[curIdx].getPosition(); } @Override public long length() throws IOException { return streams[curIdx].length(); } @Override public boolean isInProgress() { return streams[curIdx].isInProgress(); } static private final class PrematureEOFException extends IOException { private static final long serialVersionUID = 1L; PrematureEOFException(String msg) { super(msg); } } @Override public void setMaxOpSize(int maxOpSize) { for (EditLogInputStream elis : streams) { elis.setMaxOpSize(maxOpSize); } } @Override public boolean isLocalLog() { return streams[curIdx].isLocalLog(); } }
9,686
32.635417
86
java
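RedundantEditLogInputStream, shown in the record above, fails over between redundant edit-log sources while delivering each transaction at most once. The sketch below uses hypothetical TxSource and RedundantReaderSketch types to illustrate that failover-plus-skip idea; it omits the resync, stream-sorting, and premature-EOF handling of the real class.

import java.io.IOException;
import java.util.List;

public class RedundantReaderSketch {

  /** A single redundant source of transaction ids (hypothetical interface). */
  interface TxSource {
    /** Returns the next txid, or -1 at end of stream. */
    long next() throws IOException;
  }

  private final List<TxSource> sources;
  private int cur = 0;
  private long prevTxId = -1;   // last txid successfully handed to the caller

  RedundantReaderSketch(List<TxSource> sources) { this.sources = sources; }

  /** Read the next txid, failing over to later sources on IOException. */
  long read() throws IOException {
    while (true) {
      try {
        long txid;
        // Skip records the caller has already seen (mirrors SKIP_UNTIL).
        do {
          txid = sources.get(cur).next();
        } while (txid != -1 && txid <= prevTxId);
        if (txid != -1) {
          prevTxId = txid;
        }
        return txid;
      } catch (IOException e) {
        if (cur + 1 == sources.size()) {
          throw e;              // no redundant source left (mirrors STREAM_FAILED)
        }
        cur++;                  // fail over; the next read resumes after prevTxId
      }
    }
  }

  public static void main(String[] args) throws IOException {
    // First source dies after txid 2; the second replays txids 1..4.
    TxSource flaky = new TxSource() {
      private long t = 0;
      public long next() throws IOException {
        if (++t > 2) throw new IOException("simulated read failure");
        return t;
      }
    };
    TxSource healthy = new TxSource() {
      private long t = 0;
      public long next() { return ++t > 4 ? -1 : t; }
    };
    RedundantReaderSketch reader = new RedundantReaderSketch(List.of(flaky, healthy));
    for (long txid = reader.read(); txid != -1; txid = reader.read()) {
      System.out.println("applied txid " + txid);   // prints 1, 2, 3, 4 exactly once
    }
  }
}

The real class additionally sorts its streams so the one ending latest is tried first and distinguishes a genuine end-of-log from a premature EOF, but the fail-over-then-skip loop above is the core of its nextOp() state machine.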
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TruncateOp; import static org.apache.hadoop.hdfs.server.namenode.FSImageFormat.renameReservedPathsOnUpgrade; import static org.apache.hadoop.util.Time.monotonicNow; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import java.util.Arrays; import java.util.EnumMap; import java.util.EnumSet; import java.util.List; import com.google.common.collect.Lists; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LayoutVersion; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddBlockOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCacheDirectiveInfoOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCachePoolOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllocateBlockIdOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AllowSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AppendOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.BlockListUpdatingOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ClearNSQuotaOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ConcatDeleteOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CreateSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteSnapshotOp; import 
org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DisallowSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.GetDelegationTokenOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCacheDirectiveInfoOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ModifyCachePoolOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ReassignLeaseOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCacheDirectiveInfoOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveCachePoolOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RemoveXAttrOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOldOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenameSnapshotOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RenewDelegationTokenOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetAclOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.RollingUpgradeOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV1Op; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetGenstampV2Op; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetNSQuotaOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetPermissionsOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetQuotaOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetReplicationOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetStoragePolicyOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetXAttrOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; import org.apache.hadoop.hdfs.util.Holder; import org.apache.hadoop.util.ChunkedArrayList; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; @InterfaceAudience.Private @InterfaceStability.Evolving public class FSEditLogLoader { static final Log LOG = LogFactory.getLog(FSEditLogLoader.class.getName()); static final long REPLAY_TRANSACTION_LOG_INTERVAL = 1000; // 1sec private final FSNamesystem fsNamesys; private long lastAppliedTxId; /** Total number of end transactions loaded. 
*/ private int totalEdits = 0; public FSEditLogLoader(FSNamesystem fsNamesys, long lastAppliedTxId) { this.fsNamesys = fsNamesys; this.lastAppliedTxId = lastAppliedTxId; } long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId) throws IOException { return loadFSEdits(edits, expectedStartingTxId, null, null); } /** * Load an edit log, and apply the changes to the in-memory structure * This is where we apply edits that we've been writing to disk all * along. */ long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId, StartupOption startOpt, MetaRecoveryContext recovery) throws IOException { StartupProgress prog = NameNode.getStartupProgress(); Step step = createStartupProgressStep(edits); prog.beginStep(Phase.LOADING_EDITS, step); fsNamesys.writeLock(); try { long startTime = monotonicNow(); FSImage.LOG.info("Start loading edits file " + edits.getName()); long numEdits = loadEditRecords(edits, false, expectedStartingTxId, startOpt, recovery); FSImage.LOG.info("Edits file " + edits.getName() + " of size " + edits.length() + " edits # " + numEdits + " loaded in " + (monotonicNow()-startTime)/1000 + " seconds"); return numEdits; } finally { edits.close(); fsNamesys.writeUnlock(); prog.endStep(Phase.LOADING_EDITS, step); } } long loadEditRecords(EditLogInputStream in, boolean closeOnExit, long expectedStartingTxId, StartupOption startOpt, MetaRecoveryContext recovery) throws IOException { FSDirectory fsDir = fsNamesys.dir; EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts = new EnumMap<FSEditLogOpCodes, Holder<Integer>>(FSEditLogOpCodes.class); if (LOG.isTraceEnabled()) { LOG.trace("Acquiring write lock to replay edit log"); } fsNamesys.writeLock(); fsDir.writeLock(); long recentOpcodeOffsets[] = new long[4]; Arrays.fill(recentOpcodeOffsets, -1); long expectedTxId = expectedStartingTxId; long numEdits = 0; long lastTxId = in.getLastTxId(); long numTxns = (lastTxId - expectedStartingTxId) + 1; StartupProgress prog = NameNode.getStartupProgress(); Step step = createStartupProgressStep(in); prog.setTotal(Phase.LOADING_EDITS, step, numTxns); Counter counter = prog.getCounter(Phase.LOADING_EDITS, step); long lastLogTime = monotonicNow(); long lastInodeId = fsNamesys.dir.getLastInodeId(); try { while (true) { try { FSEditLogOp op; try { op = in.readOp(); if (op == null) { break; } } catch (Throwable e) { // Handle a problem with our input check203UpgradeFailure(in.getVersion(true), e); String errorMessage = formatEditLogReplayError(in, recentOpcodeOffsets, expectedTxId); FSImage.LOG.error(errorMessage, e); if (recovery == null) { // We will only try to skip over problematic opcodes when in // recovery mode. throw new EditLogInputException(errorMessage, e, numEdits); } MetaRecoveryContext.editLogLoaderPrompt( "We failed to read txId " + expectedTxId, recovery, "skipping the bad section in the log"); in.resync(); continue; } recentOpcodeOffsets[(int)(numEdits % recentOpcodeOffsets.length)] = in.getPosition(); if (op.hasTransactionId()) { if (op.getTransactionId() > expectedTxId) { MetaRecoveryContext.editLogLoaderPrompt("There appears " + "to be a gap in the edit log. We expected txid " + expectedTxId + ", but got txid " + op.getTransactionId() + ".", recovery, "ignoring missing " + " transaction IDs"); } else if (op.getTransactionId() < expectedTxId) { MetaRecoveryContext.editLogLoaderPrompt("There appears " + "to be an out-of-order edit in the edit log. 
We " + "expected txid " + expectedTxId + ", but got txid " + op.getTransactionId() + ".", recovery, "skipping the out-of-order edit"); continue; } } try { if (LOG.isTraceEnabled()) { LOG.trace("op=" + op + ", startOpt=" + startOpt + ", numEdits=" + numEdits + ", totalEdits=" + totalEdits); } long inodeId = applyEditLogOp(op, fsDir, startOpt, in.getVersion(true), lastInodeId); if (lastInodeId < inodeId) { lastInodeId = inodeId; } } catch (RollingUpgradeOp.RollbackException e) { throw e; } catch (Throwable e) { LOG.error("Encountered exception on operation " + op, e); if (recovery == null) { throw e instanceof IOException? (IOException)e: new IOException(e); } MetaRecoveryContext.editLogLoaderPrompt("Failed to " + "apply edit log operation " + op + ": error " + e.getMessage(), recovery, "applying edits"); } // Now that the operation has been successfully decoded and // applied, update our bookkeeping. incrOpCount(op.opCode, opCounts, step, counter); if (op.hasTransactionId()) { lastAppliedTxId = op.getTransactionId(); expectedTxId = lastAppliedTxId + 1; } else { expectedTxId = lastAppliedTxId = expectedStartingTxId; } // log progress if (op.hasTransactionId()) { long now = monotonicNow(); if (now - lastLogTime > REPLAY_TRANSACTION_LOG_INTERVAL) { long deltaTxId = lastAppliedTxId - expectedStartingTxId + 1; int percent = Math.round((float) deltaTxId / numTxns * 100); LOG.info("replaying edit log: " + deltaTxId + "/" + numTxns + " transactions completed. (" + percent + "%)"); lastLogTime = now; } } numEdits++; totalEdits++; } catch (RollingUpgradeOp.RollbackException e) { LOG.info("Stopped at OP_START_ROLLING_UPGRADE for rollback."); break; } catch (MetaRecoveryContext.RequestStopException e) { MetaRecoveryContext.LOG.warn("Stopped reading edit log at " + in.getPosition() + "/" + in.length()); break; } } } finally { fsNamesys.dir.resetLastInodeId(lastInodeId); if(closeOnExit) { in.close(); } fsDir.writeUnlock(); fsNamesys.writeUnlock(); if (LOG.isTraceEnabled()) { LOG.trace("replaying edit log finished"); } if (FSImage.LOG.isDebugEnabled()) { dumpOpCounts(opCounts); } } return numEdits; } // allocate and update last allocated inode id private long getAndUpdateLastInodeId(long inodeIdFromOp, int logVersion, long lastInodeId) throws IOException { long inodeId = inodeIdFromOp; if (inodeId == HdfsConstants.GRANDFATHER_INODE_ID) { if (NameNodeLayoutVersion.supports( LayoutVersion.Feature.ADD_INODE_ID, logVersion)) { throw new IOException("The layout version " + logVersion + " supports inodeId but gave bogus inodeId"); } inodeId = fsNamesys.dir.allocateNewInodeId(); } else { // need to reset lastInodeId. 
fsnamesys gets lastInodeId firstly from // fsimage but editlog captures more recent inodeId allocations if (inodeId > lastInodeId) { fsNamesys.dir.resetLastInodeId(inodeId); } } return inodeId; } @SuppressWarnings("deprecation") private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir, StartupOption startOpt, int logVersion, long lastInodeId) throws IOException { long inodeId = HdfsConstants.GRANDFATHER_INODE_ID; if (LOG.isTraceEnabled()) { LOG.trace("replaying edit log: " + op); } final boolean toAddRetryCache = fsNamesys.hasRetryCache() && op.hasRpcIds(); switch (op.opCode) { case OP_ADD: { AddCloseOp addCloseOp = (AddCloseOp)op; final String path = renameReservedPathsOnUpgrade(addCloseOp.path, logVersion); if (FSNamesystem.LOG.isDebugEnabled()) { FSNamesystem.LOG.debug(op.opCode + ": " + path + " numblocks : " + addCloseOp.blocks.length + " clientHolder " + addCloseOp.clientName + " clientMachine " + addCloseOp.clientMachine); } // There are 3 cases here: // 1. OP_ADD to create a new file // 2. OP_ADD to update file blocks // 3. OP_ADD to open file for append (old append) // See if the file already exists (persistBlocks call) INodesInPath iip = fsDir.getINodesInPath(path, true); INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path, true); if (oldFile != null && addCloseOp.overwrite) { // This is OP_ADD with overwrite FSDirDeleteOp.deleteForEditLog(fsDir, path, addCloseOp.mtime); iip = INodesInPath.replace(iip, iip.length() - 1, null); oldFile = null; } INodeFile newFile = oldFile; if (oldFile == null) { // this is OP_ADD on a new file (case 1) // versions > 0 support per file replication // get name and replication final short replication = fsNamesys.getBlockManager() .adjustReplication(addCloseOp.replication); assert addCloseOp.blocks.length == 0; // add to the file tree inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion, lastInodeId); newFile = FSDirWriteFileOp.addFileForEditLog(fsDir, inodeId, iip.getExistingINodes(), iip.getLastLocalName(), addCloseOp.permissions, addCloseOp.aclEntries, addCloseOp.xAttrs, replication, addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true, addCloseOp.clientName, addCloseOp.clientMachine, addCloseOp.storagePolicyId); iip = INodesInPath.replace(iip, iip.length() - 1, newFile); fsNamesys.leaseManager.addLease(addCloseOp.clientName, newFile.getId()); // add the op into retry cache if necessary if (toAddRetryCache) { HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog( fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, newFile, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID, false, iip); fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId, addCloseOp.rpcCallId, stat); } } else { // This is OP_ADD on an existing file (old append) if (!oldFile.isUnderConstruction()) { // This is case 3: a call to append() on an already-closed file. 
if (FSNamesystem.LOG.isDebugEnabled()) { FSNamesystem.LOG.debug("Reopening an already-closed file " + "for append"); } LocatedBlock lb = FSDirAppendOp.prepareFileForAppend(fsNamesys, iip, addCloseOp.clientName, addCloseOp.clientMachine, false, false, false); // add the op into retry cache if necessary if (toAddRetryCache) { HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog( fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, newFile, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID, false, iip); fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId, addCloseOp.rpcCallId, new LastBlockWithStatus(lb, stat)); } } } // Fall-through for case 2. // Regardless of whether it's a new file or an updated file, // update the block list. // Update the salient file attributes. newFile.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID); newFile.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID); updateBlocks(fsDir, addCloseOp, iip, newFile); break; } case OP_CLOSE: { AddCloseOp addCloseOp = (AddCloseOp)op; final String path = renameReservedPathsOnUpgrade(addCloseOp.path, logVersion); if (FSNamesystem.LOG.isDebugEnabled()) { FSNamesystem.LOG.debug(op.opCode + ": " + path + " numblocks : " + addCloseOp.blocks.length + " clientHolder " + addCloseOp.clientName + " clientMachine " + addCloseOp.clientMachine); } final INodesInPath iip = fsDir.getINodesInPath(path, true); final INodeFile file = INodeFile.valueOf(iip.getLastINode(), path); // Update the salient file attributes. file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID); file.setModificationTime(addCloseOp.mtime, Snapshot.CURRENT_STATE_ID); updateBlocks(fsDir, addCloseOp, iip, file); // Now close the file if (!file.isUnderConstruction() && logVersion <= LayoutVersion.BUGFIX_HDFS_2991_VERSION) { // There was a bug (HDFS-2991) in hadoop < 0.23.1 where OP_CLOSE // could show up twice in a row. But after that version, this // should be fixed, so we should treat it as an error. throw new IOException( "File is not under construction: " + path); } // One might expect that you could use removeLease(holder, path) here, // but OP_CLOSE doesn't serialize the holder. So, remove the inode. 
if (file.isUnderConstruction()) { fsNamesys.leaseManager.removeLeases(Lists.newArrayList(file.getId())); file.toCompleteFile(file.getModificationTime()); } break; } case OP_APPEND: { AppendOp appendOp = (AppendOp) op; final String path = renameReservedPathsOnUpgrade(appendOp.path, logVersion); if (FSNamesystem.LOG.isDebugEnabled()) { FSNamesystem.LOG.debug(op.opCode + ": " + path + " clientName " + appendOp.clientName + " clientMachine " + appendOp.clientMachine + " newBlock " + appendOp.newBlock); } INodesInPath iip = fsDir.getINodesInPath4Write(path); INodeFile file = INodeFile.valueOf(iip.getLastINode(), path); if (!file.isUnderConstruction()) { LocatedBlock lb = FSDirAppendOp.prepareFileForAppend(fsNamesys, iip, appendOp.clientName, appendOp.clientMachine, appendOp.newBlock, false, false); // add the op into retry cache if necessary if (toAddRetryCache) { HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog( fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, file, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID, false, iip); fsNamesys.addCacheEntryWithPayload(appendOp.rpcClientId, appendOp.rpcCallId, new LastBlockWithStatus(lb, stat)); } } break; } case OP_UPDATE_BLOCKS: { UpdateBlocksOp updateOp = (UpdateBlocksOp)op; final String path = renameReservedPathsOnUpgrade(updateOp.path, logVersion); if (FSNamesystem.LOG.isDebugEnabled()) { FSNamesystem.LOG.debug(op.opCode + ": " + path + " numblocks : " + updateOp.blocks.length); } INodesInPath iip = fsDir.getINodesInPath(path, true); INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path); // Update in-memory data structures updateBlocks(fsDir, updateOp, iip, oldFile); if (toAddRetryCache) { fsNamesys.addCacheEntry(updateOp.rpcClientId, updateOp.rpcCallId); } break; } case OP_ADD_BLOCK: { AddBlockOp addBlockOp = (AddBlockOp) op; String path = renameReservedPathsOnUpgrade(addBlockOp.getPath(), logVersion); if (FSNamesystem.LOG.isDebugEnabled()) { FSNamesystem.LOG.debug(op.opCode + ": " + path + " new block id : " + addBlockOp.getLastBlock().getBlockId()); } INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(path), path); // add the new block to the INodeFile addNewBlock(fsDir, addBlockOp, oldFile); break; } case OP_SET_REPLICATION: { SetReplicationOp setReplicationOp = (SetReplicationOp)op; short replication = fsNamesys.getBlockManager().adjustReplication( setReplicationOp.replication); FSDirAttrOp.unprotectedSetReplication(fsDir, renameReservedPathsOnUpgrade( setReplicationOp.path, logVersion), replication, null); break; } case OP_CONCAT_DELETE: { ConcatDeleteOp concatDeleteOp = (ConcatDeleteOp)op; String trg = renameReservedPathsOnUpgrade(concatDeleteOp.trg, logVersion); String[] srcs = new String[concatDeleteOp.srcs.length]; for (int i=0; i<srcs.length; i++) { srcs[i] = renameReservedPathsOnUpgrade(concatDeleteOp.srcs[i], logVersion); } INodesInPath targetIIP = fsDir.getINodesInPath4Write(trg); INodeFile[] srcFiles = new INodeFile[srcs.length]; for (int i = 0; i < srcs.length; i++) { INodesInPath srcIIP = fsDir.getINodesInPath4Write(srcs[i]); srcFiles[i] = srcIIP.getLastINode().asFile(); } FSDirConcatOp.unprotectedConcat(fsDir, targetIIP, srcFiles, concatDeleteOp.timestamp); if (toAddRetryCache) { fsNamesys.addCacheEntry(concatDeleteOp.rpcClientId, concatDeleteOp.rpcCallId); } break; } case OP_RENAME_OLD: { RenameOldOp renameOp = (RenameOldOp)op; final String src = renameReservedPathsOnUpgrade(renameOp.src, logVersion); final String dst = renameReservedPathsOnUpgrade(renameOp.dst, 
logVersion); FSDirRenameOp.renameForEditLog(fsDir, src, dst, renameOp.timestamp); if (toAddRetryCache) { fsNamesys.addCacheEntry(renameOp.rpcClientId, renameOp.rpcCallId); } break; } case OP_DELETE: { DeleteOp deleteOp = (DeleteOp)op; FSDirDeleteOp.deleteForEditLog( fsDir, renameReservedPathsOnUpgrade(deleteOp.path, logVersion), deleteOp.timestamp); if (toAddRetryCache) { fsNamesys.addCacheEntry(deleteOp.rpcClientId, deleteOp.rpcCallId); } break; } case OP_MKDIR: { MkdirOp mkdirOp = (MkdirOp)op; inodeId = getAndUpdateLastInodeId(mkdirOp.inodeId, logVersion, lastInodeId); FSDirMkdirOp.mkdirForEditLog(fsDir, inodeId, renameReservedPathsOnUpgrade(mkdirOp.path, logVersion), mkdirOp.permissions, mkdirOp.aclEntries, mkdirOp.timestamp); break; } case OP_SET_GENSTAMP_V1: { SetGenstampV1Op setGenstampV1Op = (SetGenstampV1Op)op; fsNamesys.getBlockIdManager().setGenerationStampV1( setGenstampV1Op.genStampV1); break; } case OP_SET_PERMISSIONS: { SetPermissionsOp setPermissionsOp = (SetPermissionsOp)op; FSDirAttrOp.unprotectedSetPermission(fsDir, renameReservedPathsOnUpgrade( setPermissionsOp.src, logVersion), setPermissionsOp.permissions); break; } case OP_SET_OWNER: { SetOwnerOp setOwnerOp = (SetOwnerOp)op; FSDirAttrOp.unprotectedSetOwner( fsDir, renameReservedPathsOnUpgrade(setOwnerOp.src, logVersion), setOwnerOp.username, setOwnerOp.groupname); break; } case OP_SET_NS_QUOTA: { SetNSQuotaOp setNSQuotaOp = (SetNSQuotaOp)op; FSDirAttrOp.unprotectedSetQuota( fsDir, renameReservedPathsOnUpgrade(setNSQuotaOp.src, logVersion), setNSQuotaOp.nsQuota, HdfsConstants.QUOTA_DONT_SET, null); break; } case OP_CLEAR_NS_QUOTA: { ClearNSQuotaOp clearNSQuotaOp = (ClearNSQuotaOp)op; FSDirAttrOp.unprotectedSetQuota( fsDir, renameReservedPathsOnUpgrade(clearNSQuotaOp.src, logVersion), HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET, null); break; } case OP_SET_QUOTA: SetQuotaOp setQuotaOp = (SetQuotaOp) op; FSDirAttrOp.unprotectedSetQuota(fsDir, renameReservedPathsOnUpgrade(setQuotaOp.src, logVersion), setQuotaOp.nsQuota, setQuotaOp.dsQuota, null); break; case OP_SET_QUOTA_BY_STORAGETYPE: FSEditLogOp.SetQuotaByStorageTypeOp setQuotaByStorageTypeOp = (FSEditLogOp.SetQuotaByStorageTypeOp) op; FSDirAttrOp.unprotectedSetQuota(fsDir, renameReservedPathsOnUpgrade(setQuotaByStorageTypeOp.src, logVersion), HdfsConstants.QUOTA_DONT_SET, setQuotaByStorageTypeOp.dsQuota, setQuotaByStorageTypeOp.type); break; case OP_TIMES: { TimesOp timesOp = (TimesOp)op; FSDirAttrOp.unprotectedSetTimes( fsDir, renameReservedPathsOnUpgrade(timesOp.path, logVersion), timesOp.mtime, timesOp.atime, true); break; } case OP_SYMLINK: { if (!FileSystem.areSymlinksEnabled()) { throw new IOException("Symlinks not supported - please remove symlink before upgrading to this version of HDFS"); } SymlinkOp symlinkOp = (SymlinkOp)op; inodeId = getAndUpdateLastInodeId(symlinkOp.inodeId, logVersion, lastInodeId); final String path = renameReservedPathsOnUpgrade(symlinkOp.path, logVersion); final INodesInPath iip = fsDir.getINodesInPath(path, false); FSDirSymlinkOp.unprotectedAddSymlink(fsDir, iip.getExistingINodes(), iip.getLastLocalName(), inodeId, symlinkOp.value, symlinkOp.mtime, symlinkOp.atime, symlinkOp.permissionStatus); if (toAddRetryCache) { fsNamesys.addCacheEntry(symlinkOp.rpcClientId, symlinkOp.rpcCallId); } break; } case OP_RENAME: { RenameOp renameOp = (RenameOp)op; FSDirRenameOp.renameForEditLog(fsDir, renameReservedPathsOnUpgrade(renameOp.src, logVersion), renameReservedPathsOnUpgrade(renameOp.dst, logVersion), renameOp.timestamp, 
renameOp.options); if (toAddRetryCache) { fsNamesys.addCacheEntry(renameOp.rpcClientId, renameOp.rpcCallId); } break; } case OP_GET_DELEGATION_TOKEN: { GetDelegationTokenOp getDelegationTokenOp = (GetDelegationTokenOp)op; fsNamesys.getDelegationTokenSecretManager() .addPersistedDelegationToken(getDelegationTokenOp.token, getDelegationTokenOp.expiryTime); break; } case OP_RENEW_DELEGATION_TOKEN: { RenewDelegationTokenOp renewDelegationTokenOp = (RenewDelegationTokenOp)op; fsNamesys.getDelegationTokenSecretManager() .updatePersistedTokenRenewal(renewDelegationTokenOp.token, renewDelegationTokenOp.expiryTime); break; } case OP_CANCEL_DELEGATION_TOKEN: { CancelDelegationTokenOp cancelDelegationTokenOp = (CancelDelegationTokenOp)op; fsNamesys.getDelegationTokenSecretManager() .updatePersistedTokenCancellation( cancelDelegationTokenOp.token); break; } case OP_UPDATE_MASTER_KEY: { UpdateMasterKeyOp updateMasterKeyOp = (UpdateMasterKeyOp)op; fsNamesys.getDelegationTokenSecretManager() .updatePersistedMasterKey(updateMasterKeyOp.key); break; } case OP_REASSIGN_LEASE: { ReassignLeaseOp reassignLeaseOp = (ReassignLeaseOp)op; Lease lease = fsNamesys.leaseManager.getLease( reassignLeaseOp.leaseHolder); final String path = renameReservedPathsOnUpgrade(reassignLeaseOp.path, logVersion); INodeFile pendingFile = fsDir.getINode(path).asFile(); Preconditions.checkState(pendingFile.isUnderConstruction()); fsNamesys.reassignLeaseInternal(lease, reassignLeaseOp.newHolder, pendingFile); break; } case OP_START_LOG_SEGMENT: case OP_END_LOG_SEGMENT: { // no data in here currently. break; } case OP_CREATE_SNAPSHOT: { CreateSnapshotOp createSnapshotOp = (CreateSnapshotOp) op; final String snapshotRoot = renameReservedPathsOnUpgrade(createSnapshotOp.snapshotRoot, logVersion); INodesInPath iip = fsDir.getINodesInPath4Write(snapshotRoot); String path = fsNamesys.getSnapshotManager().createSnapshot(iip, snapshotRoot, createSnapshotOp.snapshotName); if (toAddRetryCache) { fsNamesys.addCacheEntryWithPayload(createSnapshotOp.rpcClientId, createSnapshotOp.rpcCallId, path); } break; } case OP_DELETE_SNAPSHOT: { DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op; BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); List<INode> removedINodes = new ChunkedArrayList<INode>(); final String snapshotRoot = renameReservedPathsOnUpgrade(deleteSnapshotOp.snapshotRoot, logVersion); INodesInPath iip = fsDir.getINodesInPath4Write(snapshotRoot); fsNamesys.getSnapshotManager().deleteSnapshot(iip, deleteSnapshotOp.snapshotName, new INode.ReclaimContext(fsNamesys.dir.getBlockStoragePolicySuite(), collectedBlocks, removedINodes, null)); fsNamesys.removeBlocksAndUpdateSafemodeTotal(collectedBlocks); collectedBlocks.clear(); fsNamesys.dir.removeFromInodeMap(removedINodes); removedINodes.clear(); if (toAddRetryCache) { fsNamesys.addCacheEntry(deleteSnapshotOp.rpcClientId, deleteSnapshotOp.rpcCallId); } break; } case OP_RENAME_SNAPSHOT: { RenameSnapshotOp renameSnapshotOp = (RenameSnapshotOp) op; final String snapshotRoot = renameReservedPathsOnUpgrade(renameSnapshotOp.snapshotRoot, logVersion); INodesInPath iip = fsDir.getINodesInPath4Write(snapshotRoot); fsNamesys.getSnapshotManager().renameSnapshot(iip, snapshotRoot, renameSnapshotOp.snapshotOldName, renameSnapshotOp.snapshotNewName); if (toAddRetryCache) { fsNamesys.addCacheEntry(renameSnapshotOp.rpcClientId, renameSnapshotOp.rpcCallId); } break; } case OP_ALLOW_SNAPSHOT: { AllowSnapshotOp allowSnapshotOp = (AllowSnapshotOp) op; final String snapshotRoot = 
renameReservedPathsOnUpgrade(allowSnapshotOp.snapshotRoot, logVersion); fsNamesys.getSnapshotManager().setSnapshottable( snapshotRoot, false); break; } case OP_DISALLOW_SNAPSHOT: { DisallowSnapshotOp disallowSnapshotOp = (DisallowSnapshotOp) op; final String snapshotRoot = renameReservedPathsOnUpgrade(disallowSnapshotOp.snapshotRoot, logVersion); fsNamesys.getSnapshotManager().resetSnapshottable( snapshotRoot); break; } case OP_SET_GENSTAMP_V2: { SetGenstampV2Op setGenstampV2Op = (SetGenstampV2Op) op; fsNamesys.getBlockIdManager().setGenerationStampV2( setGenstampV2Op.genStampV2); break; } case OP_ALLOCATE_BLOCK_ID: { AllocateBlockIdOp allocateBlockIdOp = (AllocateBlockIdOp) op; fsNamesys.getBlockIdManager().setLastAllocatedBlockId( allocateBlockIdOp.blockId); break; } case OP_ROLLING_UPGRADE_START: { if (startOpt == StartupOption.ROLLINGUPGRADE) { final RollingUpgradeStartupOption rollingUpgradeOpt = startOpt.getRollingUpgradeStartupOption(); if (rollingUpgradeOpt == RollingUpgradeStartupOption.ROLLBACK) { throw new RollingUpgradeOp.RollbackException(); } else if (rollingUpgradeOpt == RollingUpgradeStartupOption.DOWNGRADE) { //ignore upgrade marker break; } } // start rolling upgrade final long startTime = ((RollingUpgradeOp) op).getTime(); fsNamesys.startRollingUpgradeInternal(startTime); fsNamesys.triggerRollbackCheckpoint(); break; } case OP_ROLLING_UPGRADE_FINALIZE: { final long finalizeTime = ((RollingUpgradeOp) op).getTime(); if (fsNamesys.isRollingUpgrade()) { // Only do it when NN is actually doing rolling upgrade. // We can get FINALIZE without corresponding START, if NN is restarted // before this op is consumed and a new checkpoint is created. fsNamesys.finalizeRollingUpgradeInternal(finalizeTime); } fsNamesys.getFSImage().updateStorageVersion(); fsNamesys.getFSImage().renameCheckpoint(NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE); break; } case OP_ADD_CACHE_DIRECTIVE: { AddCacheDirectiveInfoOp addOp = (AddCacheDirectiveInfoOp) op; CacheDirectiveInfo result = fsNamesys. 
getCacheManager().addDirectiveFromEditLog(addOp.directive); if (toAddRetryCache) { Long id = result.getId(); fsNamesys.addCacheEntryWithPayload(op.rpcClientId, op.rpcCallId, id); } break; } case OP_MODIFY_CACHE_DIRECTIVE: { ModifyCacheDirectiveInfoOp modifyOp = (ModifyCacheDirectiveInfoOp) op; fsNamesys.getCacheManager().modifyDirectiveFromEditLog( modifyOp.directive); if (toAddRetryCache) { fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); } break; } case OP_REMOVE_CACHE_DIRECTIVE: { RemoveCacheDirectiveInfoOp removeOp = (RemoveCacheDirectiveInfoOp) op; fsNamesys.getCacheManager().removeDirective(removeOp.id, null); if (toAddRetryCache) { fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); } break; } case OP_ADD_CACHE_POOL: { AddCachePoolOp addOp = (AddCachePoolOp) op; fsNamesys.getCacheManager().addCachePool(addOp.info); if (toAddRetryCache) { fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); } break; } case OP_MODIFY_CACHE_POOL: { ModifyCachePoolOp modifyOp = (ModifyCachePoolOp) op; fsNamesys.getCacheManager().modifyCachePool(modifyOp.info); if (toAddRetryCache) { fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); } break; } case OP_REMOVE_CACHE_POOL: { RemoveCachePoolOp removeOp = (RemoveCachePoolOp) op; fsNamesys.getCacheManager().removeCachePool(removeOp.poolName); if (toAddRetryCache) { fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); } break; } case OP_SET_ACL: { SetAclOp setAclOp = (SetAclOp) op; FSDirAclOp.unprotectedSetAcl(fsDir, setAclOp.src, setAclOp.aclEntries, true); break; } case OP_SET_XATTR: { SetXAttrOp setXAttrOp = (SetXAttrOp) op; FSDirXAttrOp.unprotectedSetXAttrs(fsDir, setXAttrOp.src, setXAttrOp.xAttrs, EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE)); if (toAddRetryCache) { fsNamesys.addCacheEntry(setXAttrOp.rpcClientId, setXAttrOp.rpcCallId); } break; } case OP_REMOVE_XATTR: { RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op; FSDirXAttrOp.unprotectedRemoveXAttrs(fsDir, removeXAttrOp.src, removeXAttrOp.xAttrs); if (toAddRetryCache) { fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId, removeXAttrOp.rpcCallId); } break; } case OP_TRUNCATE: { TruncateOp truncateOp = (TruncateOp) op; FSDirTruncateOp.unprotectedTruncate(fsNamesys, truncateOp.src, truncateOp.clientName, truncateOp.clientMachine, truncateOp.newLength, truncateOp.timestamp, truncateOp.truncateBlock); break; } case OP_SET_STORAGE_POLICY: { SetStoragePolicyOp setStoragePolicyOp = (SetStoragePolicyOp) op; final String path = renameReservedPathsOnUpgrade(setStoragePolicyOp.path, logVersion); final INodesInPath iip = fsDir.getINodesInPath4Write(path); FSDirAttrOp.unprotectedSetStoragePolicy( fsDir, fsNamesys.getBlockManager(), iip, setStoragePolicyOp.policyId); break; } default: throw new IOException("Invalid operation read " + op.opCode); } return inodeId; } private static String formatEditLogReplayError(EditLogInputStream in, long recentOpcodeOffsets[], long txid) { StringBuilder sb = new StringBuilder(); sb.append("Error replaying edit log at offset " + in.getPosition()); sb.append(". 
Expected transaction ID was ").append(txid); if (recentOpcodeOffsets[0] != -1) { Arrays.sort(recentOpcodeOffsets); sb.append("\nRecent opcode offsets:"); for (long offset : recentOpcodeOffsets) { if (offset != -1) { sb.append(' ').append(offset); } } } return sb.toString(); } /** * Add a new block into the given INodeFile */ private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file) throws IOException { BlockInfo[] oldBlocks = file.getBlocks(); Block pBlock = op.getPenultimateBlock(); Block newBlock= op.getLastBlock(); if (pBlock != null) { // the penultimate block is not null Preconditions.checkState(oldBlocks != null && oldBlocks.length > 0); // compare pBlock with the last block of oldBlocks Block oldLastBlock = oldBlocks[oldBlocks.length - 1]; if (oldLastBlock.getBlockId() != pBlock.getBlockId() || oldLastBlock.getGenerationStamp() != pBlock.getGenerationStamp()) { throw new IOException( "Mismatched block IDs or generation stamps for the old last block of file " + op.getPath() + ", the old last block is " + oldLastBlock + ", and the block read from editlog is " + pBlock); } oldLastBlock.setNumBytes(pBlock.getNumBytes()); if (oldLastBlock instanceof BlockInfoContiguousUnderConstruction) { fsNamesys.getBlockManager().forceCompleteBlock(file, (BlockInfoContiguousUnderConstruction) oldLastBlock); fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock); } } else { // the penultimate block is null Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0); } // add the new block BlockInfo newBI = new BlockInfoContiguousUnderConstruction( newBlock, file.getPreferredBlockReplication()); fsNamesys.getBlockManager().addBlockCollection(newBI, file); file.addBlock(newBI); fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock); } /** * Update in-memory data structures with new block information. * @throws IOException */ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op, INodesInPath iip, INodeFile file) throws IOException { // Update its block list BlockInfo[] oldBlocks = file.getBlocks(); Block[] newBlocks = op.getBlocks(); String path = op.getPath(); // Are we only updating the last block's gen stamp. boolean isGenStampUpdate = oldBlocks.length == newBlocks.length; // First, update blocks in common for (int i = 0; i < oldBlocks.length && i < newBlocks.length; i++) { BlockInfo oldBlock = oldBlocks[i]; Block newBlock = newBlocks[i]; boolean isLastBlock = i == newBlocks.length - 1; if (oldBlock.getBlockId() != newBlock.getBlockId() || (oldBlock.getGenerationStamp() != newBlock.getGenerationStamp() && !(isGenStampUpdate && isLastBlock))) { throw new IOException("Mismatched block IDs or generation stamps, " + "attempting to replace block " + oldBlock + " with " + newBlock + " as block # " + i + "/" + newBlocks.length + " of " + path); } oldBlock.setNumBytes(newBlock.getNumBytes()); boolean changeMade = oldBlock.getGenerationStamp() != newBlock.getGenerationStamp(); oldBlock.setGenerationStamp(newBlock.getGenerationStamp()); if (oldBlock instanceof BlockInfoContiguousUnderConstruction && (!isLastBlock || op.shouldCompleteLastBlock())) { changeMade = true; fsNamesys.getBlockManager().forceCompleteBlock(file, (BlockInfoContiguousUnderConstruction) oldBlock); } if (changeMade) { // The state or gen-stamp of the block has changed. So, we may be // able to process some messages from datanodes that we previously // were unable to process. 
fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock); } } if (newBlocks.length < oldBlocks.length) { // We're removing a block from the file, e.g. abandonBlock(...) if (!file.isUnderConstruction()) { throw new IOException("Trying to remove a block from file " + path + " which is not under construction."); } if (newBlocks.length != oldBlocks.length - 1) { throw new IOException("Trying to remove more than one block from file " + path); } Block oldBlock = oldBlocks[oldBlocks.length - 1]; boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock( fsDir, path, iip, file, oldBlock); if (!removed && !(op instanceof UpdateBlocksOp)) { throw new IOException("Trying to delete non-existent block " + oldBlock); } } else if (newBlocks.length > oldBlocks.length) { // We're adding blocks for (int i = oldBlocks.length; i < newBlocks.length; i++) { Block newBlock = newBlocks[i]; BlockInfo newBI; if (!op.shouldCompleteLastBlock()) { // TODO: shouldn't this only be true for the last block? // what about an old-version fsync() where fsync isn't called // until several blocks in? newBI = new BlockInfoContiguousUnderConstruction( newBlock, file.getPreferredBlockReplication()); } else { // OP_CLOSE should add finalized blocks. This code path // is only executed when loading edits written by prior // versions of Hadoop. Current versions always log // OP_ADD operations as each block is allocated. newBI = new BlockInfoContiguous(newBlock, file.getPreferredBlockReplication()); } fsNamesys.getBlockManager().addBlockCollection(newBI, file); file.addBlock(newBI); fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock); } } } private static void dumpOpCounts( EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts) { StringBuilder sb = new StringBuilder(); sb.append("Summary of operations loaded from edit log:\n "); Joiner.on("\n ").withKeyValueSeparator("=").appendTo(sb, opCounts); FSImage.LOG.debug(sb.toString()); } private void incrOpCount(FSEditLogOpCodes opCode, EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts, Step step, Counter counter) { Holder<Integer> holder = opCounts.get(opCode); if (holder == null) { holder = new Holder<Integer>(1); opCounts.put(opCode, holder); } else { holder.held++; } counter.increment(); } /** * Throw appropriate exception during upgrade from 203, when editlog loading * could fail due to opcode conflicts. */ private void check203UpgradeFailure(int logVersion, Throwable e) throws IOException { // The 0.20.203 release has conflicting opcodes with the later releases. // The editlog must be emptied by restarting the namenode, before proceeding // with the upgrade. if (Storage.is203LayoutVersion(logVersion) && logVersion != HdfsServerConstants.NAMENODE_LAYOUT_VERSION) { String msg = "During upgrade failed to load the editlog version " + logVersion + " from release 0.20.203. Please go back to the old" + " release and restart the namenode. This empties the editlog" + " and saves the namespace. Resume the upgrade after this step."; throw new IOException(msg, e); } } /** * Find the last valid transaction ID in the stream. * If there are invalid or corrupt transactions in the middle of the stream, * validateEditLog will skip over them. * This reads through the stream but does not close it. 
*/ static EditLogValidation validateEditLog(EditLogInputStream in) { long lastPos = 0; long lastTxId = HdfsServerConstants.INVALID_TXID; long numValid = 0; FSEditLogOp op = null; while (true) { lastPos = in.getPosition(); try { if ((op = in.readOp()) == null) { break; } } catch (Throwable t) { FSImage.LOG.warn("Caught exception after reading " + numValid + " ops from " + in + " while determining its valid length." + "Position was " + lastPos, t); in.resync(); FSImage.LOG.warn("After resync, position is " + in.getPosition()); continue; } if (lastTxId == HdfsServerConstants.INVALID_TXID || op.getTransactionId() > lastTxId) { lastTxId = op.getTransactionId(); } numValid++; } return new EditLogValidation(lastPos, lastTxId, false); } static EditLogValidation scanEditLog(EditLogInputStream in) { long lastPos = 0; long lastTxId = HdfsServerConstants.INVALID_TXID; long numValid = 0; FSEditLogOp op = null; while (true) { lastPos = in.getPosition(); try { if ((op = in.readOp()) == null) { // TODO break; } } catch (Throwable t) { FSImage.LOG.warn("Caught exception after reading " + numValid + " ops from " + in + " while determining its valid length." + "Position was " + lastPos, t); in.resync(); FSImage.LOG.warn("After resync, position is " + in.getPosition()); continue; } if (lastTxId == HdfsServerConstants.INVALID_TXID || op.getTransactionId() > lastTxId) { lastTxId = op.getTransactionId(); } numValid++; } return new EditLogValidation(lastPos, lastTxId, false); } static class EditLogValidation { private final long validLength; private final long endTxId; private final boolean hasCorruptHeader; EditLogValidation(long validLength, long endTxId, boolean hasCorruptHeader) { this.validLength = validLength; this.endTxId = endTxId; this.hasCorruptHeader = hasCorruptHeader; } long getValidLength() { return validLength; } long getEndTxId() { return endTxId; } boolean hasCorruptHeader() { return hasCorruptHeader; } } /** * Stream wrapper that keeps track of the current stream position. * * This stream also allows us to set a limit on how many bytes we can read * without getting an exception. 
*/ public static class PositionTrackingInputStream extends FilterInputStream implements StreamLimiter { private long curPos = 0; private long markPos = -1; private long limitPos = Long.MAX_VALUE; public PositionTrackingInputStream(InputStream is) { super(is); } private void checkLimit(long amt) throws IOException { long extra = (curPos + amt) - limitPos; if (extra > 0) { throw new IOException("Tried to read " + amt + " byte(s) past " + "the limit at offset " + limitPos); } } @Override public int read() throws IOException { checkLimit(1); int ret = super.read(); if (ret != -1) curPos++; return ret; } @Override public int read(byte[] data) throws IOException { checkLimit(data.length); int ret = super.read(data); if (ret > 0) curPos += ret; return ret; } @Override public int read(byte[] data, int offset, int length) throws IOException { checkLimit(length); int ret = super.read(data, offset, length); if (ret > 0) curPos += ret; return ret; } @Override public void setLimit(long limit) { limitPos = curPos + limit; } @Override public void clearLimit() { limitPos = Long.MAX_VALUE; } @Override public void mark(int limit) { super.mark(limit); markPos = curPos; } @Override public void reset() throws IOException { if (markPos == -1) { throw new IOException("Not marked!"); } super.reset(); curPos = markPos; markPos = -1; } public long getPos() { return curPos; } @Override public long skip(long amt) throws IOException { long extra = (curPos + amt) - limitPos; if (extra > 0) { throw new IOException("Tried to skip " + extra + " bytes past " + "the limit at offset " + limitPos); } long ret = super.skip(amt); curPos += ret; return ret; } } public long getLastAppliedTxId() { return lastAppliedTxId; } /** * Creates a Step used for updating startup progress, populated with * information from the given edits. The step always includes the log's name. * If the log has a known length, then the length is included in the step too. * * @param edits EditLogInputStream to use for populating step * @return Step populated with information from edits * @throws IOException thrown if there is an I/O error */ private static Step createStartupProgressStep(EditLogInputStream edits) throws IOException { long length = edits.length(); String name = edits.getCurrentStreamName(); return length != -1 ? new Step(name, length) : new Step(name); } }
52,950
39.606595
121
java
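A quick usage sketch of the PositionTrackingInputStream shown above may help: it wraps any InputStream, tracks the byte offset, and enforces an optional hard read limit, which is how the loader keeps a corrupt op from reading past its declared length. The sketch assumes the enclosing class is FSEditLogLoader (the class name is not visible in this excerpt) and uses only the public constructor, setLimit, clearLimit, getPos and read methods from the listing.

import java.io.ByteArrayInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader;

public class PositionTrackingDemo {
  public static void main(String[] args) throws IOException {
    byte[] data = {1, 2, 3, 4, 5, 6, 7, 8};
    // Wrap a plain stream; the wrapper counts bytes consumed and can cap
    // how many more bytes may be read before it throws.
    FSEditLogLoader.PositionTrackingInputStream in =
        new FSEditLogLoader.PositionTrackingInputStream(
            new ByteArrayInputStream(data));
    in.setLimit(4);                   // allow at most 4 more bytes
    in.read(new byte[4]);             // fine: position advances to 4
    System.out.println(in.getPos());  // 4
    try {
      in.read();                      // a 5th byte would exceed the limit
    } catch (IOException e) {
      System.out.println("limit enforced: " + e.getMessage());
    }
    in.clearLimit();                  // lift the cap; reads may continue
    in.close();
  }
}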
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.util.ExitUtil.terminate; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.LinkedList; import java.util.List; import java.util.PriorityQueue; import java.util.SortedSet; import java.util.concurrent.CopyOnWriteArrayList; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import com.google.common.base.Preconditions; import com.google.common.collect.ComparisonChain; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableListMultimap; import com.google.common.collect.Lists; import com.google.common.collect.Multimaps; import com.google.common.collect.Sets; /** * Manages a collection of Journals. None of the methods are synchronized; it is * assumed that the FSEditLog methods that use this class provide proper * synchronization. */ public class JournalSet implements JournalManager { static final Log LOG = LogFactory.getLog(FSEditLog.class); private static final Comparator<EditLogInputStream> LOCAL_LOG_PREFERENCE_COMPARATOR = new Comparator<EditLogInputStream>() { @Override public int compare(EditLogInputStream elis1, EditLogInputStream elis2) { // we want local logs to be ordered earlier in the collection, and true // is considered larger than false, so we want to invert the booleans here return ComparisonChain.start().compare(!elis1.isLocalLog(), !elis2.isLocalLog()).result(); } }; static final public Comparator<EditLogInputStream> EDIT_LOG_INPUT_STREAM_COMPARATOR = new Comparator<EditLogInputStream>() { @Override public int compare(EditLogInputStream a, EditLogInputStream b) { return ComparisonChain.start(). compare(a.getFirstTxId(), b.getFirstTxId()). compare(b.getLastTxId(), a.getLastTxId()). result(); } }; /** * Container for a JournalManager paired with its currently * active stream. * * If a Journal gets disabled due to an error writing to its * stream, then the stream will be aborted and set to null. 
*/ static class JournalAndStream implements CheckableNameNodeResource { private final JournalManager journal; private boolean disabled = false; private EditLogOutputStream stream; private final boolean required; private final boolean shared; public JournalAndStream(JournalManager manager, boolean required, boolean shared) { this.journal = manager; this.required = required; this.shared = shared; } public void startLogSegment(long txId, int layoutVersion) throws IOException { Preconditions.checkState(stream == null); disabled = false; stream = journal.startLogSegment(txId, layoutVersion); } /** * Closes the stream, also sets it to null. */ public void closeStream() throws IOException { if (stream == null) return; stream.close(); stream = null; } /** * Close the Journal and Stream */ public void close() throws IOException { closeStream(); journal.close(); } /** * Aborts the stream, also sets it to null. */ public void abort() { if (stream == null) return; try { stream.abort(); } catch (IOException ioe) { LOG.error("Unable to abort stream " + stream, ioe); } stream = null; } boolean isActive() { return stream != null; } /** * Should be used outside JournalSet only for testing. */ EditLogOutputStream getCurrentStream() { return stream; } @Override public String toString() { return "JournalAndStream(mgr=" + journal + ", " + "stream=" + stream + ")"; } void setCurrentStreamForTests(EditLogOutputStream stream) { this.stream = stream; } JournalManager getManager() { return journal; } boolean isDisabled() { return disabled; } private void setDisabled(boolean disabled) { this.disabled = disabled; } @Override public boolean isResourceAvailable() { return !isDisabled(); } @Override public boolean isRequired() { return required; } public boolean isShared() { return shared; } } // COW implementation is necessary since some users (eg the web ui) call // getAllJournalStreams() and then iterate. Since this is rarely // mutated, there is no performance concern. private final List<JournalAndStream> journals = new CopyOnWriteArrayList<JournalSet.JournalAndStream>(); final int minimumRedundantJournals; private boolean closed; JournalSet(int minimumRedundantResources) { this.minimumRedundantJournals = minimumRedundantResources; } @Override public void format(NamespaceInfo nsInfo) throws IOException { // The operation is done by FSEditLog itself throw new UnsupportedOperationException(); } @Override public boolean hasSomeData() throws IOException { // This is called individually on the underlying journals, // not on the JournalSet. 
throw new UnsupportedOperationException(); } @Override public EditLogOutputStream startLogSegment(final long txId, final int layoutVersion) throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { jas.startLogSegment(txId, layoutVersion); } }, "starting log segment " + txId); return new JournalSetOutputStream(); } @Override public void finalizeLogSegment(final long firstTxId, final long lastTxId) throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { if (jas.isActive()) { jas.closeStream(); jas.getManager().finalizeLogSegment(firstTxId, lastTxId); } } }, "finalize log segment " + firstTxId + ", " + lastTxId); } @Override public void close() throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { jas.close(); } }, "close journal"); closed = true; } public boolean isOpen() { return !closed; } /** * In this function, we get a bunch of streams from all of our JournalManager * objects. Then we add these to the collection one by one. * * @param streams The collection to add the streams to. It may or * may not be sorted-- this is up to the caller. * @param fromTxId The transaction ID to start looking for streams at * @param inProgressOk Should we consider unfinalized streams? */ @Override public void selectInputStreams(Collection<EditLogInputStream> streams, long fromTxId, boolean inProgressOk) throws IOException { final PriorityQueue<EditLogInputStream> allStreams = new PriorityQueue<EditLogInputStream>(64, EDIT_LOG_INPUT_STREAM_COMPARATOR); for (JournalAndStream jas : journals) { if (jas.isDisabled()) { LOG.info("Skipping jas " + jas + " since it's disabled"); continue; } try { jas.getManager().selectInputStreams(allStreams, fromTxId, inProgressOk); } catch (IOException ioe) { LOG.warn("Unable to determine input streams from " + jas.getManager() + ". Skipping.", ioe); } } chainAndMakeRedundantStreams(streams, allStreams, fromTxId); } public static void chainAndMakeRedundantStreams( Collection<EditLogInputStream> outStreams, PriorityQueue<EditLogInputStream> allStreams, long fromTxId) { // We want to group together all the streams that start on the same start // transaction ID. To do this, we maintain an accumulator (acc) of all // the streams we've seen at a given start transaction ID. When we see a // higher start transaction ID, we select a stream from the accumulator and // clear it. Then we begin accumulating streams with the new, higher start // transaction ID. 
LinkedList<EditLogInputStream> acc = new LinkedList<EditLogInputStream>(); EditLogInputStream elis; while ((elis = allStreams.poll()) != null) { if (acc.isEmpty()) { acc.add(elis); } else { EditLogInputStream accFirst = acc.get(0); long accFirstTxId = accFirst.getFirstTxId(); if (accFirstTxId == elis.getFirstTxId()) { // if we have a finalized log segment available at this txid, // we should throw out all in-progress segments at this txid if (elis.isInProgress()) { if (accFirst.isInProgress()) { acc.add(elis); } } else { if (accFirst.isInProgress()) { acc.clear(); } acc.add(elis); } } else if (accFirstTxId < elis.getFirstTxId()) { // try to read from the local logs first since the throughput should // be higher Collections.sort(acc, LOCAL_LOG_PREFERENCE_COMPARATOR); outStreams.add(new RedundantEditLogInputStream(acc, fromTxId)); acc.clear(); acc.add(elis); } else if (accFirstTxId > elis.getFirstTxId()) { throw new RuntimeException("sorted set invariants violated! " + "Got stream with first txid " + elis.getFirstTxId() + ", but the last firstTxId was " + accFirstTxId); } } } if (!acc.isEmpty()) { Collections.sort(acc, LOCAL_LOG_PREFERENCE_COMPARATOR); outStreams.add(new RedundantEditLogInputStream(acc, fromTxId)); acc.clear(); } } /** * Returns true if there are no journals, all redundant journals are disabled, * or any required journals are disabled. * * @return True if there no journals, all redundant journals are disabled, * or any required journals are disabled. */ public boolean isEmpty() { return !NameNodeResourcePolicy.areResourcesAvailable(journals, minimumRedundantJournals); } /** * Called when some journals experience an error in some operation. */ private void disableAndReportErrorOnJournals(List<JournalAndStream> badJournals) { if (badJournals == null || badJournals.isEmpty()) { return; // nothing to do } for (JournalAndStream j : badJournals) { LOG.error("Disabling journal " + j); j.abort(); j.setDisabled(true); } } /** * Implementations of this interface encapsulate operations that can be * iteratively applied on all the journals. For example see * {@link JournalSet#mapJournalsAndReportErrors}. */ private interface JournalClosure { /** * The operation on JournalAndStream. * @param jas Object on which operations are performed. * @throws IOException */ public void apply(JournalAndStream jas) throws IOException; } /** * Apply the given operation across all of the journal managers, disabling * any for which the closure throws an IOException. * @param closure {@link JournalClosure} object encapsulating the operation. * @param status message used for logging errors (e.g. "opening journal") * @throws IOException If the operation fails on all the journals. */ private void mapJournalsAndReportErrors( JournalClosure closure, String status) throws IOException{ List<JournalAndStream> badJAS = Lists.newLinkedList(); for (JournalAndStream jas : journals) { try { closure.apply(jas); } catch (Throwable t) { if (jas.isRequired()) { final String msg = "Error: " + status + " failed for required journal (" + jas + ")"; LOG.fatal(msg, t); // If we fail on *any* of the required journals, then we must not // continue on any of the other journals. Abort them to ensure that // retry behavior doesn't allow them to keep going in any way. abortAllJournals(); // the current policy is to shutdown the NN on errors to shared edits // dir. There are many code paths to shared edits failures - syncs, // roll of edits etc. All of them go through this common function // where the isRequired() check is made. 
Applying exit policy here // to catch all code paths. terminate(1, msg); } else { LOG.error("Error: " + status + " failed for (journal " + jas + ")", t); badJAS.add(jas); } } } disableAndReportErrorOnJournals(badJAS); if (!NameNodeResourcePolicy.areResourcesAvailable(journals, minimumRedundantJournals)) { String message = status + " failed for too many journals"; LOG.error("Error: " + message); throw new IOException(message); } } /** * Abort all of the underlying streams. */ private void abortAllJournals() { for (JournalAndStream jas : journals) { if (jas.isActive()) { jas.abort(); } } } /** * An implementation of EditLogOutputStream that applies a requested method on * all the journals that are currently active. */ private class JournalSetOutputStream extends EditLogOutputStream { JournalSetOutputStream() throws IOException { super(); } @Override public void write(final FSEditLogOp op) throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { if (jas.isActive()) { jas.getCurrentStream().write(op); } } }, "write op"); } @Override public void writeRaw(final byte[] data, final int offset, final int length) throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { if (jas.isActive()) { jas.getCurrentStream().writeRaw(data, offset, length); } } }, "write bytes"); } @Override public void create(final int layoutVersion) throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { if (jas.isActive()) { jas.getCurrentStream().create(layoutVersion); } } }, "create"); } @Override public void close() throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { jas.closeStream(); } }, "close"); } @Override public void abort() throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { jas.abort(); } }, "abort"); } @Override public void setReadyToFlush() throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { if (jas.isActive()) { jas.getCurrentStream().setReadyToFlush(); } } }, "setReadyToFlush"); } @Override protected void flushAndSync(final boolean durable) throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { if (jas.isActive()) { jas.getCurrentStream().flushAndSync(durable); } } }, "flushAndSync"); } @Override public void flush() throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { if (jas.isActive()) { jas.getCurrentStream().flush(); } } }, "flush"); } @Override public boolean shouldForceSync() { for (JournalAndStream js : journals) { if (js.isActive() && js.getCurrentStream().shouldForceSync()) { return true; } } return false; } @Override protected long getNumSync() { for (JournalAndStream jas : journals) { if (jas.isActive()) { return jas.getCurrentStream().getNumSync(); } } return 0; } } @Override public void setOutputBufferCapacity(final int size) { try { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { jas.getManager().setOutputBufferCapacity(size); } }, 
"setOutputBufferCapacity"); } catch (IOException e) { LOG.error("Error in setting outputbuffer capacity"); } } List<JournalAndStream> getAllJournalStreams() { return journals; } List<JournalManager> getJournalManagers() { List<JournalManager> jList = new ArrayList<JournalManager>(); for (JournalAndStream j : journals) { jList.add(j.getManager()); } return jList; } void add(JournalManager j, boolean required) { add(j, required, false); } void add(JournalManager j, boolean required, boolean shared) { JournalAndStream jas = new JournalAndStream(j, required, shared); journals.add(jas); } void remove(JournalManager j) { JournalAndStream jasToRemove = null; for (JournalAndStream jas: journals) { if (jas.getManager().equals(j)) { jasToRemove = jas; break; } } if (jasToRemove != null) { jasToRemove.abort(); journals.remove(jasToRemove); } } @Override public void purgeLogsOlderThan(final long minTxIdToKeep) throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { jas.getManager().purgeLogsOlderThan(minTxIdToKeep); } }, "purgeLogsOlderThan " + minTxIdToKeep); } @Override public void recoverUnfinalizedSegments() throws IOException { mapJournalsAndReportErrors(new JournalClosure() { @Override public void apply(JournalAndStream jas) throws IOException { jas.getManager().recoverUnfinalizedSegments(); } }, "recoverUnfinalizedSegments"); } /** * Return a manifest of what finalized edit logs are available. All available * edit logs are returned starting from the transaction id passed. If * 'fromTxId' falls in the middle of a log, that log is returned as well. * * @param fromTxId Starting transaction id to read the logs. * @return RemoteEditLogManifest object. */ public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId) { // Collect RemoteEditLogs available from each FileJournalManager List<RemoteEditLog> allLogs = Lists.newArrayList(); for (JournalAndStream j : journals) { if (j.getManager() instanceof FileJournalManager) { FileJournalManager fjm = (FileJournalManager)j.getManager(); try { allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, false)); } catch (Throwable t) { LOG.warn("Cannot list edit logs in " + fjm, t); } } } // Group logs by their starting txid ImmutableListMultimap<Long, RemoteEditLog> logsByStartTxId = Multimaps.index(allLogs, RemoteEditLog.GET_START_TXID); long curStartTxId = fromTxId; List<RemoteEditLog> logs = Lists.newArrayList(); while (true) { ImmutableList<RemoteEditLog> logGroup = logsByStartTxId.get(curStartTxId); if (logGroup.isEmpty()) { // we have a gap in logs - for example because we recovered some old // storage directory with ancient logs. Clear out any logs we've // accumulated so far, and then skip to the next segment of logs // after the gap. 
SortedSet<Long> startTxIds = Sets.newTreeSet(logsByStartTxId.keySet()); startTxIds = startTxIds.tailSet(curStartTxId); if (startTxIds.isEmpty()) { break; } else { if (LOG.isDebugEnabled()) { LOG.debug("Found gap in logs at " + curStartTxId + ": " + "not returning previous logs in manifest."); } logs.clear(); curStartTxId = startTxIds.first(); continue; } } // Find the one that extends the farthest forward RemoteEditLog bestLog = Collections.max(logGroup); logs.add(bestLog); // And then start looking from after that point curStartTxId = bestLog.getEndTxId() + 1; } RemoteEditLogManifest ret = new RemoteEditLogManifest(logs); if (LOG.isDebugEnabled()) { LOG.debug("Generated manifest for logs since " + fromTxId + ":" + ret); } return ret; } /** * Add sync times to the buffer. */ String getSyncTimes() { StringBuilder buf = new StringBuilder(); for (JournalAndStream jas : journals) { if (jas.isActive()) { buf.append(jas.getCurrentStream().getTotalSyncTime()); buf.append(" "); } } return buf.toString(); } @Override public void discardSegments(long startTxId) throws IOException { // This operation is handled by FSEditLog directly. throw new UnsupportedOperationException(); } @Override public void doPreUpgrade() throws IOException { // This operation is handled by FSEditLog directly. throw new UnsupportedOperationException(); } @Override public void doUpgrade(Storage storage) throws IOException { // This operation is handled by FSEditLog directly. throw new UnsupportedOperationException(); } @Override public void doFinalize() throws IOException { // This operation is handled by FSEditLog directly. throw new UnsupportedOperationException(); } @Override public boolean canRollBack(StorageInfo storage, StorageInfo prevStorage, int targetLayoutVersion) throws IOException { // This operation is handled by FSEditLog directly. throw new UnsupportedOperationException(); } @Override public void doRollback() throws IOException { // This operation is handled by FSEditLog directly. throw new UnsupportedOperationException(); } @Override public long getJournalCTime() throws IOException { // This operation is handled by FSEditLog directly. throw new UnsupportedOperationException(); } }
24,212
31.284
120
java
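JournalSet's mapJournalsAndReportErrors is private, so it cannot be demonstrated directly, but the policy it implements can be: apply an operation to every journal, treat a failure of a required journal as fatal, merely disable a failed optional journal, and give up only when too few healthy journals remain. The following standalone sketch illustrates that policy under assumed names (Target, applyToAll, minimumHealthy); it is a simplified illustration, not JournalSet's actual API.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class RedundantWriteDemo {

  /** Hypothetical stand-in for JournalAndStream: a write target that may fail. */
  interface Target {
    String name();
    boolean required();
    void apply() throws IOException;   // the operation being fanned out
  }

  private final List<Target> targets;
  private final int minimumHealthy;    // analogous to minimumRedundantJournals
  private final List<Target> disabled = new ArrayList<>();

  RedundantWriteDemo(List<Target> targets, int minimumHealthy) {
    this.targets = targets;
    this.minimumHealthy = minimumHealthy;
  }

  void applyToAll(String status) throws IOException {
    for (Target t : targets) {
      if (disabled.contains(t)) {
        continue;                      // skip targets already marked bad
      }
      try {
        t.apply();
      } catch (IOException e) {
        if (t.required()) {
          // JournalSet terminates the NameNode at this point; a plain
          // unchecked exception keeps the sketch testable.
          throw new IllegalStateException(
              status + " failed for required target " + t.name(), e);
        }
        System.err.println(status + " failed for optional target " + t.name());
        disabled.add(t);               // disable the target, but keep going
      }
    }
    if (targets.size() - disabled.size() < minimumHealthy) {
      throw new IOException(status + " failed for too many targets");
    }
  }
}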
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/QuotaCounts.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.util.EnumCounters; /** * Counters for namespace, storage space and storage type space quota and usage. */ public class QuotaCounts { // Name space and storage space counts (HDFS-7775 refactors the original disk // space count to storage space counts) private EnumCounters<Quota> nsSsCounts; // Storage type space counts private EnumCounters<StorageType> tsCounts; public static class Builder { private EnumCounters<Quota> nsSsCounts; private EnumCounters<StorageType> tsCounts; public Builder() { this.nsSsCounts = new EnumCounters<Quota>(Quota.class); this.tsCounts = new EnumCounters<StorageType>(StorageType.class); } public Builder nameSpace(long val) { this.nsSsCounts.set(Quota.NAMESPACE, val); return this; } public Builder storageSpace(long val) { this.nsSsCounts.set(Quota.STORAGESPACE, val); return this; } public Builder typeSpaces(EnumCounters<StorageType> val) { if (val != null) { this.tsCounts.set(val); } return this; } public Builder typeSpaces(long val) { this.tsCounts.reset(val); return this; } public Builder quotaCount(QuotaCounts that) { this.nsSsCounts.set(that.nsSsCounts); this.tsCounts.set(that.tsCounts); return this; } public QuotaCounts build() { return new QuotaCounts(this); } } private QuotaCounts(Builder builder) { this.nsSsCounts = builder.nsSsCounts; this.tsCounts = builder.tsCounts; } public QuotaCounts add(QuotaCounts that) { this.nsSsCounts.add(that.nsSsCounts); this.tsCounts.add(that.tsCounts); return this; } public QuotaCounts subtract(QuotaCounts that) { this.nsSsCounts.subtract(that.nsSsCounts); this.tsCounts.subtract(that.tsCounts); return this; } /** * Returns a QuotaCounts whose value is {@code (-this)}. 
* * @return {@code -this} */ public QuotaCounts negation() { QuotaCounts ret = new QuotaCounts.Builder().quotaCount(this).build(); ret.nsSsCounts.negation(); ret.tsCounts.negation(); return ret; } public long getNameSpace(){ return nsSsCounts.get(Quota.NAMESPACE); } public void setNameSpace(long nameSpaceCount) { this.nsSsCounts.set(Quota.NAMESPACE, nameSpaceCount); } public void addNameSpace(long nsDelta) { this.nsSsCounts.add(Quota.NAMESPACE, nsDelta); } public long getStorageSpace(){ return nsSsCounts.get(Quota.STORAGESPACE); } public void setStorageSpace(long spaceCount) { this.nsSsCounts.set(Quota.STORAGESPACE, spaceCount); } public void addStorageSpace(long dsDelta) { this.nsSsCounts.add(Quota.STORAGESPACE, dsDelta); } public EnumCounters<StorageType> getTypeSpaces() { EnumCounters<StorageType> ret = new EnumCounters<StorageType>(StorageType.class); ret.set(tsCounts); return ret; } void setTypeSpaces(EnumCounters<StorageType> that) { if (that != null) { this.tsCounts.set(that); } } long getTypeSpace(StorageType type) { return this.tsCounts.get(type); } void setTypeSpace(StorageType type, long spaceCount) { this.tsCounts.set(type, spaceCount); } public void addTypeSpace(StorageType type, long delta) { this.tsCounts.add(type, delta); } public boolean anyNsSsCountGreaterOrEqual(long val) { return nsSsCounts.anyGreaterOrEqual(val); } public boolean anyTypeSpaceCountGreaterOrEqual(long val) { return tsCounts.anyGreaterOrEqual(val); } @Override public boolean equals(Object obj) { if (obj == this) { return true; } else if (obj == null || !(obj instanceof QuotaCounts)) { return false; } final QuotaCounts that = (QuotaCounts)obj; return this.nsSsCounts.equals(that.nsSsCounts) && this.tsCounts.equals(that.tsCounts); } @Override public int hashCode() { assert false : "hashCode not designed"; return 42; // any arbitrary constant will do } }
4,927
26.530726
80
java
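QuotaCounts is a small mutable value object whose public surface is essentially the Builder plus the chainable add/subtract/negation methods shown above. A brief usage sketch (the byte figures are arbitrary):

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;

public class QuotaCountsDemo {
  public static void main(String[] args) {
    // Current usage of a directory tree: inode count plus raw storage bytes.
    QuotaCounts used = new QuotaCounts.Builder()
        .nameSpace(1000)
        .storageSpace(8L << 30)        // 8 GiB
        .build();

    // Delta produced by adding one 128 MiB file.
    QuotaCounts delta = new QuotaCounts.Builder()
        .nameSpace(1)
        .storageSpace(128L << 20)
        .build();

    used.add(delta);                   // mutates 'used' in place
    used.addTypeSpace(StorageType.SSD, 128L << 20);

    System.out.println(used.getNameSpace());      // 1001
    System.out.println(used.getStorageSpace());   // 8 GiB + 128 MiB

    // negation() returns a fresh, sign-flipped copy, handy when releasing quota.
    QuotaCounts released = delta.negation();
    System.out.println(released.getStorageSpace()); // -134217728
  }
}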
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.util.Arrays; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.hdfs.util.ReferenceCountMap.ReferenceCounter; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; /** * Feature that represents the ACLs of the inode. */ @InterfaceAudience.Private public class AclFeature implements INode.Feature, ReferenceCounter { public static final ImmutableList<AclEntry> EMPTY_ENTRY_LIST = ImmutableList.of(); private int refCount = 0; private final int [] entries; public AclFeature(int[] entries) { this.entries = entries; } /** * Get the number of entries present */ int getEntriesSize() { return entries.length; } /** * Get the entry at the specified position * @param pos Position of the entry to be obtained * @return integer representation of AclEntry * @throws IndexOutOfBoundsException if pos out of bound */ int getEntryAt(int pos) { Preconditions.checkPositionIndex(pos, entries.length, "Invalid position for AclEntry"); return entries[pos]; } @Override public boolean equals(Object o) { if (o == null) { return false; } if (getClass() != o.getClass()) { return false; } return Arrays.equals(entries, ((AclFeature) o).entries); } @Override public int hashCode() { return Arrays.hashCode(entries); } @Override public int getRefCount() { return refCount; } @Override public int incrementAndGetRefCount() { return ++refCount; } @Override public int decrementAndGetRefCount() { return (refCount > 0) ? --refCount : 0; } }
2,571
26.073684
75
java
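AclFeature keeps the ACL as an opaque int[] (the int encoding of each AclEntry is produced elsewhere in the NameNode and is not shown here) and combines value equality with a reference count so identical ACLs can be shared through a ReferenceCountMap. A small sketch of those two behaviours, using placeholder packed values:

import org.apache.hadoop.hdfs.server.namenode.AclFeature;

public class AclFeatureDemo {
  public static void main(String[] args) {
    // Placeholder int-encoded entries; the real encoding is defined elsewhere.
    int[] packed = {7, 261};

    AclFeature a = new AclFeature(packed);
    AclFeature b = new AclFeature(packed.clone());

    // Equality and hashCode are defined over the packed entries, which is
    // what lets identical ACLs be deduplicated and shared.
    System.out.println(a.equals(b));                  // true
    System.out.println(a.hashCode() == b.hashCode()); // true

    // Reference counting: incremented per sharer, never driven below zero.
    System.out.println(a.incrementAndGetRefCount());  // 1
    System.out.println(a.incrementAndGetRefCount());  // 2
    System.out.println(a.decrementAndGetRefCount());  // 1
    System.out.println(a.decrementAndGetRefCount());  // 0
    System.out.println(a.decrementAndGetRefCount());  // still 0
  }
}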
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hdfs.server.namenode; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.security.DigestOutputStream; import java.security.MessageDigest; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection; import org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection; import org.apache.hadoop.hdfs.server.namenode.snapshot.FSImageFormatPBSnapshot; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.io.compress.CompressorStream; import org.apache.hadoop.util.LimitInputStream; import org.apache.hadoop.util.Time; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.protobuf.CodedOutputStream; /** * Utility class to read / write fsimage in protobuf format. 
*/ @InterfaceAudience.Private public final class FSImageFormatProtobuf { private static final Log LOG = LogFactory.getLog(FSImageFormatProtobuf.class); public static final class LoaderContext { private String[] stringTable; private final ArrayList<INodeReference> refList = Lists.newArrayList(); public String[] getStringTable() { return stringTable; } public ArrayList<INodeReference> getRefList() { return refList; } } public static final class SaverContext { public static class DeduplicationMap<E> { private final Map<E, Integer> map = Maps.newHashMap(); private DeduplicationMap() {} static <T> DeduplicationMap<T> newMap() { return new DeduplicationMap<T>(); } int getId(E value) { if (value == null) { return 0; } Integer v = map.get(value); if (v == null) { int nv = map.size() + 1; map.put(value, nv); return nv; } return v; } int size() { return map.size(); } Set<Entry<E, Integer>> entrySet() { return map.entrySet(); } } private final ArrayList<INodeReference> refList = Lists.newArrayList(); private final DeduplicationMap<String> stringMap = DeduplicationMap .newMap(); public DeduplicationMap<String> getStringMap() { return stringMap; } public ArrayList<INodeReference> getRefList() { return refList; } } public static final class Loader implements FSImageFormat.AbstractLoader { static final int MINIMUM_FILE_LENGTH = 8; private final Configuration conf; private final FSNamesystem fsn; private final LoaderContext ctx; /** The MD5 sum of the loaded file */ private MD5Hash imgDigest; /** The transaction ID of the last edit represented by the loaded file */ private long imgTxId; /** * Whether the image's layout version must be the same with * {@link HdfsServerConstants#NAMENODE_LAYOUT_VERSION}. This is only set to true * when we're doing (rollingUpgrade rollback). 
*/ private final boolean requireSameLayoutVersion; Loader(Configuration conf, FSNamesystem fsn, boolean requireSameLayoutVersion) { this.conf = conf; this.fsn = fsn; this.ctx = new LoaderContext(); this.requireSameLayoutVersion = requireSameLayoutVersion; } @Override public MD5Hash getLoadedImageMd5() { return imgDigest; } @Override public long getLoadedImageTxId() { return imgTxId; } public LoaderContext getLoaderContext() { return ctx; } void load(File file) throws IOException { long start = Time.monotonicNow(); imgDigest = MD5FileUtils.computeMd5ForFile(file); RandomAccessFile raFile = new RandomAccessFile(file, "r"); FileInputStream fin = new FileInputStream(file); try { loadInternal(raFile, fin); long end = Time.monotonicNow(); LOG.info("Loaded FSImage in " + (end - start) / 1000 + " seconds."); } finally { fin.close(); raFile.close(); } } private void loadInternal(RandomAccessFile raFile, FileInputStream fin) throws IOException { if (!FSImageUtil.checkFileFormat(raFile)) { throw new IOException("Unrecognized file format"); } FileSummary summary = FSImageUtil.loadSummary(raFile); if (requireSameLayoutVersion && summary.getLayoutVersion() != HdfsServerConstants.NAMENODE_LAYOUT_VERSION) { throw new IOException("Image version " + summary.getLayoutVersion() + " is not equal to the software version " + HdfsServerConstants.NAMENODE_LAYOUT_VERSION); } FileChannel channel = fin.getChannel(); FSImageFormatPBINode.Loader inodeLoader = new FSImageFormatPBINode.Loader( fsn, this); FSImageFormatPBSnapshot.Loader snapshotLoader = new FSImageFormatPBSnapshot.Loader( fsn, this); ArrayList<FileSummary.Section> sections = Lists.newArrayList(summary .getSectionsList()); Collections.sort(sections, new Comparator<FileSummary.Section>() { @Override public int compare(FileSummary.Section s1, FileSummary.Section s2) { SectionName n1 = SectionName.fromString(s1.getName()); SectionName n2 = SectionName.fromString(s2.getName()); if (n1 == null) { return n2 == null ? 0 : -1; } else if (n2 == null) { return -1; } else { return n1.ordinal() - n2.ordinal(); } } }); StartupProgress prog = NameNode.getStartupProgress(); /** * beginStep() and the endStep() calls do not match the boundary of the * sections. This is because that the current implementation only allows * a particular step to be started for once. 
*/ Step currentStep = null; for (FileSummary.Section s : sections) { channel.position(s.getOffset()); InputStream in = new BufferedInputStream(new LimitInputStream(fin, s.getLength())); in = FSImageUtil.wrapInputStreamForCompression(conf, summary.getCodec(), in); String n = s.getName(); switch (SectionName.fromString(n)) { case NS_INFO: loadNameSystemSection(in); break; case STRING_TABLE: loadStringTableSection(in); break; case INODE: { currentStep = new Step(StepType.INODES); prog.beginStep(Phase.LOADING_FSIMAGE, currentStep); inodeLoader.loadINodeSection(in, prog, currentStep); } break; case INODE_REFERENCE: snapshotLoader.loadINodeReferenceSection(in); break; case INODE_DIR: inodeLoader.loadINodeDirectorySection(in); break; case FILES_UNDERCONSTRUCTION: inodeLoader.loadFilesUnderConstructionSection(in); break; case SNAPSHOT: snapshotLoader.loadSnapshotSection(in); break; case SNAPSHOT_DIFF: snapshotLoader.loadSnapshotDiffSection(in); break; case SECRET_MANAGER: { prog.endStep(Phase.LOADING_FSIMAGE, currentStep); Step step = new Step(StepType.DELEGATION_TOKENS); prog.beginStep(Phase.LOADING_FSIMAGE, step); loadSecretManagerSection(in, prog, step); prog.endStep(Phase.LOADING_FSIMAGE, step); } break; case CACHE_MANAGER: { Step step = new Step(StepType.CACHE_POOLS); prog.beginStep(Phase.LOADING_FSIMAGE, step); loadCacheManagerSection(in, prog, step); prog.endStep(Phase.LOADING_FSIMAGE, step); } break; default: LOG.warn("Unrecognized section " + n); break; } } } private void loadNameSystemSection(InputStream in) throws IOException { NameSystemSection s = NameSystemSection.parseDelimitedFrom(in); BlockIdManager blockIdManager = fsn.getBlockIdManager(); blockIdManager.setGenerationStampV1(s.getGenstampV1()); blockIdManager.setGenerationStampV2(s.getGenstampV2()); blockIdManager.setGenerationStampV1Limit(s.getGenstampV1Limit()); blockIdManager.setLastAllocatedBlockId(s.getLastAllocatedBlockId()); imgTxId = s.getTransactionId(); if (s.hasRollingUpgradeStartTime() && fsn.getFSImage().hasRollbackFSImage()) { // we set the rollingUpgradeInfo only when we make sure we have the // rollback image fsn.setRollingUpgradeInfo(true, s.getRollingUpgradeStartTime()); } } private void loadStringTableSection(InputStream in) throws IOException { StringTableSection s = StringTableSection.parseDelimitedFrom(in); ctx.stringTable = new String[s.getNumEntry() + 1]; for (int i = 0; i < s.getNumEntry(); ++i) { StringTableSection.Entry e = StringTableSection.Entry .parseDelimitedFrom(in); ctx.stringTable[e.getId()] = e.getStr(); } } private void loadSecretManagerSection(InputStream in, StartupProgress prog, Step currentStep) throws IOException { SecretManagerSection s = SecretManagerSection.parseDelimitedFrom(in); int numKeys = s.getNumKeys(), numTokens = s.getNumTokens(); ArrayList<SecretManagerSection.DelegationKey> keys = Lists .newArrayListWithCapacity(numKeys); ArrayList<SecretManagerSection.PersistToken> tokens = Lists .newArrayListWithCapacity(numTokens); for (int i = 0; i < numKeys; ++i) keys.add(SecretManagerSection.DelegationKey.parseDelimitedFrom(in)); prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numTokens); Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep); for (int i = 0; i < numTokens; ++i) { tokens.add(SecretManagerSection.PersistToken.parseDelimitedFrom(in)); counter.increment(); } fsn.loadSecretManagerState(s, keys, tokens); } private void loadCacheManagerSection(InputStream in, StartupProgress prog, Step currentStep) throws IOException { CacheManagerSection s = 
          CacheManagerSection.parseDelimitedFrom(in);
      int numPools = s.getNumPools();
      ArrayList<CachePoolInfoProto> pools = Lists
          .newArrayListWithCapacity(numPools);
      ArrayList<CacheDirectiveInfoProto> directives = Lists
          .newArrayListWithCapacity(s.getNumDirectives());
      prog.setTotal(Phase.LOADING_FSIMAGE, currentStep, numPools);
      Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, currentStep);
      for (int i = 0; i < numPools; ++i) {
        pools.add(CachePoolInfoProto.parseDelimitedFrom(in));
        counter.increment();
      }
      for (int i = 0; i < s.getNumDirectives(); ++i)
        directives.add(CacheDirectiveInfoProto.parseDelimitedFrom(in));
      fsn.getCacheManager().loadState(
          new CacheManager.PersistState(s, pools, directives));
    }
  }

  public static final class Saver {
    public static final int CHECK_CANCEL_INTERVAL = 4096;

    private final SaveNamespaceContext context;
    private final SaverContext saverContext;
    private long currentOffset = FSImageUtil.MAGIC_HEADER.length;
    private MD5Hash savedDigest;

    private FileChannel fileChannel;
    // OutputStream for the section data
    private OutputStream sectionOutputStream;
    private CompressionCodec codec;
    private OutputStream underlyingOutputStream;

    Saver(SaveNamespaceContext context) {
      this.context = context;
      this.saverContext = new SaverContext();
    }

    public MD5Hash getSavedDigest() {
      return savedDigest;
    }

    public SaveNamespaceContext getContext() {
      return context;
    }

    public SaverContext getSaverContext() {
      return saverContext;
    }

    public void commitSection(FileSummary.Builder summary, SectionName name)
        throws IOException {
      long oldOffset = currentOffset;
      flushSectionOutputStream();
      if (codec != null) {
        sectionOutputStream = codec.createOutputStream(underlyingOutputStream);
      } else {
        sectionOutputStream = underlyingOutputStream;
      }
      long length = fileChannel.position() - oldOffset;
      summary.addSections(FileSummary.Section.newBuilder().setName(name.name)
          .setLength(length).setOffset(currentOffset));
      currentOffset += length;
    }

    private void flushSectionOutputStream() throws IOException {
      if (codec != null) {
        ((CompressorStream) sectionOutputStream).finish();
      }
      sectionOutputStream.flush();
    }

    void save(File file, FSImageCompression compression) throws IOException {
      FileOutputStream fout = new FileOutputStream(file);
      fileChannel = fout.getChannel();
      try {
        saveInternal(fout, compression, file.getAbsolutePath());
      } finally {
        fout.close();
      }
    }

    private static void saveFileSummary(OutputStream out, FileSummary summary)
        throws IOException {
      summary.writeDelimitedTo(out);
      int length = getOndiskTrunkSize(summary);
      byte[] lengthBytes = new byte[4];
      ByteBuffer.wrap(lengthBytes).asIntBuffer().put(length);
      out.write(lengthBytes);
    }

    private void saveInodes(FileSummary.Builder summary) throws IOException {
      FSImageFormatPBINode.Saver saver = new FSImageFormatPBINode.Saver(this,
          summary);

      saver.serializeINodeSection(sectionOutputStream);
      saver.serializeINodeDirectorySection(sectionOutputStream);
      saver.serializeFilesUCSection(sectionOutputStream);
    }

    private void saveSnapshots(FileSummary.Builder summary) throws IOException {
      FSImageFormatPBSnapshot.Saver snapshotSaver = new FSImageFormatPBSnapshot.Saver(
          this, summary, context, context.getSourceNamesystem());

      snapshotSaver.serializeSnapshotSection(sectionOutputStream);
      snapshotSaver.serializeSnapshotDiffSection(sectionOutputStream);
      snapshotSaver.serializeINodeReferenceSection(sectionOutputStream);
    }

    private void saveInternal(FileOutputStream fout,
        FSImageCompression compression, String filePath) throws IOException {
      StartupProgress prog = NameNode.getStartupProgress();
      MessageDigest digester = MD5Hash.getDigester();

      underlyingOutputStream = new DigestOutputStream(new BufferedOutputStream(
          fout), digester);
      underlyingOutputStream.write(FSImageUtil.MAGIC_HEADER);

      fileChannel = fout.getChannel();

      FileSummary.Builder b = FileSummary.newBuilder()
          .setOndiskVersion(FSImageUtil.FILE_VERSION)
          .setLayoutVersion(
              context.getSourceNamesystem().getEffectiveLayoutVersion());

      codec = compression.getImageCodec();
      if (codec != null) {
        b.setCodec(codec.getClass().getCanonicalName());
        sectionOutputStream = codec.createOutputStream(underlyingOutputStream);
      } else {
        sectionOutputStream = underlyingOutputStream;
      }

      saveNameSystemSection(b);
      // Check for cancellation right after serializing the name system section.
      // Some unit tests, such as TestSaveNamespace#testCancelSaveNameSpace
      // depends on this behavior.
      context.checkCancelled();

      Step step = new Step(StepType.INODES, filePath);
      prog.beginStep(Phase.SAVING_CHECKPOINT, step);
      saveInodes(b);
      saveSnapshots(b);
      prog.endStep(Phase.SAVING_CHECKPOINT, step);

      step = new Step(StepType.DELEGATION_TOKENS, filePath);
      prog.beginStep(Phase.SAVING_CHECKPOINT, step);
      saveSecretManagerSection(b);
      prog.endStep(Phase.SAVING_CHECKPOINT, step);

      step = new Step(StepType.CACHE_POOLS, filePath);
      prog.beginStep(Phase.SAVING_CHECKPOINT, step);
      saveCacheManagerSection(b);
      prog.endStep(Phase.SAVING_CHECKPOINT, step);

      saveStringTableSection(b);

      // We use the underlyingOutputStream to write the header. Therefore flush
      // the buffered stream (which is potentially compressed) first.
      flushSectionOutputStream();

      FileSummary summary = b.build();
      saveFileSummary(underlyingOutputStream, summary);
      underlyingOutputStream.close();
      savedDigest = new MD5Hash(digester.digest());
    }

    private void saveSecretManagerSection(FileSummary.Builder summary)
        throws IOException {
      final FSNamesystem fsn = context.getSourceNamesystem();
      DelegationTokenSecretManager.SecretManagerState state = fsn
          .saveSecretManagerState();
      state.section.writeDelimitedTo(sectionOutputStream);
      for (SecretManagerSection.DelegationKey k : state.keys)
        k.writeDelimitedTo(sectionOutputStream);
      for (SecretManagerSection.PersistToken t : state.tokens)
        t.writeDelimitedTo(sectionOutputStream);

      commitSection(summary, SectionName.SECRET_MANAGER);
    }

    private void saveCacheManagerSection(FileSummary.Builder summary)
        throws IOException {
      final FSNamesystem fsn = context.getSourceNamesystem();
      CacheManager.PersistState state = fsn.getCacheManager().saveState();
      state.section.writeDelimitedTo(sectionOutputStream);

      for (CachePoolInfoProto p : state.pools)
        p.writeDelimitedTo(sectionOutputStream);

      for (CacheDirectiveInfoProto p : state.directives)
        p.writeDelimitedTo(sectionOutputStream);

      commitSection(summary, SectionName.CACHE_MANAGER);
    }

    private void saveNameSystemSection(FileSummary.Builder summary)
        throws IOException {
      final FSNamesystem fsn = context.getSourceNamesystem();
      OutputStream out = sectionOutputStream;
      BlockIdManager blockIdManager = fsn.getBlockIdManager();
      NameSystemSection.Builder b = NameSystemSection.newBuilder()
          .setGenstampV1(blockIdManager.getGenerationStampV1())
          .setGenstampV1Limit(blockIdManager.getGenerationStampV1Limit())
          .setGenstampV2(blockIdManager.getGenerationStampV2())
          .setLastAllocatedBlockId(blockIdManager.getLastAllocatedBlockId())
          .setTransactionId(context.getTxId());

      // We use the non-locked version of getNamespaceInfo here since
      // the coordinating thread of saveNamespace already has read-locked
      // the namespace for us. If we attempt to take another readlock
      // from the actual saver thread, there's a potential of a
      // fairness-related deadlock. See the comments on HDFS-2223.
      b.setNamespaceId(fsn.unprotectedGetNamespaceInfo().getNamespaceID());
      if (fsn.isRollingUpgrade()) {
        b.setRollingUpgradeStartTime(fsn.getRollingUpgradeInfo().getStartTime());
      }
      NameSystemSection s = b.build();
      s.writeDelimitedTo(out);

      commitSection(summary, SectionName.NS_INFO);
    }

    private void saveStringTableSection(FileSummary.Builder summary)
        throws IOException {
      OutputStream out = sectionOutputStream;
      StringTableSection.Builder b = StringTableSection.newBuilder()
          .setNumEntry(saverContext.stringMap.size());
      b.build().writeDelimitedTo(out);
      for (Entry<String, Integer> e : saverContext.stringMap.entrySet()) {
        StringTableSection.Entry.Builder eb = StringTableSection.Entry
            .newBuilder().setId(e.getValue()).setStr(e.getKey());
        eb.build().writeDelimitedTo(out);
      }
      commitSection(summary, SectionName.STRING_TABLE);
    }
  }

  /**
   * Supported section name. The order of the enum determines the order of
   * loading.
   */
  public enum SectionName {
    NS_INFO("NS_INFO"),
    STRING_TABLE("STRING_TABLE"),
    EXTENDED_ACL("EXTENDED_ACL"),
    INODE("INODE"),
    INODE_REFERENCE("INODE_REFERENCE"),
    SNAPSHOT("SNAPSHOT"),
    INODE_DIR("INODE_DIR"),
    FILES_UNDERCONSTRUCTION("FILES_UNDERCONSTRUCTION"),
    SNAPSHOT_DIFF("SNAPSHOT_DIFF"),
    SECRET_MANAGER("SECRET_MANAGER"),
    CACHE_MANAGER("CACHE_MANAGER");

    private static final SectionName[] values = SectionName.values();

    public static SectionName fromString(String name) {
      for (SectionName n : values) {
        if (n.name.equals(name))
          return n;
      }
      return null;
    }

    private final String name;

    private SectionName(String name) {
      this.name = name;
    }
  }

  private static int getOndiskTrunkSize(com.google.protobuf.GeneratedMessage s) {
    return CodedOutputStream.computeRawVarint32Size(s.getSerializedSize())
        + s.getSerializedSize();
  }

  private FSImageFormatProtobuf() {
  }
}
23,068
35.792663
98
java
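Note (added sketch, not part of the stored source above): the Saver writes every section as length-delimited protobuf messages, records each section's offset and length in commitSection, and only then appends the FileSummary followed by a 4-byte length (saveFileSummary, getOndiskTrunkSize), so a reader has to start from the tail of the file. Below is a minimal, hedged sketch of that tail read; FileSummarySketch and readSummary are hypothetical names, FileSummary is assumed to be the same generated protobuf message the Saver uses, and the project's real loader performs an equivalent read with additional validation.

// Sketch only: locate and parse the trailing FileSummary written by
// saveFileSummary above. Assumes the generated FileSummary message is
// importable; class and method names here are illustrative.
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.RandomAccessFile;

class FileSummarySketch {
  static FileSummary readSummary(RandomAccessFile file) throws IOException {
    final int LENGTH_FIELD = 4;         // the 4-byte length saveFileSummary appends last
    long fileLength = file.length();
    file.seek(fileLength - LENGTH_FIELD);
    int summaryLength = file.readInt(); // big-endian, matching ByteBuffer's put above
    file.seek(fileLength - LENGTH_FIELD - summaryLength);
    byte[] summaryBytes = new byte[summaryLength];
    file.readFully(summaryBytes);
    // saveFileSummary used writeDelimitedTo, so the bytes begin with a varint
    // length prefix; parseDelimitedFrom consumes exactly that form.
    return FileSummary.parseDelimitedFrom(new ByteArrayInputStream(summaryBytes));
  }
}

Placing the summary at the tail lets the image be produced in one streaming pass: section offsets are only known after each section has been flushed, which is exactly what commitSection tracks via currentOffset.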
hadoop
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

import java.io.Closeable;
import java.io.IOException;

/**
 * A generic abstract class to support reading edits log data from
 * persistent storage.
 *
 * It should stream bytes from the storage exactly as they were written
 * into the #{@link EditLogOutputStream}.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class EditLogInputStream implements Closeable {
  private FSEditLogOp cachedOp = null;

  /**
   * Returns the name of the currently active underlying stream. The default
   * implementation returns the same value as getName unless overridden by the
   * subclass.
   *
   * @return String name of the currently active underlying stream
   */
  public String getCurrentStreamName() {
    return getName();
  }

  /**
   * @return the name of the EditLogInputStream
   */
  public abstract String getName();

  /**
   * @return the first transaction which will be found in this stream
   */
  public abstract long getFirstTxId();

  /**
   * @return the last transaction which will be found in this stream
   */
  public abstract long getLastTxId();

  /**
   * Close the stream.
   * @throws IOException if an error occurred while closing
   */
  @Override
  public abstract void close() throws IOException;

  /**
   * Read an operation from the stream
   * @return an operation from the stream or null if at end of stream
   * @throws IOException if there is an error reading from the stream
   */
  public FSEditLogOp readOp() throws IOException {
    FSEditLogOp ret;
    if (cachedOp != null) {
      ret = cachedOp;
      cachedOp = null;
      return ret;
    }
    return nextOp();
  }

  /**
   * Position the stream so that a valid operation can be read from it with
   * readOp().
   *
   * This method can be used to skip over corrupted sections of edit logs.
   */
  public void resync() {
    if (cachedOp != null) {
      return;
    }
    cachedOp = nextValidOp();
  }

  /**
   * Get the next operation from the stream storage.
   *
   * @return an operation from the stream or null if at end of stream
   * @throws IOException if there is an error reading from the stream
   */
  protected abstract FSEditLogOp nextOp() throws IOException;

  /**
   * Go through the next operation from the stream storage.
   * @return the txid of the next operation.
   */
  protected long scanNextOp() throws IOException {
    FSEditLogOp next = readOp();
    return next != null ? next.txid : HdfsServerConstants.INVALID_TXID;
  }

  /**
   * Get the next valid operation from the stream storage.
   *
   * This is exactly like nextOp, except that we attempt to skip over damaged
   * parts of the edit log
   *
   * @return an operation from the stream or null if at end of stream
   */
  protected FSEditLogOp nextValidOp() {
    // This is a trivial implementation which just assumes that any errors mean
    // that there is nothing more of value in the log. Subclasses that support
    // error recovery will want to override this.
    try {
      return nextOp();
    } catch (Throwable e) {
      return null;
    }
  }

  /**
   * Skip edit log operations up to a given transaction ID, or until the
   * end of the edit log is reached.
   *
   * After this function returns, the next call to readOp will return either
   * end-of-file (null) or a transaction with a txid equal to or higher than
   * the one we asked for.
   *
   * @param txid The transaction ID to read up until.
   * @return Returns true if we found a transaction ID greater than
   *         or equal to 'txid' in the log.
   */
  public boolean skipUntil(long txid) throws IOException {
    while (true) {
      FSEditLogOp op = readOp();
      if (op == null) {
        return false;
      }
      if (op.getTransactionId() >= txid) {
        cachedOp = op;
        return true;
      }
    }
  }

  /**
   * return the cachedOp, and reset it to null.
   */
  FSEditLogOp getCachedOp() {
    FSEditLogOp op = this.cachedOp;
    cachedOp = null;
    return op;
  }

  /**
   * Get the layout version of the data in the stream.
   * @return the layout version of the ops in the stream.
   * @throws IOException if there is an error reading the version
   */
  public abstract int getVersion(boolean verifyVersion) throws IOException;

  /**
   * Get the "position" of in the stream. This is useful for
   * debugging and operational purposes.
   *
   * Different stream types can have a different meaning for
   * what the position is. For file streams it means the byte offset
   * from the start of the file.
   *
   * @return the position in the stream
   */
  public abstract long getPosition();

  /**
   * Return the size of the current edits log or -1 if unknown.
   *
   * @return long size of the current edits log or -1 if unknown
   */
  public abstract long length() throws IOException;

  /**
   * Return true if this stream is in progress, false if it is finalized.
   */
  public abstract boolean isInProgress();

  /**
   * Set the maximum opcode size in bytes.
   */
  public abstract void setMaxOpSize(int maxOpSize);

  /**
   * Returns true if we are currently reading the log from a local disk or an
   * even faster data source (e.g. a byte buffer).
   */
  public abstract boolean isLocalLog();
}
6,345
28.793427
79
java
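Note (added sketch, not part of the stored source above): the readOp/skipUntil contract in EditLogInputStream implies a simple consumption pattern: position the stream at a transaction ID, then drain it until readOp returns null. The sketch below is a hedged illustration; EditReplaySketch, replayFrom and the "apply" step are hypothetical, and it assumes an already-opened concrete subclass instance in the same package. Only the methods shown in the class above are real.

// Sketch only: replaying a finalized edit segment through the contract above.
import java.io.IOException;

class EditReplaySketch {
  static long replayFrom(EditLogInputStream elis, long startTxId)
      throws IOException {
    long lastSeen = startTxId - 1;
    // Position the stream: afterwards readOp() yields either null or an op
    // whose txid is >= startTxId (skipUntil caches that op rather than
    // discarding it).
    if (!elis.skipUntil(startTxId)) {
      return lastSeen;                  // segment ends before startTxId
    }
    FSEditLogOp op;
    while ((op = elis.readOp()) != null) {
      // apply(op) would go here; this sketch only tracks the highest txid.
      lastSeen = op.getTransactionId();
    }
    return lastSeen;
  }
}

Because skipUntil stores the first op at or past the requested txid in cachedOp, the subsequent readOp returns that op instead of skipping it, which is what makes this loop safe to start mid-segment.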