repo (stringlengths 1-191, ⌀) | file (stringlengths 23-351) | code (stringlengths 0-5.32M) | file_length (int64 0-5.32M) | avg_line_length (float64 0-2.9k) | max_line_length (int64 0-288k) | extension_type (stringclasses, 1 value)
---|---|---|---|---|---|---|
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import javax.net.SocketFactory;
import com.google.common.base.Optional;
import com.google.common.collect.Maps;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurationTaskStatus;
import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ListReconfigurablePropertiesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ListReconfigurablePropertiesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
import com.google.common.primitives.Longs;
import com.google.protobuf.ByteString;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* This class is the client side translator to translate the requests made on
* {@link ClientDatanodeProtocol} interfaces to the RPC server implementing
* {@link ClientDatanodeProtocolPB}.
*/
@InterfaceAudience.Private
@InterfaceStability.Stable
public class ClientDatanodeProtocolTranslatorPB implements
ProtocolMetaInterface, ClientDatanodeProtocol,
ProtocolTranslator, Closeable {
public static final Log LOG = LogFactory
.getLog(ClientDatanodeProtocolTranslatorPB.class);
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final ClientDatanodeProtocolPB rpcProxy;
private final static RefreshNamenodesRequestProto VOID_REFRESH_NAMENODES =
RefreshNamenodesRequestProto.newBuilder().build();
private final static GetDatanodeInfoRequestProto VOID_GET_DATANODE_INFO =
GetDatanodeInfoRequestProto.newBuilder().build();
private final static GetReconfigurationStatusRequestProto VOID_GET_RECONFIG_STATUS =
GetReconfigurationStatusRequestProto.newBuilder().build();
private final static StartReconfigurationRequestProto VOID_START_RECONFIG =
StartReconfigurationRequestProto.newBuilder().build();
private static final ListReconfigurablePropertiesRequestProto
VOID_LIST_RECONFIGURABLE_PROPERTIES =
ListReconfigurablePropertiesRequestProto.newBuilder().build();
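  /**
   * Constructor that authenticates to the datanode using the block access
   * token carried by {@code locatedBlock}.
   */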
public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
Configuration conf, int socketTimeout, boolean connectToDnViaHostname,
LocatedBlock locatedBlock) throws IOException {
    rpcProxy = createClientDatanodeProtocolProxy(datanodeid, conf,
        socketTimeout, connectToDnViaHostname, locatedBlock);
}
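  /**
   * Constructor that connects to the given address as the supplied user,
   * using the given socket factory and a zero socket timeout.
   */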
public ClientDatanodeProtocolTranslatorPB(InetSocketAddress addr,
UserGroupInformation ticket, Configuration conf, SocketFactory factory)
throws IOException {
rpcProxy = createClientDatanodeProtocolProxy(addr, ticket, conf, factory, 0);
}
/**
* Constructor.
* @param datanodeid Datanode to connect to.
* @param conf Configuration.
* @param socketTimeout Socket timeout to use.
* @param connectToDnViaHostname connect to the Datanode using its hostname
* @throws IOException
*/
public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
Configuration conf, int socketTimeout, boolean connectToDnViaHostname)
throws IOException {
final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
}
rpcProxy = createClientDatanodeProtocolProxy(addr,
UserGroupInformation.getCurrentUser(), conf,
NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
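  /**
   * Creates a proxy to the datanode's IPC address that authenticates with
   * the block token of {@code locatedBlock}; see the HDFS-1965 note in the
   * method body about connection reuse.
   */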
static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
DatanodeID datanodeid, Configuration conf, int socketTimeout,
boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
}
// Since we're creating a new UserGroupInformation here, we know that no
// future RPC proxies will be able to re-use the same connection. And
// usages of this proxy tend to be one-off calls.
//
// This is a temporary fix: callers should really achieve this by using
// RPC.stopProxy() on the resulting object, but this is currently not
// working in trunk. See the discussion on HDFS-1965.
Configuration confWithNoIpcIdle = new Configuration(conf);
confWithNoIpcIdle.setInt(CommonConfigurationKeysPublic
.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
UserGroupInformation ticket = UserGroupInformation
.createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
ticket.addToken(locatedBlock.getBlockToken());
return createClientDatanodeProtocolProxy(addr, ticket, confWithNoIpcIdle,
NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
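  /**
   * Creates a {@link ClientDatanodeProtocolPB} proxy over protobuf RPC to
   * the given address.
   */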
static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
SocketFactory factory, int socketTimeout) throws IOException {
RPC.setProtocolEngine(conf, ClientDatanodeProtocolPB.class,
ProtobufRpcEngine.class);
return RPC.getProxy(ClientDatanodeProtocolPB.class,
RPC.getProtocolVersion(ClientDatanodeProtocolPB.class), addr, ticket,
conf, factory, socketTimeout);
}
@Override
public void close() {
RPC.stopProxy(rpcProxy);
}
@Override
public long getReplicaVisibleLength(ExtendedBlock b) throws IOException {
GetReplicaVisibleLengthRequestProto req = GetReplicaVisibleLengthRequestProto
.newBuilder().setBlock(PBHelper.convert(b)).build();
try {
return rpcProxy.getReplicaVisibleLength(NULL_CONTROLLER, req).getLength();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void refreshNamenodes() throws IOException {
try {
rpcProxy.refreshNamenodes(NULL_CONTROLLER, VOID_REFRESH_NAMENODES);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void deleteBlockPool(String bpid, boolean force) throws IOException {
DeleteBlockPoolRequestProto req = DeleteBlockPoolRequestProto.newBuilder()
.setBlockPool(bpid).setForce(force).build();
try {
rpcProxy.deleteBlockPool(NULL_CONTROLLER, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
Token<BlockTokenIdentifier> token) throws IOException {
GetBlockLocalPathInfoRequestProto req =
GetBlockLocalPathInfoRequestProto.newBuilder()
.setBlock(PBHelper.convert(block))
.setToken(PBHelper.convert(token)).build();
GetBlockLocalPathInfoResponseProto resp;
try {
resp = rpcProxy.getBlockLocalPathInfo(NULL_CONTROLLER, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
return new BlockLocalPathInfo(PBHelper.convert(resp.getBlock()),
resp.getLocalPath(), resp.getLocalMetaPath());
}
@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil.isMethodSupported(rpcProxy,
ClientDatanodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(ClientDatanodeProtocolPB.class), methodName);
}
@Override
public Object getUnderlyingProxyObject() {
return rpcProxy;
}
@Override
public HdfsBlocksMetadata getHdfsBlocksMetadata(String blockPoolId,
long[] blockIds,
List<Token<BlockTokenIdentifier>> tokens) throws IOException {
List<TokenProto> tokensProtos =
new ArrayList<TokenProto>(tokens.size());
for (Token<BlockTokenIdentifier> t : tokens) {
tokensProtos.add(PBHelper.convert(t));
}
// Build the request
GetHdfsBlockLocationsRequestProto request =
GetHdfsBlockLocationsRequestProto.newBuilder()
.setBlockPoolId(blockPoolId)
.addAllBlockIds(Longs.asList(blockIds))
.addAllTokens(tokensProtos)
.build();
// Send the RPC
GetHdfsBlockLocationsResponseProto response;
try {
response = rpcProxy.getHdfsBlockLocations(NULL_CONTROLLER, request);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
// List of volumes in the response
List<ByteString> volumeIdsByteStrings = response.getVolumeIdsList();
List<byte[]> volumeIds = new ArrayList<byte[]>(volumeIdsByteStrings.size());
for (ByteString bs : volumeIdsByteStrings) {
volumeIds.add(bs.toByteArray());
}
// Array of indexes into the list of volumes, one per block
List<Integer> volumeIndexes = response.getVolumeIndexesList();
// Parsed HdfsVolumeId values, one per block
return new HdfsBlocksMetadata(blockPoolId, blockIds,
volumeIds, volumeIndexes);
}
@Override
public void shutdownDatanode(boolean forUpgrade) throws IOException {
ShutdownDatanodeRequestProto request = ShutdownDatanodeRequestProto
.newBuilder().setForUpgrade(forUpgrade).build();
try {
rpcProxy.shutdownDatanode(NULL_CONTROLLER, request);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public DatanodeLocalInfo getDatanodeInfo() throws IOException {
GetDatanodeInfoResponseProto response;
try {
response = rpcProxy.getDatanodeInfo(NULL_CONTROLLER, VOID_GET_DATANODE_INFO);
return PBHelper.convert(response.getLocalInfo());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void startReconfiguration() throws IOException {
try {
rpcProxy.startReconfiguration(NULL_CONTROLLER, VOID_START_RECONFIG);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public ReconfigurationTaskStatus getReconfigurationStatus() throws IOException {
GetReconfigurationStatusResponseProto response;
Map<PropertyChange, Optional<String>> statusMap = null;
long startTime;
long endTime = 0;
try {
response = rpcProxy.getReconfigurationStatus(NULL_CONTROLLER,
VOID_GET_RECONFIG_STATUS);
startTime = response.getStartTime();
if (response.hasEndTime()) {
endTime = response.getEndTime();
}
if (response.getChangesCount() > 0) {
statusMap = Maps.newHashMap();
for (GetReconfigurationStatusConfigChangeProto change :
response.getChangesList()) {
PropertyChange pc = new PropertyChange(
change.getName(), change.getNewValue(), change.getOldValue());
String errorMessage = null;
if (change.hasErrorMessage()) {
errorMessage = change.getErrorMessage();
}
statusMap.put(pc, Optional.fromNullable(errorMessage));
}
}
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
return new ReconfigurationTaskStatus(startTime, endTime, statusMap);
}
@Override
public List<String> listReconfigurableProperties()
throws IOException {
ListReconfigurablePropertiesResponseProto response;
try {
response = rpcProxy.listReconfigurableProperties(NULL_CONTROLLER,
VOID_LIST_RECONFIGURABLE_PROPERTIES);
return response.getNameList();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void triggerBlockReport(BlockReportOptions options)
throws IOException {
try {
rpcProxy.triggerBlockReport(NULL_CONTROLLER,
TriggerBlockReportRequestProto.newBuilder().
setIncremental(options.isIncremental()).
build());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
}
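// A minimal usage sketch (not part of the original file): how a caller such as an
// admin tool might query a datanode through this translator. The datanodeId and
// conf values are assumed to be supplied by the caller; the timeout and hostname
// flag are illustrative.
//
//   ClientDatanodeProtocolTranslatorPB proxy =
//       new ClientDatanodeProtocolTranslatorPB(datanodeId, conf, 60000, false);
//   try {
//     DatanodeLocalInfo info = proxy.getDatanodeInfo();
//     proxy.triggerBlockReport(new BlockReportOptions.Factory()
//         .setIncremental(false).build());
//   } finally {
//     proxy.close();
//   }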
| 15,976 | 42.181081 | 116 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.NamenodeProtocolService;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
/**
 * Protocol that a secondary NameNode uses to communicate with the NameNode.
 * It's used to get part of the NameNode state.
*
* Note: This extends the protocolbuffer service based interface to
* add annotations required for security.
*/
@KerberosInfo(
serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
clientPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
@ProtocolInfo(protocolName =
"org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol",
protocolVersion = 1)
@InterfaceAudience.Private
public interface NamenodeProtocolPB extends
NamenodeProtocolService.BlockingInterface {
}
| 1,788 | 39.659091 | 92 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.token.TokenInfo;
@InterfaceAudience.Private
@InterfaceStability.Stable
@KerberosInfo(
serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
@TokenInfo(DelegationTokenSelector.class)
@ProtocolInfo(protocolName = HdfsConstants.CLIENT_NAMENODE_PROTOCOL_NAME,
protocolVersion = 1)
/**
 * Protocol that clients use to communicate with the NameNode.
*
* Note: This extends the protocolbuffer service based interface to
* add annotations required for security.
*/
public interface ClientNamenodeProtocolPB extends
ClientNamenodeProtocol.BlockingInterface {
}
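// A minimal sketch (not part of the original file) of how a PB interface like this
// one is bound to the protobuf RPC engine on the client side, following the same
// pattern used by the translator classes in this package. The addr, ticket, conf,
// factory and socketTimeout values are assumed to be supplied by the caller.
//
//   RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
//       ProtobufRpcEngine.class);
//   ClientNamenodeProtocolPB proxy = RPC.getProxy(ClientNamenodeProtocolPB.class,
//       RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), addr, ticket,
//       conf, factory, socketTimeout);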
| 1,960 | 40.723404 | 97 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryResponseProto;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* Implementation for protobuf service that forwards requests
* received on {@link InterDatanodeProtocolPB} to the
* {@link InterDatanodeProtocol} server implementation.
*/
@InterfaceAudience.Private
public class InterDatanodeProtocolServerSideTranslatorPB implements
InterDatanodeProtocolPB {
private final InterDatanodeProtocol impl;
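  /** @param impl the {@link InterDatanodeProtocol} server implementation to delegate to */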
public InterDatanodeProtocolServerSideTranslatorPB(InterDatanodeProtocol impl) {
this.impl = impl;
}
@Override
public InitReplicaRecoveryResponseProto initReplicaRecovery(
RpcController unused, InitReplicaRecoveryRequestProto request)
throws ServiceException {
RecoveringBlock b = PBHelper.convert(request.getBlock());
ReplicaRecoveryInfo r;
try {
r = impl.initReplicaRecovery(b);
} catch (IOException e) {
throw new ServiceException(e);
}
if (r == null) {
return InitReplicaRecoveryResponseProto.newBuilder()
.setReplicaFound(false)
.build();
} else {
return InitReplicaRecoveryResponseProto.newBuilder()
.setReplicaFound(true)
.setBlock(PBHelper.convert(r))
.setState(PBHelper.convert(r.getOriginalReplicaState())).build();
}
}
@Override
public UpdateReplicaUnderRecoveryResponseProto updateReplicaUnderRecovery(
RpcController unused, UpdateReplicaUnderRecoveryRequestProto request)
throws ServiceException {
final String storageID;
try {
storageID = impl.updateReplicaUnderRecovery(
PBHelper.convert(request.getBlock()), request.getRecoveryId(),
request.getNewBlockId(), request.getNewLength());
} catch (IOException e) {
throw new ServiceException(e);
}
return UpdateReplicaUnderRecoveryResponseProto.newBuilder()
.setStorageUuid(storageID).build();
}
}
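// A minimal sketch (not part of the original file) of how the DataNode side might
// expose this translator on its IPC server. The generated service class
// InterDatanodeProtocolService, the datanode instance and the ipcServer are
// assumed to exist in the caller's context; DFSUtil.addPBProtocol names the HDFS
// helper that registers a protobuf service on an RPC server.
//
//   BlockingService service = InterDatanodeProtocolService
//       .newReflectiveBlockingService(
//           new InterDatanodeProtocolServerSideTranslatorPB(datanode));
//   DFSUtil.addPBProtocol(conf, InterDatanodeProtocolPB.class, service, ipcServer);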
| 3,513 | 38.931818 | 113 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CacheReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import com.google.common.base.Preconditions;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
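/**
 * Implementation for protobuf service that forwards requests
 * received on {@link DatanodeProtocolPB} to the
 * {@link DatanodeProtocol} server implementation.
 */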
public class DatanodeProtocolServerSideTranslatorPB implements
DatanodeProtocolPB {
private final DatanodeProtocol impl;
private static final ErrorReportResponseProto
VOID_ERROR_REPORT_RESPONSE_PROTO =
ErrorReportResponseProto.newBuilder().build();
private static final BlockReceivedAndDeletedResponseProto
VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE =
BlockReceivedAndDeletedResponseProto.newBuilder().build();
private static final ReportBadBlocksResponseProto
VOID_REPORT_BAD_BLOCK_RESPONSE =
ReportBadBlocksResponseProto.newBuilder().build();
private static final CommitBlockSynchronizationResponseProto
VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
CommitBlockSynchronizationResponseProto.newBuilder().build();
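  /** @param impl the {@link DatanodeProtocol} server implementation to delegate to */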
public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl) {
this.impl = impl;
}
@Override
public RegisterDatanodeResponseProto registerDatanode(
RpcController controller, RegisterDatanodeRequestProto request)
throws ServiceException {
DatanodeRegistration registration = PBHelper.convert(request
.getRegistration());
DatanodeRegistration registrationResp;
try {
registrationResp = impl.registerDatanode(registration);
} catch (IOException e) {
throw new ServiceException(e);
}
return RegisterDatanodeResponseProto.newBuilder()
.setRegistration(PBHelper.convert(registrationResp)).build();
}
@Override
public HeartbeatResponseProto sendHeartbeat(RpcController controller,
HeartbeatRequestProto request) throws ServiceException {
HeartbeatResponse response;
try {
final StorageReport[] report = PBHelper.convertStorageReports(
request.getReportsList());
VolumeFailureSummary volumeFailureSummary =
request.hasVolumeFailureSummary() ? PBHelper.convertVolumeFailureSummary(
request.getVolumeFailureSummary()) : null;
response = impl.sendHeartbeat(PBHelper.convert(request.getRegistration()),
report, request.getCacheCapacity(), request.getCacheUsed(),
request.getXmitsInProgress(),
request.getXceiverCount(), request.getFailedVolumes(),
volumeFailureSummary, request.getRequestFullBlockReportLease());
} catch (IOException e) {
throw new ServiceException(e);
}
HeartbeatResponseProto.Builder builder = HeartbeatResponseProto
.newBuilder();
DatanodeCommand[] cmds = response.getCommands();
if (cmds != null) {
for (int i = 0; i < cmds.length; i++) {
if (cmds[i] != null) {
builder.addCmds(PBHelper.convert(cmds[i]));
}
}
}
builder.setHaStatus(PBHelper.convert(response.getNameNodeHaState()));
RollingUpgradeStatus rollingUpdateStatus = response
.getRollingUpdateStatus();
if (rollingUpdateStatus != null) {
builder.setRollingUpgradeStatus(PBHelper
.convertRollingUpgradeStatus(rollingUpdateStatus));
}
builder.setFullBlockReportLeaseId(response.getFullBlockReportLeaseId());
return builder.build();
}
@Override
public BlockReportResponseProto blockReport(RpcController controller,
BlockReportRequestProto request) throws ServiceException {
DatanodeCommand cmd = null;
StorageBlockReport[] report =
new StorageBlockReport[request.getReportsCount()];
int index = 0;
for (StorageBlockReportProto s : request.getReportsList()) {
final BlockListAsLongs blocks;
if (s.hasNumberOfBlocks()) { // new style buffer based reports
int num = (int)s.getNumberOfBlocks();
Preconditions.checkState(s.getBlocksCount() == 0,
"cannot send both blocks list and buffers");
blocks = BlockListAsLongs.decodeBuffers(num, s.getBlocksBuffersList());
} else {
blocks = BlockListAsLongs.decodeLongs(s.getBlocksList());
}
report[index++] = new StorageBlockReport(PBHelper.convert(s.getStorage()),
blocks);
}
try {
cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
request.getBlockPoolId(), report,
request.hasContext() ?
PBHelper.convert(request.getContext()) : null);
} catch (IOException e) {
throw new ServiceException(e);
}
BlockReportResponseProto.Builder builder =
BlockReportResponseProto.newBuilder();
if (cmd != null) {
builder.setCmd(PBHelper.convert(cmd));
}
return builder.build();
}
@Override
public CacheReportResponseProto cacheReport(RpcController controller,
CacheReportRequestProto request) throws ServiceException {
DatanodeCommand cmd = null;
try {
cmd = impl.cacheReport(
PBHelper.convert(request.getRegistration()),
request.getBlockPoolId(),
request.getBlocksList());
} catch (IOException e) {
throw new ServiceException(e);
}
CacheReportResponseProto.Builder builder =
CacheReportResponseProto.newBuilder();
if (cmd != null) {
builder.setCmd(PBHelper.convert(cmd));
}
return builder.build();
}
@Override
public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
RpcController controller, BlockReceivedAndDeletedRequestProto request)
throws ServiceException {
List<StorageReceivedDeletedBlocksProto> sBlocks = request.getBlocksList();
StorageReceivedDeletedBlocks[] info =
new StorageReceivedDeletedBlocks[sBlocks.size()];
for (int i = 0; i < sBlocks.size(); i++) {
StorageReceivedDeletedBlocksProto sBlock = sBlocks.get(i);
List<ReceivedDeletedBlockInfoProto> list = sBlock.getBlocksList();
ReceivedDeletedBlockInfo[] rdBlocks =
new ReceivedDeletedBlockInfo[list.size()];
for (int j = 0; j < list.size(); j++) {
rdBlocks[j] = PBHelper.convert(list.get(j));
}
if (sBlock.hasStorage()) {
info[i] = new StorageReceivedDeletedBlocks(
PBHelper.convert(sBlock.getStorage()), rdBlocks);
} else {
info[i] = new StorageReceivedDeletedBlocks(sBlock.getStorageUuid(), rdBlocks);
}
}
try {
impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),
request.getBlockPoolId(), info);
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
}
@Override
public ErrorReportResponseProto errorReport(RpcController controller,
ErrorReportRequestProto request) throws ServiceException {
try {
impl.errorReport(PBHelper.convert(request.getRegistartion()),
request.getErrorCode(), request.getMsg());
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_ERROR_REPORT_RESPONSE_PROTO;
}
@Override
public VersionResponseProto versionRequest(RpcController controller,
VersionRequestProto request) throws ServiceException {
NamespaceInfo info;
try {
info = impl.versionRequest();
} catch (IOException e) {
throw new ServiceException(e);
}
return VersionResponseProto.newBuilder()
.setInfo(PBHelper.convert(info)).build();
}
@Override
public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
ReportBadBlocksRequestProto request) throws ServiceException {
List<LocatedBlockProto> lbps = request.getBlocksList();
    LocatedBlock[] blocks = new LocatedBlock[lbps.size()];
    for (int i = 0; i < lbps.size(); i++) {
blocks[i] = PBHelper.convert(lbps.get(i));
}
try {
impl.reportBadBlocks(blocks);
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_REPORT_BAD_BLOCK_RESPONSE;
}
@Override
public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
RpcController controller, CommitBlockSynchronizationRequestProto request)
throws ServiceException {
List<DatanodeIDProto> dnprotos = request.getNewTaragetsList();
DatanodeID[] dns = new DatanodeID[dnprotos.size()];
for (int i = 0; i < dnprotos.size(); i++) {
dns[i] = PBHelper.convert(dnprotos.get(i));
}
final List<String> sidprotos = request.getNewTargetStoragesList();
final String[] storageIDs = sidprotos.toArray(new String[sidprotos.size()]);
try {
impl.commitBlockSynchronization(PBHelper.convert(request.getBlock()),
request.getNewGenStamp(), request.getNewLength(),
request.getCloseFile(), request.getDeleteBlock(), dns, storageIDs);
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
}
| 12,803 | 42.699659 | 108 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import com.google.common.base.Optional;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.ReconfigurationTaskStatus;
import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusConfigChangeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReconfigurationStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetHdfsBlockLocationsResponseProto.Builder;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ListReconfigurablePropertiesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ListReconfigurablePropertiesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.RefreshNamenodesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.StartReconfigurationResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportResponseProto;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
import com.google.common.primitives.Longs;
import com.google.protobuf.ByteString;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* Implementation for protobuf service that forwards requests
* received on {@link ClientDatanodeProtocolPB} to the
* {@link ClientDatanodeProtocol} server implementation.
*/
@InterfaceAudience.Private
public class ClientDatanodeProtocolServerSideTranslatorPB implements
ClientDatanodeProtocolPB {
private final static RefreshNamenodesResponseProto REFRESH_NAMENODE_RESP =
RefreshNamenodesResponseProto.newBuilder().build();
private final static DeleteBlockPoolResponseProto DELETE_BLOCKPOOL_RESP =
DeleteBlockPoolResponseProto.newBuilder().build();
private final static ShutdownDatanodeResponseProto SHUTDOWN_DATANODE_RESP =
ShutdownDatanodeResponseProto.newBuilder().build();
private final static StartReconfigurationResponseProto START_RECONFIG_RESP =
StartReconfigurationResponseProto.newBuilder().build();
private final static TriggerBlockReportResponseProto TRIGGER_BLOCK_REPORT_RESP =
TriggerBlockReportResponseProto.newBuilder().build();
private final ClientDatanodeProtocol impl;
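  /** @param impl the {@link ClientDatanodeProtocol} server implementation to delegate to */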
public ClientDatanodeProtocolServerSideTranslatorPB(
ClientDatanodeProtocol impl) {
this.impl = impl;
}
@Override
public GetReplicaVisibleLengthResponseProto getReplicaVisibleLength(
RpcController unused, GetReplicaVisibleLengthRequestProto request)
throws ServiceException {
long len;
try {
len = impl.getReplicaVisibleLength(PBHelper.convert(request.getBlock()));
} catch (IOException e) {
throw new ServiceException(e);
}
return GetReplicaVisibleLengthResponseProto.newBuilder().setLength(len)
.build();
}
@Override
public RefreshNamenodesResponseProto refreshNamenodes(
RpcController unused, RefreshNamenodesRequestProto request)
throws ServiceException {
try {
impl.refreshNamenodes();
} catch (IOException e) {
throw new ServiceException(e);
}
return REFRESH_NAMENODE_RESP;
}
@Override
public DeleteBlockPoolResponseProto deleteBlockPool(RpcController unused,
DeleteBlockPoolRequestProto request) throws ServiceException {
try {
impl.deleteBlockPool(request.getBlockPool(), request.getForce());
} catch (IOException e) {
throw new ServiceException(e);
}
return DELETE_BLOCKPOOL_RESP;
}
@Override
public GetBlockLocalPathInfoResponseProto getBlockLocalPathInfo(
RpcController unused, GetBlockLocalPathInfoRequestProto request)
throws ServiceException {
BlockLocalPathInfo resp;
try {
resp = impl.getBlockLocalPathInfo(PBHelper.convert(request.getBlock()), PBHelper.convert(request.getToken()));
} catch (IOException e) {
throw new ServiceException(e);
}
return GetBlockLocalPathInfoResponseProto.newBuilder()
.setBlock(PBHelper.convert(resp.getBlock()))
.setLocalPath(resp.getBlockPath()).setLocalMetaPath(resp.getMetaPath())
.build();
}
@Override
public GetHdfsBlockLocationsResponseProto getHdfsBlockLocations(
RpcController controller, GetHdfsBlockLocationsRequestProto request)
throws ServiceException {
HdfsBlocksMetadata resp;
try {
String poolId = request.getBlockPoolId();
List<Token<BlockTokenIdentifier>> tokens =
new ArrayList<Token<BlockTokenIdentifier>>(request.getTokensCount());
for (TokenProto b : request.getTokensList()) {
tokens.add(PBHelper.convert(b));
}
long[] blockIds = Longs.toArray(request.getBlockIdsList());
// Call the real implementation
resp = impl.getHdfsBlocksMetadata(poolId, blockIds, tokens);
} catch (IOException e) {
throw new ServiceException(e);
}
List<ByteString> volumeIdsByteStrings =
new ArrayList<ByteString>(resp.getVolumeIds().size());
for (byte[] b : resp.getVolumeIds()) {
volumeIdsByteStrings.add(ByteString.copyFrom(b));
}
// Build and return the response
Builder builder = GetHdfsBlockLocationsResponseProto.newBuilder();
builder.addAllVolumeIds(volumeIdsByteStrings);
builder.addAllVolumeIndexes(resp.getVolumeIndexes());
return builder.build();
}
@Override
public ShutdownDatanodeResponseProto shutdownDatanode(
RpcController unused, ShutdownDatanodeRequestProto request)
throws ServiceException {
try {
impl.shutdownDatanode(request.getForUpgrade());
} catch (IOException e) {
throw new ServiceException(e);
}
return SHUTDOWN_DATANODE_RESP;
}
  @Override
  public GetDatanodeInfoResponseProto getDatanodeInfo(RpcController unused,
GetDatanodeInfoRequestProto request) throws ServiceException {
GetDatanodeInfoResponseProto res;
try {
res = GetDatanodeInfoResponseProto.newBuilder()
.setLocalInfo(PBHelper.convert(impl.getDatanodeInfo())).build();
} catch (IOException e) {
throw new ServiceException(e);
}
return res;
}
@Override
public StartReconfigurationResponseProto startReconfiguration(
RpcController unused, StartReconfigurationRequestProto request)
throws ServiceException {
try {
impl.startReconfiguration();
} catch (IOException e) {
throw new ServiceException(e);
}
return START_RECONFIG_RESP;
}
@Override
public ListReconfigurablePropertiesResponseProto listReconfigurableProperties(
RpcController controller,
ListReconfigurablePropertiesRequestProto request)
throws ServiceException {
ListReconfigurablePropertiesResponseProto.Builder builder =
ListReconfigurablePropertiesResponseProto.newBuilder();
try {
for (String name : impl.listReconfigurableProperties()) {
builder.addName(name);
}
} catch (IOException e) {
throw new ServiceException(e);
}
return builder.build();
}
@Override
public GetReconfigurationStatusResponseProto getReconfigurationStatus(
RpcController unused, GetReconfigurationStatusRequestProto request)
throws ServiceException {
GetReconfigurationStatusResponseProto.Builder builder =
GetReconfigurationStatusResponseProto.newBuilder();
try {
ReconfigurationTaskStatus status = impl.getReconfigurationStatus();
builder.setStartTime(status.getStartTime());
if (status.stopped()) {
builder.setEndTime(status.getEndTime());
assert status.getStatus() != null;
for (Map.Entry<PropertyChange, Optional<String>> result :
status.getStatus().entrySet()) {
GetReconfigurationStatusConfigChangeProto.Builder changeBuilder =
GetReconfigurationStatusConfigChangeProto.newBuilder();
PropertyChange change = result.getKey();
changeBuilder.setName(change.prop);
changeBuilder.setOldValue(change.oldVal != null ? change.oldVal : "");
if (change.newVal != null) {
changeBuilder.setNewValue(change.newVal);
}
if (result.getValue().isPresent()) {
// Get full stack trace.
changeBuilder.setErrorMessage(result.getValue().get());
}
builder.addChanges(changeBuilder);
}
}
} catch (IOException e) {
throw new ServiceException(e);
}
return builder.build();
}
@Override
public TriggerBlockReportResponseProto triggerBlockReport(
RpcController unused, TriggerBlockReportRequestProto request)
throws ServiceException {
try {
impl.triggerBlockReport(new BlockReportOptions.Factory().
setIncremental(request.getIncremental()).build());
} catch (IOException e) {
throw new ServiceException(e);
}
return TRIGGER_BLOCK_REPORT_RESP;
}
}
| 12,059 | 42.381295 | 117 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
/**
* Protocol used to journal edits to a remote node. Currently,
* this is used to publish edits from the NameNode to a BackupNode.
*
* Note: This extends the protocolbuffer service based interface to
* add annotations required for security.
*/
@KerberosInfo(
serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
clientPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
@ProtocolInfo(protocolName =
"org.apache.hadoop.hdfs.server.protocol.JournalProtocol",
protocolVersion = 1)
@InterfaceAudience.Private
public interface JournalProtocolPB extends
JournalProtocolService.BlockingInterface {
}
| 1,788 | 40.604651 | 90 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.FenceRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.FenceResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentResponseProto;
import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* Implementation for protobuf service that forwards requests
* received on {@link JournalProtocolPB} to the
* {@link JournalProtocol} server implementation.
*/
@InterfaceAudience.Private
public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB {
/** Server side implementation to delegate the requests to */
private final JournalProtocol impl;
private final static JournalResponseProto VOID_JOURNAL_RESPONSE =
JournalResponseProto.newBuilder().build();
private final static StartLogSegmentResponseProto
VOID_START_LOG_SEGMENT_RESPONSE =
StartLogSegmentResponseProto.newBuilder().build();
public JournalProtocolServerSideTranslatorPB(JournalProtocol impl) {
this.impl = impl;
}
/** @see JournalProtocol#journal */
@Override
public JournalResponseProto journal(RpcController unused,
JournalRequestProto req) throws ServiceException {
try {
impl.journal(PBHelper.convert(req.getJournalInfo()), req.getEpoch(),
req.getFirstTxnId(), req.getNumTxns(), req.getRecords().toByteArray());
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_JOURNAL_RESPONSE;
}
/** @see JournalProtocol#startLogSegment */
@Override
public StartLogSegmentResponseProto startLogSegment(RpcController controller,
StartLogSegmentRequestProto req) throws ServiceException {
try {
impl.startLogSegment(PBHelper.convert(req.getJournalInfo()),
req.getEpoch(), req.getTxid());
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_START_LOG_SEGMENT_RESPONSE;
}
@Override
public FenceResponseProto fence(RpcController controller,
FenceRequestProto req) throws ServiceException {
try {
FenceResponse resp = impl.fence(PBHelper.convert(req.getJournalInfo()), req.getEpoch(),
req.getFencerInfo());
return FenceResponseProto.newBuilder().setInSync(resp.isInSync())
.setLastTransactionId(resp.getLastTransactionId())
.setPreviousEpoch(resp.getPreviousEpoch()).build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
}
| 3,862 | 39.239583 | 96 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import javax.net.SocketFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InitReplicaRecoveryResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.UpdateReplicaUnderRecoveryRequestProto;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* This class is the client side translator to translate the requests made on
* {@link InterDatanodeProtocol} interfaces to the RPC server implementing
* {@link InterDatanodeProtocolPB}.
*/
@InterfaceAudience.Private
@InterfaceStability.Stable
public class InterDatanodeProtocolTranslatorPB implements
ProtocolMetaInterface, InterDatanodeProtocol, Closeable {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
final private InterDatanodeProtocolPB rpcProxy;
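  /**
   * Creates a protobuf RPC proxy to the inter-datanode IPC endpoint at the
   * given address.
   */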
public InterDatanodeProtocolTranslatorPB(InetSocketAddress addr,
UserGroupInformation ugi, Configuration conf, SocketFactory factory,
int socketTimeout)
throws IOException {
RPC.setProtocolEngine(conf, InterDatanodeProtocolPB.class,
ProtobufRpcEngine.class);
rpcProxy = RPC.getProxy(InterDatanodeProtocolPB.class,
RPC.getProtocolVersion(InterDatanodeProtocolPB.class), addr, ugi, conf,
factory, socketTimeout);
}
@Override
public void close() {
RPC.stopProxy(rpcProxy);
}
@Override
public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
throws IOException {
InitReplicaRecoveryRequestProto req = InitReplicaRecoveryRequestProto
.newBuilder().setBlock(PBHelper.convert(rBlock)).build();
InitReplicaRecoveryResponseProto resp;
try {
resp = rpcProxy.initReplicaRecovery(NULL_CONTROLLER, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
if (!resp.getReplicaFound()) {
// No replica found on the remote node.
return null;
} else {
if (!resp.hasBlock() || !resp.hasState()) {
throw new IOException("Replica was found but missing fields. " +
"Req: " + req + "\n" +
"Resp: " + resp);
}
}
BlockProto b = resp.getBlock();
return new ReplicaRecoveryInfo(b.getBlockId(), b.getNumBytes(),
b.getGenStamp(), PBHelper.convert(resp.getState()));
}
@Override
public String updateReplicaUnderRecovery(ExtendedBlock oldBlock,
long recoveryId, long newBlockId, long newLength) throws IOException {
UpdateReplicaUnderRecoveryRequestProto req =
UpdateReplicaUnderRecoveryRequestProto.newBuilder()
.setBlock(PBHelper.convert(oldBlock))
.setNewLength(newLength).setNewBlockId(newBlockId)
.setRecoveryId(recoveryId).build();
try {
return rpcProxy.updateReplicaUnderRecovery(NULL_CONTROLLER, req
).getStorageUuid();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil.isMethodSupported(rpcProxy,
InterDatanodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(InterDatanodeProtocolPB.class), methodName);
}
}
| 5,121 | 39.650794 | 112 | java |
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.InterDatanodeProtocolProtos.InterDatanodeProtocolService;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
@KerberosInfo(
serverPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY,
clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
@ProtocolInfo(protocolName =
"org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol",
protocolVersion = 1)
@InterfaceAudience.Private
public interface InterDatanodeProtocolPB extends
InterDatanodeProtocolService.BlockingInterface {
}
| 1,564 | 43.714286 | 102 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.FenceRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.FenceResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.StartLogSegmentRequestProto;
import org.apache.hadoop.hdfs.server.protocol.FenceResponse;
import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* This class is the client side translator to translate the requests made on
* {@link JournalProtocol} interfaces to the RPC server implementing
* {@link JournalProtocolPB}.
*/
@InterfaceAudience.Private
@InterfaceStability.Stable
public class JournalProtocolTranslatorPB implements ProtocolMetaInterface,
JournalProtocol, Closeable {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final JournalProtocolPB rpcProxy;
public JournalProtocolTranslatorPB(JournalProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override
public void close() {
RPC.stopProxy(rpcProxy);
}
@Override
public void journal(JournalInfo journalInfo, long epoch, long firstTxnId,
int numTxns, byte[] records) throws IOException {
JournalRequestProto req = JournalRequestProto.newBuilder()
.setJournalInfo(PBHelper.convert(journalInfo))
.setEpoch(epoch)
.setFirstTxnId(firstTxnId)
.setNumTxns(numTxns)
.setRecords(PBHelper.getByteString(records))
.build();
try {
rpcProxy.journal(NULL_CONTROLLER, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void startLogSegment(JournalInfo journalInfo, long epoch, long txid)
throws IOException {
StartLogSegmentRequestProto req = StartLogSegmentRequestProto.newBuilder()
.setJournalInfo(PBHelper.convert(journalInfo))
.setEpoch(epoch)
.setTxid(txid)
.build();
try {
rpcProxy.startLogSegment(NULL_CONTROLLER, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public FenceResponse fence(JournalInfo journalInfo, long epoch,
String fencerInfo) throws IOException {
FenceRequestProto req = FenceRequestProto.newBuilder().setEpoch(epoch)
.setJournalInfo(PBHelper.convert(journalInfo)).build();
try {
FenceResponseProto resp = rpcProxy.fence(NULL_CONTROLLER, req);
return new FenceResponse(resp.getPreviousEpoch(),
resp.getLastTransactionId(), resp.getInSync());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil.isMethodSupported(rpcProxy, JournalProtocolPB.class,
RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(JournalProtocolPB.class), methodName);
}
}
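// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): the typical call
// order a journal client would follow through the translator above. The epoch,
// transaction id and record bytes are supplied by the caller and are treated
// here as opaque placeholders.
// ---------------------------------------------------------------------------
class JournalProtocolTranslatorPBUsageSketch {
  static FenceResponse journalSketch(JournalProtocol journal, JournalInfo info,
      long epoch, long firstTxnId, byte[] records) throws IOException {
    // Open a new log segment at firstTxnId, then stream one batch of edits.
    journal.startLogSegment(info, epoch, firstTxnId);
    journal.journal(info, epoch, firstTxnId, 1, records);
    // Fencing: a writer claiming a higher epoch shuts out the previous writer;
    // the response reports the previous epoch, last txid and sync state.
    return journal.fence(info, epoch + 1, "sketch-fencer");
  }
}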
| 4,404 | 37.304348 | 95 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.token.TokenInfo;
@KerberosInfo(
serverPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
@TokenInfo(BlockTokenSelector.class)
@ProtocolInfo(protocolName =
"org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol",
protocolVersion = 1)
@InterfaceAudience.Private
public interface ClientDatanodeProtocolPB extends
ClientDatanodeProtocolService.BlockingInterface {
}
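// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the annotations above
// are what the RPC layer consults for Kerberos principals and block-token
// selection when a client builds a proxy for this interface. The setup below
// mirrors the proxy-creation pattern used by the translator classes in this
// package; the timeout is a hypothetical placeholder.
// ---------------------------------------------------------------------------
class ClientDatanodeProtocolPBUsageSketch {
  static ClientDatanodeProtocolPB proxySketch(
      java.net.InetSocketAddress datanodeAddr,
      org.apache.hadoop.conf.Configuration conf) throws java.io.IOException {
    org.apache.hadoop.ipc.RPC.setProtocolEngine(conf,
        ClientDatanodeProtocolPB.class,
        org.apache.hadoop.ipc.ProtobufRpcEngine.class);
    return org.apache.hadoop.ipc.RPC.getProxy(ClientDatanodeProtocolPB.class,
        org.apache.hadoop.ipc.RPC.getProtocolVersion(
            ClientDatanodeProtocolPB.class),
        datanodeAddr,
        org.apache.hadoop.security.UserGroupInformation.getCurrentUser(), conf,
        org.apache.hadoop.net.NetUtils.getDefaultSocketFactory(conf),
        60 * 1000 /* hypothetical socket timeout */);
  }
}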
| 1,649 | 42.421053 | 104 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* This class is the client side translator to translate the requests made on
* {@link NamenodeProtocol} interfaces to the RPC server implementing
* {@link NamenodeProtocolPB}.
*/
@InterfaceAudience.Private
@InterfaceStability.Stable
public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
ProtocolMetaInterface, Closeable, ProtocolTranslator {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
/*
* Protobuf requests with no parameters instantiated only once
*/
private static final GetBlockKeysRequestProto VOID_GET_BLOCKKEYS_REQUEST =
GetBlockKeysRequestProto.newBuilder().build();
private static final GetTransactionIdRequestProto VOID_GET_TRANSACTIONID_REQUEST =
GetTransactionIdRequestProto.newBuilder().build();
private static final RollEditLogRequestProto VOID_ROLL_EDITLOG_REQUEST =
RollEditLogRequestProto.newBuilder().build();
private static final VersionRequestProto VOID_VERSION_REQUEST =
VersionRequestProto.newBuilder().build();
final private NamenodeProtocolPB rpcProxy;
public NamenodeProtocolTranslatorPB(NamenodeProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
}
@Override
public void close() {
RPC.stopProxy(rpcProxy);
}
@Override
public Object getUnderlyingProxyObject() {
return rpcProxy;
}
@Override
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException {
GetBlocksRequestProto req = GetBlocksRequestProto.newBuilder()
.setDatanode(PBHelper.convert((DatanodeID)datanode)).setSize(size)
.build();
try {
return PBHelper.convert(rpcProxy.getBlocks(NULL_CONTROLLER, req)
.getBlocks());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public ExportedBlockKeys getBlockKeys() throws IOException {
try {
GetBlockKeysResponseProto rsp = rpcProxy.getBlockKeys(NULL_CONTROLLER,
VOID_GET_BLOCKKEYS_REQUEST);
return rsp.hasKeys() ? PBHelper.convert(rsp.getKeys()) : null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public long getTransactionID() throws IOException {
try {
return rpcProxy.getTransactionId(NULL_CONTROLLER,
VOID_GET_TRANSACTIONID_REQUEST).getTxId();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public long getMostRecentCheckpointTxId() throws IOException {
try {
return rpcProxy.getMostRecentCheckpointTxId(NULL_CONTROLLER,
GetMostRecentCheckpointTxIdRequestProto.getDefaultInstance()).getTxId();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public CheckpointSignature rollEditLog() throws IOException {
try {
return PBHelper.convert(rpcProxy.rollEditLog(NULL_CONTROLLER,
VOID_ROLL_EDITLOG_REQUEST).getSignature());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public NamespaceInfo versionRequest() throws IOException {
try {
return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
VOID_VERSION_REQUEST).getInfo());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void errorReport(NamenodeRegistration registration, int errorCode,
String msg) throws IOException {
ErrorReportRequestProto req = ErrorReportRequestProto.newBuilder()
.setErrorCode(errorCode).setMsg(msg)
.setRegistration(PBHelper.convert(registration)).build();
try {
rpcProxy.errorReport(NULL_CONTROLLER, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public NamenodeRegistration registerSubordinateNamenode(
NamenodeRegistration registration) throws IOException {
RegisterRequestProto req = RegisterRequestProto.newBuilder()
.setRegistration(PBHelper.convert(registration)).build();
try {
return PBHelper.convert(
rpcProxy.registerSubordinateNamenode(NULL_CONTROLLER, req)
.getRegistration());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
throws IOException {
StartCheckpointRequestProto req = StartCheckpointRequestProto.newBuilder()
.setRegistration(PBHelper.convert(registration)).build();
NamenodeCommandProto cmd;
try {
cmd = rpcProxy.startCheckpoint(NULL_CONTROLLER, req).getCommand();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
return PBHelper.convert(cmd);
}
@Override
public void endCheckpoint(NamenodeRegistration registration,
CheckpointSignature sig) throws IOException {
EndCheckpointRequestProto req = EndCheckpointRequestProto.newBuilder()
.setRegistration(PBHelper.convert(registration))
.setSignature(PBHelper.convert(sig)).build();
try {
rpcProxy.endCheckpoint(NULL_CONTROLLER, req);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
throws IOException {
GetEditLogManifestRequestProto req = GetEditLogManifestRequestProto
.newBuilder().setSinceTxId(sinceTxId).build();
try {
return PBHelper.convert(rpcProxy.getEditLogManifest(NULL_CONTROLLER, req)
.getManifest());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public boolean isMethodSupported(String methodName) throws IOException {
return RpcClientUtil.isMethodSupported(rpcProxy, NamenodeProtocolPB.class,
RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(NamenodeProtocolPB.class), methodName);
}
@Override
public boolean isUpgradeFinalized() throws IOException {
IsUpgradeFinalizedRequestProto req = IsUpgradeFinalizedRequestProto
.newBuilder().build();
try {
IsUpgradeFinalizedResponseProto response = rpcProxy.isUpgradeFinalized(
NULL_CONTROLLER, req);
return response.getIsUpgradeFinalized();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
}
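// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): a checkpointing
// client (e.g. a standby or secondary namenode) would use the translator above
// roughly like this to discover which finalized edit segments it still needs.
// The printing is a placeholder for actually fetching the segments.
// ---------------------------------------------------------------------------
class NamenodeProtocolTranslatorPBUsageSketch {
  static void checkpointSketch(NamenodeProtocol namenode) throws IOException {
    // Ask the namenode for the last checkpointed transaction id ...
    long lastCheckpointTxId = namenode.getMostRecentCheckpointTxId();
    // ... then list the finalized edit log segments that come after it.
    RemoteEditLogManifest manifest =
        namenode.getEditLogManifest(lastCheckpointTxId + 1);
    for (org.apache.hadoop.hdfs.server.protocol.RemoteEditLog log
        : manifest.getLogs()) {
      System.out.println("edit segment " + log.getStartTxId() + "-"
          + log.getEndTxId());
    }
  }
}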
| 10,004 | 38.702381 | 108 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
.EncryptionZoneProto;
import static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherSuiteProto;
import static org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatchList;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolStats;
import org.apache.hadoop.crypto.CipherOption;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryScopeProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RecoveringBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos;
import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.XAttrNamespaceProto;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrSetFlagProto;
import org.apache.hadoop.hdfs.security.token.block.BlockKey;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
import org.apache.hadoop.hdfs.util.ExactSizeInputStream;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.primitives.Shorts;
import com.google.protobuf.ByteString;
import com.google.protobuf.CodedInputStream;
/**
* Utilities for converting protobuf classes to and from implementation classes
* and other helper utilities to help in dealing with protobuf.
*
 * Note that when converting from an internal type to a protobuf type, the
 * converters never return null for the protobuf type. The check for the
 * internal type being null must be done before calling the convert() method.
*/
public class PBHelper {
private static final RegisterCommandProto REG_CMD_PROTO =
RegisterCommandProto.newBuilder().build();
private static final RegisterCommand REG_CMD = new RegisterCommand();
private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES =
AclEntryScope.values();
private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES =
AclEntryType.values();
private static final FsAction[] FSACTION_VALUES =
FsAction.values();
private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
XAttr.NameSpace.values();
private PBHelper() {
/** Hidden constructor */
}
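  // Illustrative sketch (not part of the original class): the class javadoc
  // requires callers to null-check the internal type themselves, because the
  // protobuf-direction convert() overloads never return null. A hypothetical
  // defensive wrapper therefore looks like this:
  static BlockProto convertOrNullSketch(Block b) {
    return b == null ? null : convert(b);
  }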
public static ByteString getByteString(byte[] bytes) {
return ByteString.copyFrom(bytes);
}
private static <T extends Enum<T>, U extends Enum<U>> U castEnum(T from, U[] to) {
return to[from.ordinal()];
}
public static NamenodeRole convert(NamenodeRoleProto role) {
switch (role) {
case NAMENODE:
return NamenodeRole.NAMENODE;
case BACKUP:
return NamenodeRole.BACKUP;
case CHECKPOINT:
return NamenodeRole.CHECKPOINT;
}
return null;
}
public static NamenodeRoleProto convert(NamenodeRole role) {
switch (role) {
case NAMENODE:
return NamenodeRoleProto.NAMENODE;
case BACKUP:
return NamenodeRoleProto.BACKUP;
case CHECKPOINT:
return NamenodeRoleProto.CHECKPOINT;
}
return null;
}
public static BlockStoragePolicy[] convertStoragePolicies(
List<BlockStoragePolicyProto> policyProtos) {
if (policyProtos == null || policyProtos.size() == 0) {
return new BlockStoragePolicy[0];
}
BlockStoragePolicy[] policies = new BlockStoragePolicy[policyProtos.size()];
int i = 0;
for (BlockStoragePolicyProto proto : policyProtos) {
policies[i++] = convert(proto);
}
return policies;
}
public static BlockStoragePolicy convert(BlockStoragePolicyProto proto) {
List<StorageTypeProto> cList = proto.getCreationPolicy()
.getStorageTypesList();
StorageType[] creationTypes = convertStorageTypes(cList, cList.size());
List<StorageTypeProto> cfList = proto.hasCreationFallbackPolicy() ? proto
.getCreationFallbackPolicy().getStorageTypesList() : null;
StorageType[] creationFallbackTypes = cfList == null ? StorageType
.EMPTY_ARRAY : convertStorageTypes(cfList, cfList.size());
List<StorageTypeProto> rfList = proto.hasReplicationFallbackPolicy() ?
proto.getReplicationFallbackPolicy().getStorageTypesList() : null;
StorageType[] replicationFallbackTypes = rfList == null ? StorageType
.EMPTY_ARRAY : convertStorageTypes(rfList, rfList.size());
return new BlockStoragePolicy((byte) proto.getPolicyId(), proto.getName(),
creationTypes, creationFallbackTypes, replicationFallbackTypes);
}
public static BlockStoragePolicyProto convert(BlockStoragePolicy policy) {
BlockStoragePolicyProto.Builder builder = BlockStoragePolicyProto
.newBuilder().setPolicyId(policy.getId()).setName(policy.getName());
// creation storage types
StorageTypesProto creationProto = convert(policy.getStorageTypes());
Preconditions.checkArgument(creationProto != null);
builder.setCreationPolicy(creationProto);
// creation fallback
StorageTypesProto creationFallbackProto = convert(
policy.getCreationFallbacks());
if (creationFallbackProto != null) {
builder.setCreationFallbackPolicy(creationFallbackProto);
}
// replication fallback
StorageTypesProto replicationFallbackProto = convert(
policy.getReplicationFallbacks());
if (replicationFallbackProto != null) {
builder.setReplicationFallbackPolicy(replicationFallbackProto);
}
return builder.build();
}
public static StorageTypesProto convert(StorageType[] types) {
if (types == null || types.length == 0) {
return null;
}
List<StorageTypeProto> list = convertStorageTypes(types);
return StorageTypesProto.newBuilder().addAllStorageTypes(list).build();
}
public static StorageInfoProto convert(StorageInfo info) {
return StorageInfoProto.newBuilder().setClusterID(info.getClusterID())
.setCTime(info.getCTime()).setLayoutVersion(info.getLayoutVersion())
.setNamespceID(info.getNamespaceID()).build();
}
public static StorageInfo convert(StorageInfoProto info, NodeType type) {
return new StorageInfo(info.getLayoutVersion(), info.getNamespceID(),
info.getClusterID(), info.getCTime(), type);
}
public static NamenodeRegistrationProto convert(NamenodeRegistration reg) {
return NamenodeRegistrationProto.newBuilder()
.setHttpAddress(reg.getHttpAddress()).setRole(convert(reg.getRole()))
.setRpcAddress(reg.getAddress())
.setStorageInfo(convert((StorageInfo) reg)).build();
}
public static NamenodeRegistration convert(NamenodeRegistrationProto reg) {
StorageInfo si = convert(reg.getStorageInfo(), NodeType.NAME_NODE);
return new NamenodeRegistration(reg.getRpcAddress(), reg.getHttpAddress(),
si, convert(reg.getRole()));
}
// DatanodeId
public static DatanodeID convert(DatanodeIDProto dn) {
return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getDatanodeUuid(),
dn.getXferPort(), dn.getInfoPort(), dn.hasInfoSecurePort() ? dn
.getInfoSecurePort() : 0, dn.getIpcPort());
}
public static DatanodeIDProto convert(DatanodeID dn) {
// For wire compatibility with older versions we transmit the StorageID
// which is the same as the DatanodeUuid. Since StorageID is a required
// field we pass the empty string if the DatanodeUuid is not yet known.
return DatanodeIDProto.newBuilder()
.setIpAddr(dn.getIpAddr())
.setHostName(dn.getHostName())
.setXferPort(dn.getXferPort())
.setDatanodeUuid(dn.getDatanodeUuid() != null ? dn.getDatanodeUuid() : "")
.setInfoPort(dn.getInfoPort())
.setInfoSecurePort(dn.getInfoSecurePort())
.setIpcPort(dn.getIpcPort()).build();
}
// Arrays of DatanodeId
public static DatanodeIDProto[] convert(DatanodeID[] did) {
if (did == null)
return null;
final int len = did.length;
DatanodeIDProto[] result = new DatanodeIDProto[len];
for (int i = 0; i < len; ++i) {
result[i] = convert(did[i]);
}
return result;
}
public static DatanodeID[] convert(DatanodeIDProto[] did) {
if (did == null) return null;
final int len = did.length;
DatanodeID[] result = new DatanodeID[len];
for (int i = 0; i < len; ++i) {
result[i] = convert(did[i]);
}
return result;
}
// Block
public static BlockProto convert(Block b) {
return BlockProto.newBuilder().setBlockId(b.getBlockId())
.setGenStamp(b.getGenerationStamp()).setNumBytes(b.getNumBytes())
.build();
}
public static Block convert(BlockProto b) {
return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
}
public static BlockWithLocationsProto convert(BlockWithLocations blk) {
return BlockWithLocationsProto.newBuilder()
.setBlock(convert(blk.getBlock()))
.addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
.addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
.addAllStorageTypes(convertStorageTypes(blk.getStorageTypes()))
.build();
}
public static BlockWithLocations convert(BlockWithLocationsProto b) {
final List<String> datanodeUuids = b.getDatanodeUuidsList();
final List<String> storageUuids = b.getStorageUuidsList();
final List<StorageTypeProto> storageTypes = b.getStorageTypesList();
return new BlockWithLocations(convert(b.getBlock()),
datanodeUuids.toArray(new String[datanodeUuids.size()]),
storageUuids.toArray(new String[storageUuids.size()]),
convertStorageTypes(storageTypes, storageUuids.size()));
}
public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
BlocksWithLocationsProto.Builder builder = BlocksWithLocationsProto
.newBuilder();
for (BlockWithLocations b : blks.getBlocks()) {
builder.addBlocks(convert(b));
}
return builder.build();
}
public static BlocksWithLocations convert(BlocksWithLocationsProto blocks) {
List<BlockWithLocationsProto> b = blocks.getBlocksList();
BlockWithLocations[] ret = new BlockWithLocations[b.size()];
int i = 0;
for (BlockWithLocationsProto entry : b) {
ret[i++] = convert(entry);
}
return new BlocksWithLocations(ret);
}
public static BlockKeyProto convert(BlockKey key) {
byte[] encodedKey = key.getEncodedKey();
ByteString keyBytes = ByteString.copyFrom(encodedKey == null ?
DFSUtilClient.EMPTY_BYTES : encodedKey);
return BlockKeyProto.newBuilder().setKeyId(key.getKeyId())
.setKeyBytes(keyBytes).setExpiryDate(key.getExpiryDate()).build();
}
public static BlockKey convert(BlockKeyProto k) {
return new BlockKey(k.getKeyId(), k.getExpiryDate(), k.getKeyBytes()
.toByteArray());
}
public static ExportedBlockKeysProto convert(ExportedBlockKeys keys) {
ExportedBlockKeysProto.Builder builder = ExportedBlockKeysProto
.newBuilder();
builder.setIsBlockTokenEnabled(keys.isBlockTokenEnabled())
.setKeyUpdateInterval(keys.getKeyUpdateInterval())
.setTokenLifeTime(keys.getTokenLifetime())
.setCurrentKey(convert(keys.getCurrentKey()));
for (BlockKey k : keys.getAllKeys()) {
builder.addAllKeys(convert(k));
}
return builder.build();
}
public static ExportedBlockKeys convert(ExportedBlockKeysProto keys) {
return new ExportedBlockKeys(keys.getIsBlockTokenEnabled(),
keys.getKeyUpdateInterval(), keys.getTokenLifeTime(),
convert(keys.getCurrentKey()), convertBlockKeys(keys.getAllKeysList()));
}
public static CheckpointSignatureProto convert(CheckpointSignature s) {
return CheckpointSignatureProto.newBuilder()
.setBlockPoolId(s.getBlockpoolID())
.setCurSegmentTxId(s.getCurSegmentTxId())
.setMostRecentCheckpointTxId(s.getMostRecentCheckpointTxId())
.setStorageInfo(PBHelper.convert((StorageInfo) s)).build();
}
public static CheckpointSignature convert(CheckpointSignatureProto s) {
StorageInfo si = PBHelper.convert(s.getStorageInfo(), NodeType.NAME_NODE);
return new CheckpointSignature(si, s.getBlockPoolId(),
s.getMostRecentCheckpointTxId(), s.getCurSegmentTxId());
}
public static RemoteEditLogProto convert(RemoteEditLog log) {
return RemoteEditLogProto.newBuilder()
.setStartTxId(log.getStartTxId())
.setEndTxId(log.getEndTxId())
.setIsInProgress(log.isInProgress()).build();
}
public static RemoteEditLog convert(RemoteEditLogProto l) {
return new RemoteEditLog(l.getStartTxId(), l.getEndTxId(),
l.getIsInProgress());
}
public static RemoteEditLogManifestProto convert(
RemoteEditLogManifest manifest) {
RemoteEditLogManifestProto.Builder builder = RemoteEditLogManifestProto
.newBuilder();
for (RemoteEditLog log : manifest.getLogs()) {
builder.addLogs(convert(log));
}
return builder.build();
}
public static RemoteEditLogManifest convert(
RemoteEditLogManifestProto manifest) {
List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>(manifest
.getLogsList().size());
for (RemoteEditLogProto l : manifest.getLogsList()) {
logs.add(convert(l));
}
return new RemoteEditLogManifest(logs);
}
public static CheckpointCommandProto convert(CheckpointCommand cmd) {
return CheckpointCommandProto.newBuilder()
.setSignature(convert(cmd.getSignature()))
.setNeedToReturnImage(cmd.needToReturnImage()).build();
}
public static NamenodeCommandProto convert(NamenodeCommand cmd) {
if (cmd instanceof CheckpointCommand) {
return NamenodeCommandProto.newBuilder().setAction(cmd.getAction())
.setType(NamenodeCommandProto.Type.CheckPointCommand)
.setCheckpointCmd(convert((CheckpointCommand) cmd)).build();
}
return NamenodeCommandProto.newBuilder()
.setType(NamenodeCommandProto.Type.NamenodeCommand)
.setAction(cmd.getAction()).build();
}
public static BlockKey[] convertBlockKeys(List<BlockKeyProto> list) {
BlockKey[] ret = new BlockKey[list.size()];
int i = 0;
for (BlockKeyProto k : list) {
ret[i++] = convert(k);
}
return ret;
}
public static NamespaceInfo convert(NamespaceInfoProto info) {
StorageInfoProto storage = info.getStorageInfo();
return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(),
info.getBlockPoolID(), storage.getCTime(), info.getBuildVersion(),
info.getSoftwareVersion(), info.getCapabilities());
}
public static NamenodeCommand convert(NamenodeCommandProto cmd) {
if (cmd == null) return null;
switch (cmd.getType()) {
case CheckPointCommand:
CheckpointCommandProto chkPt = cmd.getCheckpointCmd();
return new CheckpointCommand(PBHelper.convert(chkPt.getSignature()),
chkPt.getNeedToReturnImage());
default:
return new NamenodeCommand(cmd.getAction());
}
}
public static ExtendedBlock convert(ExtendedBlockProto eb) {
if (eb == null) return null;
return new ExtendedBlock( eb.getPoolId(), eb.getBlockId(), eb.getNumBytes(),
eb.getGenerationStamp());
}
public static ExtendedBlockProto convert(final ExtendedBlock b) {
if (b == null) return null;
return ExtendedBlockProto.newBuilder().
setPoolId(b.getBlockPoolId()).
setBlockId(b.getBlockId()).
setNumBytes(b.getNumBytes()).
setGenerationStamp(b.getGenerationStamp()).
build();
}
public static RecoveringBlockProto convert(RecoveringBlock b) {
if (b == null) {
return null;
}
LocatedBlockProto lb = PBHelper.convert((LocatedBlock)b);
RecoveringBlockProto.Builder builder = RecoveringBlockProto.newBuilder();
builder.setBlock(lb).setNewGenStamp(b.getNewGenerationStamp());
if(b.getNewBlock() != null)
builder.setTruncateBlock(PBHelper.convert(b.getNewBlock()));
return builder.build();
}
public static RecoveringBlock convert(RecoveringBlockProto b) {
ExtendedBlock block = convert(b.getBlock().getB());
DatanodeInfo[] locs = convert(b.getBlock().getLocsList());
return (b.hasTruncateBlock()) ?
new RecoveringBlock(block, locs, PBHelper.convert(b.getTruncateBlock())) :
new RecoveringBlock(block, locs, b.getNewGenStamp());
}
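  // Illustrative sketch (not part of the original class): a truncate recovery
  // is distinguished on the wire solely by the optional truncateBlock field
  // populated above, which the reverse conversion checks via hasTruncateBlock().
  static boolean isTruncateRecoverySketch(RecoveringBlock rb) {
    return convert(rb).hasTruncateBlock();
  }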
public static DatanodeInfoProto.AdminState convert(
final DatanodeInfo.AdminStates inAs) {
switch (inAs) {
case NORMAL: return DatanodeInfoProto.AdminState.NORMAL;
case DECOMMISSION_INPROGRESS:
return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS;
case DECOMMISSIONED: return DatanodeInfoProto.AdminState.DECOMMISSIONED;
default: return DatanodeInfoProto.AdminState.NORMAL;
}
}
static public DatanodeInfo convert(DatanodeInfoProto di) {
if (di == null) return null;
return new DatanodeInfo(
PBHelper.convert(di.getId()),
di.hasLocation() ? di.getLocation() : null ,
di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
di.getBlockPoolUsed(), di.getCacheCapacity(), di.getCacheUsed(),
di.getLastUpdate(), di.getLastUpdateMonotonic(),
di.getXceiverCount(), PBHelper.convert(di.getAdminState()));
}
static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
if (di == null) return null;
return convert(di);
}
static public DatanodeInfo[] convert(DatanodeInfoProto di[]) {
if (di == null) return null;
DatanodeInfo[] result = new DatanodeInfo[di.length];
for (int i = 0; i < di.length; i++) {
result[i] = convert(di[i]);
}
return result;
}
public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
DatanodeInfo[] dnInfos) {
return convert(dnInfos, 0);
}
/**
   * Copy the entries of {@code dnInfos}, starting at {@code startIdx}, into a
   * list of the corresponding protobuf types.
*/
public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
DatanodeInfo[] dnInfos, int startIdx) {
if (dnInfos == null)
return null;
ArrayList<HdfsProtos.DatanodeInfoProto> protos = Lists
.newArrayListWithCapacity(dnInfos.length);
for (int i = startIdx; i < dnInfos.length; i++) {
protos.add(convert(dnInfos[i]));
}
return protos;
}
public static DatanodeInfo[] convert(List<DatanodeInfoProto> list) {
DatanodeInfo[] info = new DatanodeInfo[list.size()];
for (int i = 0; i < info.length; i++) {
info[i] = convert(list.get(i));
}
return info;
}
public static DatanodeInfoProto convert(DatanodeInfo info) {
DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
if (info.getNetworkLocation() != null) {
builder.setLocation(info.getNetworkLocation());
}
builder
.setId(PBHelper.convert((DatanodeID)info))
.setCapacity(info.getCapacity())
.setDfsUsed(info.getDfsUsed())
.setRemaining(info.getRemaining())
.setBlockPoolUsed(info.getBlockPoolUsed())
.setCacheCapacity(info.getCacheCapacity())
.setCacheUsed(info.getCacheUsed())
.setLastUpdate(info.getLastUpdate())
.setLastUpdateMonotonic(info.getLastUpdateMonotonic())
.setXceiverCount(info.getXceiverCount())
        .setAdminState(PBHelper.convert(info.getAdminState()));
return builder.build();
}
public static DatanodeStorageReportProto convertDatanodeStorageReport(
DatanodeStorageReport report) {
return DatanodeStorageReportProto.newBuilder()
.setDatanodeInfo(convert(report.getDatanodeInfo()))
.addAllStorageReports(convertStorageReports(report.getStorageReports()))
.build();
}
public static List<DatanodeStorageReportProto> convertDatanodeStorageReports(
DatanodeStorageReport[] reports) {
final List<DatanodeStorageReportProto> protos
= new ArrayList<DatanodeStorageReportProto>(reports.length);
for(int i = 0; i < reports.length; i++) {
protos.add(convertDatanodeStorageReport(reports[i]));
}
return protos;
}
public static DatanodeStorageReport convertDatanodeStorageReport(
DatanodeStorageReportProto proto) {
return new DatanodeStorageReport(
convert(proto.getDatanodeInfo()),
convertStorageReports(proto.getStorageReportsList()));
}
public static DatanodeStorageReport[] convertDatanodeStorageReports(
List<DatanodeStorageReportProto> protos) {
final DatanodeStorageReport[] reports
= new DatanodeStorageReport[protos.size()];
for(int i = 0; i < reports.length; i++) {
reports[i] = convertDatanodeStorageReport(protos.get(i));
}
return reports;
}
public static AdminStates convert(AdminState adminState) {
switch(adminState) {
case DECOMMISSION_INPROGRESS:
return AdminStates.DECOMMISSION_INPROGRESS;
case DECOMMISSIONED:
return AdminStates.DECOMMISSIONED;
case NORMAL:
default:
return AdminStates.NORMAL;
}
}
public static LocatedBlockProto convert(LocatedBlock b) {
if (b == null) return null;
Builder builder = LocatedBlockProto.newBuilder();
DatanodeInfo[] locs = b.getLocations();
List<DatanodeInfo> cachedLocs =
Lists.newLinkedList(Arrays.asList(b.getCachedLocations()));
for (int i = 0; i < locs.length; i++) {
DatanodeInfo loc = locs[i];
builder.addLocs(i, PBHelper.convert(loc));
boolean locIsCached = cachedLocs.contains(loc);
builder.addIsCached(locIsCached);
if (locIsCached) {
cachedLocs.remove(loc);
}
}
Preconditions.checkArgument(cachedLocs.size() == 0,
"Found additional cached replica locations that are not in the set of"
+ " storage-backed locations!");
StorageType[] storageTypes = b.getStorageTypes();
if (storageTypes != null) {
for (int i = 0; i < storageTypes.length; ++i) {
builder.addStorageTypes(PBHelper.convertStorageType(storageTypes[i]));
}
}
final String[] storageIDs = b.getStorageIDs();
if (storageIDs != null) {
builder.addAllStorageIDs(Arrays.asList(storageIDs));
}
return builder.setB(PBHelper.convert(b.getBlock()))
.setBlockToken(PBHelper.convert(b.getBlockToken()))
.setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
}
public static LocatedBlock convert(LocatedBlockProto proto) {
if (proto == null) return null;
List<DatanodeInfoProto> locs = proto.getLocsList();
DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
for (int i = 0; i < locs.size(); i++) {
targets[i] = PBHelper.convert(locs.get(i));
}
final StorageType[] storageTypes = convertStorageTypes(
proto.getStorageTypesList(), locs.size());
final int storageIDsCount = proto.getStorageIDsCount();
final String[] storageIDs;
if (storageIDsCount == 0) {
storageIDs = null;
} else {
Preconditions.checkState(storageIDsCount == locs.size());
storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
}
    // Set values from the isCached list, re-using the DatanodeInfo references
    // already held in targets
List<DatanodeInfo> cachedLocs = new ArrayList<DatanodeInfo>(locs.size());
List<Boolean> isCachedList = proto.getIsCachedList();
for (int i=0; i<isCachedList.size(); i++) {
if (isCachedList.get(i)) {
cachedLocs.add(targets[i]);
}
}
LocatedBlock lb = new LocatedBlock(PBHelper.convert(proto.getB()), targets,
storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(),
cachedLocs.toArray(new DatanodeInfo[0]));
lb.setBlockToken(PBHelper.convert(proto.getBlockToken()));
return lb;
}
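  // Illustrative sketch (not part of the original class): round-trips a
  // LocatedBlock through its protobuf form. The cached-location handling above
  // relies on every cached replica also appearing in the ordinary location
  // list, which the Preconditions check in convert(LocatedBlock) enforces.
  static LocatedBlock locatedBlockRoundTripSketch(LocatedBlock lb) {
    LocatedBlockProto proto = convert(lb); // internal -> protobuf
    return convert(proto);                 // protobuf -> internal
  }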
public static TokenProto convert(Token<?> tok) {
return TokenProto.newBuilder().
setIdentifier(ByteString.copyFrom(tok.getIdentifier())).
setPassword(ByteString.copyFrom(tok.getPassword())).
setKind(tok.getKind().toString()).
setService(tok.getService().toString()).build();
}
public static Token<BlockTokenIdentifier> convert(
TokenProto blockToken) {
return new Token<BlockTokenIdentifier>(blockToken.getIdentifier()
.toByteArray(), blockToken.getPassword().toByteArray(), new Text(
blockToken.getKind()), new Text(blockToken.getService()));
}
public static Token<DelegationTokenIdentifier> convertDelegationToken(
TokenProto blockToken) {
return new Token<DelegationTokenIdentifier>(blockToken.getIdentifier()
.toByteArray(), blockToken.getPassword().toByteArray(), new Text(
blockToken.getKind()), new Text(blockToken.getService()));
}
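  // Illustrative sketch (not part of the original class): block tokens travel
  // on the wire as generic TokenProto messages; converting back re-types them
  // as Token<BlockTokenIdentifier> for use by the block access layer.
  static Token<BlockTokenIdentifier> blockTokenRoundTripSketch(
      Token<BlockTokenIdentifier> token) {
    TokenProto proto = convert(token);
    return convert(proto);
  }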
public static ReplicaState convert(ReplicaStateProto state) {
switch (state) {
case RBW:
return ReplicaState.RBW;
case RUR:
return ReplicaState.RUR;
case RWR:
return ReplicaState.RWR;
case TEMPORARY:
return ReplicaState.TEMPORARY;
case FINALIZED:
default:
return ReplicaState.FINALIZED;
}
}
public static ReplicaStateProto convert(ReplicaState state) {
switch (state) {
case RBW:
return ReplicaStateProto.RBW;
case RUR:
return ReplicaStateProto.RUR;
case RWR:
return ReplicaStateProto.RWR;
case TEMPORARY:
return ReplicaStateProto.TEMPORARY;
case FINALIZED:
default:
return ReplicaStateProto.FINALIZED;
}
}
public static DatanodeRegistrationProto convert(
DatanodeRegistration registration) {
DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto
.newBuilder();
return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration))
.setStorageInfo(PBHelper.convert(registration.getStorageInfo()))
.setKeys(PBHelper.convert(registration.getExportedKeys()))
.setSoftwareVersion(registration.getSoftwareVersion()).build();
}
public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {
StorageInfo si = convert(proto.getStorageInfo(), NodeType.DATA_NODE);
return new DatanodeRegistration(PBHelper.convert(proto.getDatanodeID()),
si, PBHelper.convert(proto.getKeys()), proto.getSoftwareVersion());
}
public static DatanodeCommand convert(DatanodeCommandProto proto) {
switch (proto.getCmdType()) {
case BalancerBandwidthCommand:
return PBHelper.convert(proto.getBalancerCmd());
case BlockCommand:
return PBHelper.convert(proto.getBlkCmd());
case BlockRecoveryCommand:
return PBHelper.convert(proto.getRecoveryCmd());
case FinalizeCommand:
return PBHelper.convert(proto.getFinalizeCmd());
case KeyUpdateCommand:
return PBHelper.convert(proto.getKeyUpdateCmd());
case RegisterCommand:
return REG_CMD;
case BlockIdCommand:
return PBHelper.convert(proto.getBlkIdCmd());
default:
return null;
}
}
public static BalancerBandwidthCommandProto convert(
BalancerBandwidthCommand bbCmd) {
return BalancerBandwidthCommandProto.newBuilder()
.setBandwidth(bbCmd.getBalancerBandwidthValue()).build();
}
public static KeyUpdateCommandProto convert(KeyUpdateCommand cmd) {
return KeyUpdateCommandProto.newBuilder()
.setKeys(PBHelper.convert(cmd.getExportedKeys())).build();
}
public static BlockRecoveryCommandProto convert(BlockRecoveryCommand cmd) {
BlockRecoveryCommandProto.Builder builder = BlockRecoveryCommandProto
.newBuilder();
for (RecoveringBlock b : cmd.getRecoveringBlocks()) {
builder.addBlocks(PBHelper.convert(b));
}
return builder.build();
}
public static FinalizeCommandProto convert(FinalizeCommand cmd) {
return FinalizeCommandProto.newBuilder()
.setBlockPoolId(cmd.getBlockPoolId()).build();
}
public static BlockCommandProto convert(BlockCommand cmd) {
BlockCommandProto.Builder builder = BlockCommandProto.newBuilder()
.setBlockPoolId(cmd.getBlockPoolId());
switch (cmd.getAction()) {
case DatanodeProtocol.DNA_TRANSFER:
builder.setAction(BlockCommandProto.Action.TRANSFER);
break;
case DatanodeProtocol.DNA_INVALIDATE:
builder.setAction(BlockCommandProto.Action.INVALIDATE);
break;
case DatanodeProtocol.DNA_SHUTDOWN:
builder.setAction(BlockCommandProto.Action.SHUTDOWN);
break;
default:
throw new AssertionError("Invalid action");
}
Block[] blocks = cmd.getBlocks();
for (int i = 0; i < blocks.length; i++) {
builder.addBlocks(PBHelper.convert(blocks[i]));
}
builder.addAllTargets(convert(cmd.getTargets()))
.addAllTargetStorageUuids(convert(cmd.getTargetStorageIDs()));
StorageType[][] types = cmd.getTargetStorageTypes();
if (types != null) {
builder.addAllTargetStorageTypes(convert(types));
}
return builder.build();
}
private static List<StorageTypesProto> convert(StorageType[][] types) {
List<StorageTypesProto> list = Lists.newArrayList();
if (types != null) {
for (StorageType[] ts : types) {
StorageTypesProto.Builder builder = StorageTypesProto.newBuilder();
builder.addAllStorageTypes(convertStorageTypes(ts));
list.add(builder.build());
}
}
return list;
}
public static BlockIdCommandProto convert(BlockIdCommand cmd) {
BlockIdCommandProto.Builder builder = BlockIdCommandProto.newBuilder()
.setBlockPoolId(cmd.getBlockPoolId());
switch (cmd.getAction()) {
case DatanodeProtocol.DNA_CACHE:
builder.setAction(BlockIdCommandProto.Action.CACHE);
break;
case DatanodeProtocol.DNA_UNCACHE:
builder.setAction(BlockIdCommandProto.Action.UNCACHE);
break;
default:
throw new AssertionError("Invalid action");
}
long[] blockIds = cmd.getBlockIds();
for (int i = 0; i < blockIds.length; i++) {
builder.addBlockIds(blockIds[i]);
}
return builder.build();
}
private static List<DatanodeInfosProto> convert(DatanodeInfo[][] targets) {
DatanodeInfosProto[] ret = new DatanodeInfosProto[targets.length];
for (int i = 0; i < targets.length; i++) {
ret[i] = DatanodeInfosProto.newBuilder()
.addAllDatanodes(PBHelper.convert(targets[i])).build();
}
return Arrays.asList(ret);
}
private static List<StorageUuidsProto> convert(String[][] targetStorageUuids) {
StorageUuidsProto[] ret = new StorageUuidsProto[targetStorageUuids.length];
for (int i = 0; i < targetStorageUuids.length; i++) {
ret[i] = StorageUuidsProto.newBuilder()
.addAllStorageUuids(Arrays.asList(targetStorageUuids[i])).build();
}
return Arrays.asList(ret);
}
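  /**
   * Wraps a DatanodeCommand in a DatanodeCommandProto. A null command, an
   * unknown action or DNA_UNKNOWN is encoded as a NullDatanodeCommand.
   */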
public static DatanodeCommandProto convert(DatanodeCommand datanodeCommand) {
DatanodeCommandProto.Builder builder = DatanodeCommandProto.newBuilder();
if (datanodeCommand == null) {
return builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand)
.build();
}
switch (datanodeCommand.getAction()) {
case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
builder.setCmdType(DatanodeCommandProto.Type.BalancerBandwidthCommand)
.setBalancerCmd(
PBHelper.convert((BalancerBandwidthCommand) datanodeCommand));
break;
case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
builder
.setCmdType(DatanodeCommandProto.Type.KeyUpdateCommand)
.setKeyUpdateCmd(PBHelper.convert((KeyUpdateCommand) datanodeCommand));
break;
case DatanodeProtocol.DNA_RECOVERBLOCK:
builder.setCmdType(DatanodeCommandProto.Type.BlockRecoveryCommand)
.setRecoveryCmd(
PBHelper.convert((BlockRecoveryCommand) datanodeCommand));
break;
case DatanodeProtocol.DNA_FINALIZE:
builder.setCmdType(DatanodeCommandProto.Type.FinalizeCommand)
.setFinalizeCmd(PBHelper.convert((FinalizeCommand) datanodeCommand));
break;
case DatanodeProtocol.DNA_REGISTER:
builder.setCmdType(DatanodeCommandProto.Type.RegisterCommand)
.setRegisterCmd(REG_CMD_PROTO);
break;
case DatanodeProtocol.DNA_TRANSFER:
case DatanodeProtocol.DNA_INVALIDATE:
case DatanodeProtocol.DNA_SHUTDOWN:
builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).
setBlkCmd(PBHelper.convert((BlockCommand) datanodeCommand));
break;
case DatanodeProtocol.DNA_CACHE:
case DatanodeProtocol.DNA_UNCACHE:
builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
break;
case DatanodeProtocol.DNA_UNKNOWN: //Not expected
default:
builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
}
return builder.build();
}
public static KeyUpdateCommand convert(KeyUpdateCommandProto keyUpdateCmd) {
return new KeyUpdateCommand(PBHelper.convert(keyUpdateCmd.getKeys()));
}
public static FinalizeCommand convert(FinalizeCommandProto finalizeCmd) {
return new FinalizeCommand(finalizeCmd.getBlockPoolId());
}
public static BlockRecoveryCommand convert(
BlockRecoveryCommandProto recoveryCmd) {
List<RecoveringBlockProto> list = recoveryCmd.getBlocksList();
List<RecoveringBlock> recoveringBlocks = new ArrayList<RecoveringBlock>(
list.size());
for (RecoveringBlockProto rbp : list) {
recoveringBlocks.add(PBHelper.convert(rbp));
}
return new BlockRecoveryCommand(recoveringBlocks);
}
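  /**
   * Decodes a BlockCommandProto. If the proto carries no target storage
   * types, every target slot is filled with StorageType.DEFAULT.
   */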
public static BlockCommand convert(BlockCommandProto blkCmd) {
List<BlockProto> blockProtoList = blkCmd.getBlocksList();
Block[] blocks = new Block[blockProtoList.size()];
for (int i = 0; i < blockProtoList.size(); i++) {
blocks[i] = PBHelper.convert(blockProtoList.get(i));
}
List<DatanodeInfosProto> targetList = blkCmd.getTargetsList();
DatanodeInfo[][] targets = new DatanodeInfo[targetList.size()][];
for (int i = 0; i < targetList.size(); i++) {
targets[i] = PBHelper.convert(targetList.get(i));
}
StorageType[][] targetStorageTypes = new StorageType[targetList.size()][];
List<StorageTypesProto> targetStorageTypesList = blkCmd.getTargetStorageTypesList();
if (targetStorageTypesList.isEmpty()) { // missing storage types
for(int i = 0; i < targetStorageTypes.length; i++) {
targetStorageTypes[i] = new StorageType[targets[i].length];
Arrays.fill(targetStorageTypes[i], StorageType.DEFAULT);
}
} else {
for(int i = 0; i < targetStorageTypes.length; i++) {
List<StorageTypeProto> p = targetStorageTypesList.get(i).getStorageTypesList();
targetStorageTypes[i] = convertStorageTypes(p, targets[i].length);
}
}
List<StorageUuidsProto> targetStorageUuidsList = blkCmd.getTargetStorageUuidsList();
String[][] targetStorageIDs = new String[targetStorageUuidsList.size()][];
for(int i = 0; i < targetStorageIDs.length; i++) {
List<String> storageIDs = targetStorageUuidsList.get(i).getStorageUuidsList();
targetStorageIDs[i] = storageIDs.toArray(new String[storageIDs.size()]);
}
int action = DatanodeProtocol.DNA_UNKNOWN;
switch (blkCmd.getAction()) {
case TRANSFER:
action = DatanodeProtocol.DNA_TRANSFER;
break;
case INVALIDATE:
action = DatanodeProtocol.DNA_INVALIDATE;
break;
case SHUTDOWN:
action = DatanodeProtocol.DNA_SHUTDOWN;
break;
default:
throw new AssertionError("Unknown action type: " + blkCmd.getAction());
}
return new BlockCommand(action, blkCmd.getBlockPoolId(), blocks, targets,
targetStorageTypes, targetStorageIDs);
}
public static BlockIdCommand convert(BlockIdCommandProto blkIdCmd) {
int numBlockIds = blkIdCmd.getBlockIdsCount();
long blockIds[] = new long[numBlockIds];
for (int i = 0; i < numBlockIds; i++) {
blockIds[i] = blkIdCmd.getBlockIds(i);
}
int action = DatanodeProtocol.DNA_UNKNOWN;
switch (blkIdCmd.getAction()) {
case CACHE:
action = DatanodeProtocol.DNA_CACHE;
break;
case UNCACHE:
action = DatanodeProtocol.DNA_UNCACHE;
break;
default:
throw new AssertionError("Unknown action type: " + blkIdCmd.getAction());
}
return new BlockIdCommand(action, blkIdCmd.getBlockPoolId(), blockIds);
}
public static DatanodeInfo[] convert(DatanodeInfosProto datanodeInfosProto) {
List<DatanodeInfoProto> proto = datanodeInfosProto.getDatanodesList();
DatanodeInfo[] infos = new DatanodeInfo[proto.size()];
for (int i = 0; i < infos.length; i++) {
infos[i] = PBHelper.convert(proto.get(i));
}
return infos;
}
public static BalancerBandwidthCommand convert(
BalancerBandwidthCommandProto balancerCmd) {
return new BalancerBandwidthCommand(balancerCmd.getBandwidth());
}
public static ReceivedDeletedBlockInfoProto convert(
ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
ReceivedDeletedBlockInfoProto.Builder builder =
ReceivedDeletedBlockInfoProto.newBuilder();
ReceivedDeletedBlockInfoProto.BlockStatus status;
switch (receivedDeletedBlockInfo.getStatus()) {
case RECEIVING_BLOCK:
status = ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVING;
break;
case RECEIVED_BLOCK:
status = ReceivedDeletedBlockInfoProto.BlockStatus.RECEIVED;
break;
case DELETED_BLOCK:
status = ReceivedDeletedBlockInfoProto.BlockStatus.DELETED;
break;
default:
throw new IllegalArgumentException("Bad status: " +
receivedDeletedBlockInfo.getStatus());
}
builder.setStatus(status);
if (receivedDeletedBlockInfo.getDelHints() != null) {
builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints());
}
return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
.build();
}
public static ReceivedDeletedBlockInfo convert(
ReceivedDeletedBlockInfoProto proto) {
ReceivedDeletedBlockInfo.BlockStatus status = null;
switch (proto.getStatus()) {
case RECEIVING:
status = BlockStatus.RECEIVING_BLOCK;
break;
case RECEIVED:
status = BlockStatus.RECEIVED_BLOCK;
break;
case DELETED:
status = BlockStatus.DELETED_BLOCK;
break;
}
return new ReceivedDeletedBlockInfo(
PBHelper.convert(proto.getBlock()),
status,
proto.hasDeleteHint() ? proto.getDeleteHint() : null);
}
public static NamespaceInfoProto convert(NamespaceInfo info) {
return NamespaceInfoProto.newBuilder()
.setBlockPoolID(info.getBlockPoolID())
.setBuildVersion(info.getBuildVersion())
.setUnused(0)
.setStorageInfo(PBHelper.convert((StorageInfo)info))
.setSoftwareVersion(info.getSoftwareVersion())
.setCapabilities(info.getCapabilities())
.build();
}
// Located Block Arrays and Lists
public static LocatedBlockProto[] convertLocatedBlock(LocatedBlock[] lb) {
if (lb == null) return null;
return convertLocatedBlock2(Arrays.asList(lb)).toArray(
new LocatedBlockProto[lb.length]);
}
public static LocatedBlock[] convertLocatedBlock(LocatedBlockProto[] lb) {
if (lb == null) return null;
return convertLocatedBlock(Arrays.asList(lb)).toArray(
new LocatedBlock[lb.length]);
}
public static List<LocatedBlock> convertLocatedBlock(
List<LocatedBlockProto> lb) {
if (lb == null) return null;
final int len = lb.size();
List<LocatedBlock> result =
new ArrayList<LocatedBlock>(len);
for (int i = 0; i < len; ++i) {
result.add(PBHelper.convert(lb.get(i)));
}
return result;
}
public static List<LocatedBlockProto> convertLocatedBlock2(List<LocatedBlock> lb) {
if (lb == null) return null;
final int len = lb.size();
List<LocatedBlockProto> result = new ArrayList<LocatedBlockProto>(len);
for (int i = 0; i < len; ++i) {
result.add(PBHelper.convert(lb.get(i)));
}
return result;
}
// LocatedBlocks
public static LocatedBlocks convert(LocatedBlocksProto lb) {
return new LocatedBlocks(
lb.getFileLength(), lb.getUnderConstruction(),
PBHelper.convertLocatedBlock(lb.getBlocksList()),
lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
lb.getIsLastBlockComplete(),
lb.hasFileEncryptionInfo() ? convert(lb.getFileEncryptionInfo()) :
null);
}
public static LocatedBlocksProto convert(LocatedBlocks lb) {
if (lb == null) {
return null;
}
LocatedBlocksProto.Builder builder =
LocatedBlocksProto.newBuilder();
if (lb.getLastLocatedBlock() != null) {
builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
}
if (lb.getFileEncryptionInfo() != null) {
builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo()));
}
return builder.setFileLength(lb.getFileLength())
.setUnderConstruction(lb.isUnderConstruction())
.addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
.setIsLastBlockComplete(lb.isLastBlockComplete()).build();
}
// DataEncryptionKey
public static DataEncryptionKey convert(DataEncryptionKeyProto bet) {
String encryptionAlgorithm = bet.getEncryptionAlgorithm();
return new DataEncryptionKey(bet.getKeyId(),
bet.getBlockPoolId(),
bet.getNonce().toByteArray(),
bet.getEncryptionKey().toByteArray(),
bet.getExpiryDate(),
encryptionAlgorithm.isEmpty() ? null : encryptionAlgorithm);
}
public static DataEncryptionKeyProto convert(DataEncryptionKey bet) {
DataEncryptionKeyProto.Builder b = DataEncryptionKeyProto.newBuilder()
.setKeyId(bet.keyId)
.setBlockPoolId(bet.blockPoolId)
.setNonce(ByteString.copyFrom(bet.nonce))
.setEncryptionKey(ByteString.copyFrom(bet.encryptionKey))
.setExpiryDate(bet.expiryDate);
if (bet.encryptionAlgorithm != null) {
b.setEncryptionAlgorithm(bet.encryptionAlgorithm);
}
return b.build();
}
public static FsServerDefaults convert(FsServerDefaultsProto fs) {
if (fs == null) return null;
return new FsServerDefaults(
fs.getBlockSize(), fs.getBytesPerChecksum(),
fs.getWritePacketSize(), (short) fs.getReplication(),
fs.getFileBufferSize(),
fs.getEncryptDataTransfer(),
fs.getTrashInterval(),
PBHelper.convert(fs.getChecksumType()));
}
public static FsServerDefaultsProto convert(FsServerDefaults fs) {
if (fs == null) return null;
return FsServerDefaultsProto.newBuilder().
setBlockSize(fs.getBlockSize()).
setBytesPerChecksum(fs.getBytesPerChecksum()).
setWritePacketSize(fs.getWritePacketSize())
.setReplication(fs.getReplication())
.setFileBufferSize(fs.getFileBufferSize())
.setEncryptDataTransfer(fs.getEncryptDataTransfer())
.setTrashInterval(fs.getTrashInterval())
.setChecksumType(PBHelper.convert(fs.getChecksumType()))
.build();
}
public static FsPermissionProto convert(FsPermission p) {
return FsPermissionProto.newBuilder().setPerm(p.toExtendedShort()).build();
}
public static FsPermission convert(FsPermissionProto p) {
return new FsPermissionExtension((short)p.getPerm());
}
  // The createFlag field in PB is a bitmask whose values are the same as the
  // enum values of CreateFlag
public static int convertCreateFlag(EnumSetWritable<CreateFlag> flag) {
int value = 0;
if (flag.contains(CreateFlag.APPEND)) {
value |= CreateFlagProto.APPEND.getNumber();
}
if (flag.contains(CreateFlag.CREATE)) {
value |= CreateFlagProto.CREATE.getNumber();
}
if (flag.contains(CreateFlag.OVERWRITE)) {
value |= CreateFlagProto.OVERWRITE.getNumber();
}
if (flag.contains(CreateFlag.LAZY_PERSIST)) {
value |= CreateFlagProto.LAZY_PERSIST.getNumber();
}
if (flag.contains(CreateFlag.NEW_BLOCK)) {
value |= CreateFlagProto.NEW_BLOCK.getNumber();
}
return value;
}
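  /**
   * Decodes a CreateFlagProto bitmask back into an EnumSetWritable; for
   * example, a mask with both the CREATE and OVERWRITE bits set yields a
   * set containing CreateFlag.CREATE and CreateFlag.OVERWRITE.
   */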
public static EnumSetWritable<CreateFlag> convertCreateFlag(int flag) {
EnumSet<CreateFlag> result =
EnumSet.noneOf(CreateFlag.class);
if ((flag & CreateFlagProto.APPEND_VALUE) == CreateFlagProto.APPEND_VALUE) {
result.add(CreateFlag.APPEND);
}
if ((flag & CreateFlagProto.CREATE_VALUE) == CreateFlagProto.CREATE_VALUE) {
result.add(CreateFlag.CREATE);
}
if ((flag & CreateFlagProto.OVERWRITE_VALUE)
== CreateFlagProto.OVERWRITE_VALUE) {
result.add(CreateFlag.OVERWRITE);
}
if ((flag & CreateFlagProto.LAZY_PERSIST_VALUE)
== CreateFlagProto.LAZY_PERSIST_VALUE) {
result.add(CreateFlag.LAZY_PERSIST);
}
if ((flag & CreateFlagProto.NEW_BLOCK_VALUE)
== CreateFlagProto.NEW_BLOCK_VALUE) {
result.add(CreateFlag.NEW_BLOCK);
}
return new EnumSetWritable<CreateFlag>(result, CreateFlag.class);
}
public static int convertCacheFlags(EnumSet<CacheFlag> flags) {
int value = 0;
if (flags.contains(CacheFlag.FORCE)) {
value |= CacheFlagProto.FORCE.getNumber();
}
return value;
}
public static EnumSet<CacheFlag> convertCacheFlags(int flags) {
EnumSet<CacheFlag> result = EnumSet.noneOf(CacheFlag.class);
if ((flags & CacheFlagProto.FORCE_VALUE) == CacheFlagProto.FORCE_VALUE) {
result.add(CacheFlag.FORCE);
}
return result;
}
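  /**
   * Converts an HdfsFileStatusProto into an HdfsLocatedFileStatus, supplying
   * defaults (GRANDFATHER_INODE_ID, childrenNum of -1, unspecified storage
   * policy) for optional fields absent from the proto.
   */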
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
if (fs == null)
return null;
return new HdfsLocatedFileStatus(
fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
fs.getBlockReplication(), fs.getBlocksize(),
fs.getModificationTime(), fs.getAccessTime(),
PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(),
fs.getFileType().equals(FileType.IS_SYMLINK) ?
fs.getSymlink().toByteArray() : null,
fs.getPath().toByteArray(),
fs.hasFileId()? fs.getFileId(): HdfsConstants.GRANDFATHER_INODE_ID,
fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
: HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
}
public static SnapshottableDirectoryStatus convert(
SnapshottableDirectoryStatusProto sdirStatusProto) {
if (sdirStatusProto == null) {
return null;
}
final HdfsFileStatusProto status = sdirStatusProto.getDirStatus();
return new SnapshottableDirectoryStatus(
status.getModificationTime(),
status.getAccessTime(),
PBHelper.convert(status.getPermission()),
status.getOwner(),
status.getGroup(),
status.getPath().toByteArray(),
status.getFileId(),
status.getChildrenNum(),
sdirStatusProto.getSnapshotNumber(),
sdirStatusProto.getSnapshotQuota(),
sdirStatusProto.getParentFullpath().toByteArray());
}
public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
if (fs == null)
return null;
FileType fType = FileType.IS_FILE;
if (fs.isDir()) {
fType = FileType.IS_DIR;
} else if (fs.isSymlink()) {
fType = FileType.IS_SYMLINK;
}
HdfsFileStatusProto.Builder builder =
HdfsFileStatusProto.newBuilder().
setLength(fs.getLen()).
setFileType(fType).
setBlockReplication(fs.getReplication()).
setBlocksize(fs.getBlockSize()).
setModificationTime(fs.getModificationTime()).
setAccessTime(fs.getAccessTime()).
setPermission(PBHelper.convert(fs.getPermission())).
setOwner(fs.getOwner()).
setGroup(fs.getGroup()).
setFileId(fs.getFileId()).
setChildrenNum(fs.getChildrenNum()).
setPath(ByteString.copyFrom(fs.getLocalNameInBytes())).
setStoragePolicy(fs.getStoragePolicy());
if (fs.isSymlink()) {
builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
}
if (fs.getFileEncryptionInfo() != null) {
builder.setFileEncryptionInfo(convert(fs.getFileEncryptionInfo()));
}
if (fs instanceof HdfsLocatedFileStatus) {
final HdfsLocatedFileStatus lfs = (HdfsLocatedFileStatus) fs;
LocatedBlocks locations = lfs.getBlockLocations();
if (locations != null) {
builder.setLocations(PBHelper.convert(locations));
}
}
return builder.build();
}
public static SnapshottableDirectoryStatusProto convert(
SnapshottableDirectoryStatus status) {
if (status == null) {
return null;
}
int snapshotNumber = status.getSnapshotNumber();
int snapshotQuota = status.getSnapshotQuota();
byte[] parentFullPath = status.getParentFullPath();
ByteString parentFullPathBytes = ByteString.copyFrom(
parentFullPath == null ? DFSUtilClient.EMPTY_BYTES : parentFullPath);
HdfsFileStatusProto fs = convert(status.getDirStatus());
SnapshottableDirectoryStatusProto.Builder builder =
SnapshottableDirectoryStatusProto
.newBuilder().setSnapshotNumber(snapshotNumber)
.setSnapshotQuota(snapshotQuota).setParentFullpath(parentFullPathBytes)
.setDirStatus(fs);
return builder.build();
}
public static HdfsFileStatusProto[] convert(HdfsFileStatus[] fs) {
if (fs == null) return null;
final int len = fs.length;
HdfsFileStatusProto[] result = new HdfsFileStatusProto[len];
for (int i = 0; i < len; ++i) {
result[i] = PBHelper.convert(fs[i]);
}
return result;
}
public static HdfsFileStatus[] convert(HdfsFileStatusProto[] fs) {
if (fs == null) return null;
final int len = fs.length;
HdfsFileStatus[] result = new HdfsFileStatus[len];
for (int i = 0; i < len; ++i) {
result[i] = PBHelper.convert(fs[i]);
}
return result;
}
public static DirectoryListing convert(DirectoryListingProto dl) {
if (dl == null)
return null;
List<HdfsFileStatusProto> partList = dl.getPartialListingList();
return new DirectoryListing(
partList.isEmpty() ? new HdfsLocatedFileStatus[0]
: PBHelper.convert(
partList.toArray(new HdfsFileStatusProto[partList.size()])),
dl.getRemainingEntries());
}
public static DirectoryListingProto convert(DirectoryListing d) {
if (d == null)
return null;
return DirectoryListingProto.newBuilder().
addAllPartialListing(Arrays.asList(
PBHelper.convert(d.getPartialListing()))).
setRemainingEntries(d.getRemainingEntries()).
build();
}
public static long[] convert(GetFsStatsResponseProto res) {
long[] result = new long[7];
result[ClientProtocol.GET_STATS_CAPACITY_IDX] = res.getCapacity();
result[ClientProtocol.GET_STATS_USED_IDX] = res.getUsed();
result[ClientProtocol.GET_STATS_REMAINING_IDX] = res.getRemaining();
result[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] = res.getUnderReplicated();
result[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] = res.getCorruptBlocks();
result[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] = res.getMissingBlocks();
result[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX] =
res.getMissingReplOneBlocks();
return result;
}
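  /**
   * Builds a GetFsStatsResponseProto from the stats array, setting only the
   * statistics the array is long enough to supply.
   */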
public static GetFsStatsResponseProto convert(long[] fsStats) {
GetFsStatsResponseProto.Builder result = GetFsStatsResponseProto
.newBuilder();
if (fsStats.length >= ClientProtocol.GET_STATS_CAPACITY_IDX + 1)
result.setCapacity(fsStats[ClientProtocol.GET_STATS_CAPACITY_IDX]);
if (fsStats.length >= ClientProtocol.GET_STATS_USED_IDX + 1)
result.setUsed(fsStats[ClientProtocol.GET_STATS_USED_IDX]);
if (fsStats.length >= ClientProtocol.GET_STATS_REMAINING_IDX + 1)
result.setRemaining(fsStats[ClientProtocol.GET_STATS_REMAINING_IDX]);
if (fsStats.length >= ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX + 1)
result.setUnderReplicated(
fsStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
if (fsStats.length >= ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX + 1)
result.setCorruptBlocks(
fsStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]);
if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX + 1)
result.setMissingBlocks(
fsStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]);
if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX + 1)
result.setMissingReplOneBlocks(
fsStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX]);
return result.build();
}
public static DatanodeReportTypeProto
convert(DatanodeReportType t) {
switch (t) {
case ALL: return DatanodeReportTypeProto.ALL;
case LIVE: return DatanodeReportTypeProto.LIVE;
case DEAD: return DatanodeReportTypeProto.DEAD;
case DECOMMISSIONING: return DatanodeReportTypeProto.DECOMMISSIONING;
default:
throw new IllegalArgumentException("Unexpected data type report:" + t);
}
}
public static DatanodeReportType
convert(DatanodeReportTypeProto t) {
switch (t) {
case ALL: return DatanodeReportType.ALL;
case LIVE: return DatanodeReportType.LIVE;
case DEAD: return DatanodeReportType.DEAD;
case DECOMMISSIONING: return DatanodeReportType.DECOMMISSIONING;
default:
throw new IllegalArgumentException("Unexpected data type report:" + t);
}
}
public static SafeModeActionProto convert(
SafeModeAction a) {
switch (a) {
case SAFEMODE_LEAVE:
return SafeModeActionProto.SAFEMODE_LEAVE;
case SAFEMODE_ENTER:
return SafeModeActionProto.SAFEMODE_ENTER;
case SAFEMODE_GET:
return SafeModeActionProto.SAFEMODE_GET;
default:
throw new IllegalArgumentException("Unexpected SafeModeAction :" + a);
}
}
public static SafeModeAction convert(
ClientNamenodeProtocolProtos.SafeModeActionProto a) {
switch (a) {
case SAFEMODE_LEAVE:
return SafeModeAction.SAFEMODE_LEAVE;
case SAFEMODE_ENTER:
return SafeModeAction.SAFEMODE_ENTER;
case SAFEMODE_GET:
return SafeModeAction.SAFEMODE_GET;
default:
throw new IllegalArgumentException("Unexpected SafeModeAction :" + a);
}
}
public static RollingUpgradeActionProto convert(RollingUpgradeAction a) {
switch (a) {
case QUERY:
return RollingUpgradeActionProto.QUERY;
case PREPARE:
return RollingUpgradeActionProto.START;
case FINALIZE:
return RollingUpgradeActionProto.FINALIZE;
default:
throw new IllegalArgumentException("Unexpected value: " + a);
}
}
public static RollingUpgradeAction convert(RollingUpgradeActionProto a) {
switch (a) {
case QUERY:
return RollingUpgradeAction.QUERY;
case START:
return RollingUpgradeAction.PREPARE;
case FINALIZE:
return RollingUpgradeAction.FINALIZE;
default:
throw new IllegalArgumentException("Unexpected value: " + a);
}
}
public static RollingUpgradeStatusProto convertRollingUpgradeStatus(
RollingUpgradeStatus status) {
return RollingUpgradeStatusProto.newBuilder()
.setBlockPoolId(status.getBlockPoolId())
.setFinalized(status.isFinalized())
.build();
}
public static RollingUpgradeStatus convert(RollingUpgradeStatusProto proto) {
return new RollingUpgradeStatus(proto.getBlockPoolId(),
proto.getFinalized());
}
public static RollingUpgradeInfoProto convert(RollingUpgradeInfo info) {
return RollingUpgradeInfoProto.newBuilder()
.setStatus(convertRollingUpgradeStatus(info))
.setCreatedRollbackImages(info.createdRollbackImages())
.setStartTime(info.getStartTime())
.setFinalizeTime(info.getFinalizeTime())
.build();
}
public static RollingUpgradeInfo convert(RollingUpgradeInfoProto proto) {
RollingUpgradeStatusProto status = proto.getStatus();
return new RollingUpgradeInfo(status.getBlockPoolId(),
proto.getCreatedRollbackImages(),
proto.getStartTime(), proto.getFinalizeTime());
}
public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
if (c == null)
return null;
List<String> fileList = c.getFilesList();
return new CorruptFileBlocks(fileList.toArray(new String[fileList.size()]),
c.getCookie());
}
public static CorruptFileBlocksProto convert(CorruptFileBlocks c) {
if (c == null)
return null;
return CorruptFileBlocksProto.newBuilder().
addAllFiles(Arrays.asList(c.getFiles())).
setCookie(c.getCookie()).
build();
}
public static ContentSummary convert(ContentSummaryProto cs) {
if (cs == null) return null;
ContentSummary.Builder builder = new ContentSummary.Builder();
builder.length(cs.getLength()).
fileCount(cs.getFileCount()).
directoryCount(cs.getDirectoryCount()).
quota(cs.getQuota()).
spaceConsumed(cs.getSpaceConsumed()).
spaceQuota(cs.getSpaceQuota());
if (cs.hasTypeQuotaInfos()) {
for (HdfsProtos.StorageTypeQuotaInfoProto info :
cs.getTypeQuotaInfos().getTypeQuotaInfoList()) {
StorageType type = PBHelper.convertStorageType(info.getType());
builder.typeConsumed(type, info.getConsumed());
builder.typeQuota(type, info.getQuota());
}
}
return builder.build();
}
public static ContentSummaryProto convert(ContentSummary cs) {
if (cs == null) return null;
ContentSummaryProto.Builder builder = ContentSummaryProto.newBuilder();
builder.setLength(cs.getLength()).
setFileCount(cs.getFileCount()).
setDirectoryCount(cs.getDirectoryCount()).
setQuota(cs.getQuota()).
setSpaceConsumed(cs.getSpaceConsumed()).
setSpaceQuota(cs.getSpaceQuota());
if (cs.isTypeQuotaSet() || cs.isTypeConsumedAvailable()) {
HdfsProtos.StorageTypeQuotaInfosProto.Builder isb =
HdfsProtos.StorageTypeQuotaInfosProto.newBuilder();
for (StorageType t: StorageType.getTypesSupportingQuota()) {
HdfsProtos.StorageTypeQuotaInfoProto info =
HdfsProtos.StorageTypeQuotaInfoProto.newBuilder().
setType(convertStorageType(t)).
setConsumed(cs.getTypeConsumed(t)).
setQuota(cs.getTypeQuota(t)).
build();
isb.addTypeQuotaInfo(info);
}
builder.setTypeQuotaInfos(isb);
}
return builder.build();
}
public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {
if (s == null) return null;
switch (s.getState()) {
case ACTIVE:
return new NNHAStatusHeartbeat(HAServiceState.ACTIVE, s.getTxid());
case STANDBY:
return new NNHAStatusHeartbeat(HAServiceState.STANDBY, s.getTxid());
default:
throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" + s.getState());
}
}
public static NNHAStatusHeartbeatProto convert(NNHAStatusHeartbeat hb) {
if (hb == null) return null;
NNHAStatusHeartbeatProto.Builder builder =
NNHAStatusHeartbeatProto.newBuilder();
switch (hb.getState()) {
case ACTIVE:
builder.setState(NNHAStatusHeartbeatProto.State.ACTIVE);
break;
case STANDBY:
builder.setState(NNHAStatusHeartbeatProto.State.STANDBY);
break;
default:
throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" +
hb.getState());
}
builder.setTxid(hb.getTxId());
return builder.build();
}
public static DatanodeStorageProto convert(DatanodeStorage s) {
return DatanodeStorageProto.newBuilder()
.setState(PBHelper.convertState(s.getState()))
.setStorageType(PBHelper.convertStorageType(s.getStorageType()))
.setStorageUuid(s.getStorageID()).build();
}
private static StorageState convertState(State state) {
switch(state) {
case READ_ONLY_SHARED:
return StorageState.READ_ONLY_SHARED;
case NORMAL:
default:
return StorageState.NORMAL;
}
}
public static List<StorageTypeProto> convertStorageTypes(
StorageType[] types) {
return convertStorageTypes(types, 0);
}
public static List<StorageTypeProto> convertStorageTypes(
StorageType[] types, int startIdx) {
if (types == null) {
return null;
}
final List<StorageTypeProto> protos = new ArrayList<StorageTypeProto>(
types.length);
for (int i = startIdx; i < types.length; ++i) {
protos.add(convertStorageType(types[i]));
}
return protos;
}
public static StorageTypeProto convertStorageType(StorageType type) {
switch(type) {
case DISK:
return StorageTypeProto.DISK;
case SSD:
return StorageTypeProto.SSD;
case ARCHIVE:
return StorageTypeProto.ARCHIVE;
case RAM_DISK:
return StorageTypeProto.RAM_DISK;
default:
throw new IllegalStateException(
"BUG: StorageType not found, type=" + type);
}
}
public static DatanodeStorage convert(DatanodeStorageProto s) {
return new DatanodeStorage(s.getStorageUuid(),
PBHelper.convertState(s.getState()),
PBHelper.convertStorageType(s.getStorageType()));
}
private static State convertState(StorageState state) {
switch(state) {
case READ_ONLY_SHARED:
return DatanodeStorage.State.READ_ONLY_SHARED;
case NORMAL:
default:
return DatanodeStorage.State.NORMAL;
}
}
public static StorageType convertStorageType(StorageTypeProto type) {
switch(type) {
case DISK:
return StorageType.DISK;
case SSD:
return StorageType.SSD;
case ARCHIVE:
return StorageType.ARCHIVE;
case RAM_DISK:
return StorageType.RAM_DISK;
default:
throw new IllegalStateException(
"BUG: StorageTypeProto not found, type=" + type);
}
}
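  /**
   * Expands a list of storage type protos to expectedSize entries; an empty
   * list is treated as unspecified and filled with StorageType.DEFAULT.
   */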
public static StorageType[] convertStorageTypes(
List<StorageTypeProto> storageTypesList, int expectedSize) {
final StorageType[] storageTypes = new StorageType[expectedSize];
if (storageTypesList.size() != expectedSize) { // missing storage types
Preconditions.checkState(storageTypesList.isEmpty());
Arrays.fill(storageTypes, StorageType.DEFAULT);
} else {
for (int i = 0; i < storageTypes.length; ++i) {
storageTypes[i] = convertStorageType(storageTypesList.get(i));
}
}
return storageTypes;
}
public static StorageReportProto convert(StorageReport r) {
StorageReportProto.Builder builder = StorageReportProto.newBuilder()
.setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
.setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
.setStorageUuid(r.getStorage().getStorageID())
.setStorage(convert(r.getStorage()));
return builder.build();
}
public static StorageReport convert(StorageReportProto p) {
return new StorageReport(
p.hasStorage() ?
convert(p.getStorage()) :
new DatanodeStorage(p.getStorageUuid()),
p.getFailed(), p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
p.getBlockPoolUsed());
}
public static StorageReport[] convertStorageReports(
List<StorageReportProto> list) {
final StorageReport[] report = new StorageReport[list.size()];
for (int i = 0; i < report.length; i++) {
report[i] = convert(list.get(i));
}
return report;
}
public static List<StorageReportProto> convertStorageReports(StorageReport[] storages) {
final List<StorageReportProto> protos = new ArrayList<StorageReportProto>(
storages.length);
for(int i = 0; i < storages.length; i++) {
protos.add(convert(storages[i]));
}
return protos;
}
public static VolumeFailureSummary convertVolumeFailureSummary(
VolumeFailureSummaryProto proto) {
List<String> failedStorageLocations = proto.getFailedStorageLocationsList();
return new VolumeFailureSummary(
failedStorageLocations.toArray(new String[failedStorageLocations.size()]),
proto.getLastVolumeFailureDate(), proto.getEstimatedCapacityLostTotal());
}
public static VolumeFailureSummaryProto convertVolumeFailureSummary(
VolumeFailureSummary volumeFailureSummary) {
VolumeFailureSummaryProto.Builder builder =
VolumeFailureSummaryProto.newBuilder();
for (String failedStorageLocation:
volumeFailureSummary.getFailedStorageLocations()) {
builder.addFailedStorageLocations(failedStorageLocation);
}
builder.setLastVolumeFailureDate(
volumeFailureSummary.getLastVolumeFailureDate());
builder.setEstimatedCapacityLostTotal(
volumeFailureSummary.getEstimatedCapacityLostTotal());
return builder.build();
}
public static JournalInfo convert(JournalInfoProto info) {
int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0;
int nsID = info.hasNamespaceID() ? info.getNamespaceID() : 0;
return new JournalInfo(lv, info.getClusterID(), nsID);
}
  /**
   * Converts a {@link JournalInfo}, as sent from the Namenode to journal
   * receivers, into its {@link JournalInfoProto} wire form.
   */
public static JournalInfoProto convert(JournalInfo j) {
return JournalInfoProto.newBuilder().setClusterID(j.getClusterId())
.setLayoutVersion(j.getLayoutVersion())
.setNamespaceID(j.getNamespaceId()).build();
}
public static SnapshottableDirectoryStatus[] convert(
SnapshottableDirectoryListingProto sdlp) {
if (sdlp == null)
return null;
List<SnapshottableDirectoryStatusProto> list = sdlp
.getSnapshottableDirListingList();
if (list.isEmpty()) {
return new SnapshottableDirectoryStatus[0];
} else {
SnapshottableDirectoryStatus[] result =
new SnapshottableDirectoryStatus[list.size()];
for (int i = 0; i < list.size(); i++) {
result[i] = PBHelper.convert(list.get(i));
}
return result;
}
}
public static SnapshottableDirectoryListingProto convert(
SnapshottableDirectoryStatus[] status) {
if (status == null)
return null;
SnapshottableDirectoryStatusProto[] protos =
new SnapshottableDirectoryStatusProto[status.length];
for (int i = 0; i < status.length; i++) {
protos[i] = PBHelper.convert(status[i]);
}
List<SnapshottableDirectoryStatusProto> protoList = Arrays.asList(protos);
return SnapshottableDirectoryListingProto.newBuilder()
.addAllSnapshottableDirListing(protoList).build();
}
public static DiffReportEntry convert(SnapshotDiffReportEntryProto entry) {
if (entry == null) {
return null;
}
DiffType type = DiffType.getTypeFromLabel(entry
.getModificationLabel());
return type == null ? null : new DiffReportEntry(type, entry.getFullpath()
.toByteArray(), entry.hasTargetPath() ? entry.getTargetPath()
.toByteArray() : null);
}
public static SnapshotDiffReportEntryProto convert(DiffReportEntry entry) {
if (entry == null) {
return null;
}
ByteString sourcePath = ByteString
.copyFrom(entry.getSourcePath() == null ? DFSUtilClient.EMPTY_BYTES : entry
.getSourcePath());
String modification = entry.getType().getLabel();
SnapshotDiffReportEntryProto.Builder builder = SnapshotDiffReportEntryProto
.newBuilder().setFullpath(sourcePath)
.setModificationLabel(modification);
if (entry.getType() == DiffType.RENAME) {
ByteString targetPath = ByteString
.copyFrom(entry.getTargetPath() == null ? DFSUtilClient.EMPTY_BYTES : entry
.getTargetPath());
builder.setTargetPath(targetPath);
}
return builder.build();
}
public static SnapshotDiffReport convert(SnapshotDiffReportProto reportProto) {
if (reportProto == null) {
return null;
}
String snapshotDir = reportProto.getSnapshotRoot();
String fromSnapshot = reportProto.getFromSnapshot();
String toSnapshot = reportProto.getToSnapshot();
List<SnapshotDiffReportEntryProto> list = reportProto
.getDiffReportEntriesList();
List<DiffReportEntry> entries = new ArrayList<DiffReportEntry>();
for (SnapshotDiffReportEntryProto entryProto : list) {
DiffReportEntry entry = convert(entryProto);
if (entry != null)
entries.add(entry);
}
return new SnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot,
entries);
}
public static SnapshotDiffReportProto convert(SnapshotDiffReport report) {
if (report == null) {
return null;
}
List<DiffReportEntry> entries = report.getDiffList();
List<SnapshotDiffReportEntryProto> entryProtos =
new ArrayList<SnapshotDiffReportEntryProto>();
for (DiffReportEntry entry : entries) {
SnapshotDiffReportEntryProto entryProto = convert(entry);
if (entryProto != null)
entryProtos.add(entryProto);
}
SnapshotDiffReportProto reportProto = SnapshotDiffReportProto.newBuilder()
.setSnapshotRoot(report.getSnapshotRoot())
.setFromSnapshot(report.getFromSnapshot())
.setToSnapshot(report.getLaterSnapshotName())
.addAllDiffReportEntries(entryProtos).build();
return reportProto;
}
public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) {
return DataChecksum.Type.valueOf(type.getNumber());
}
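  /**
   * Converts a CacheDirectiveInfo to its proto form, writing only the fields
   * that are set on the directive and mirroring the proto's optional fields.
   */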
  public static CacheDirectiveInfoProto convert(CacheDirectiveInfo info) {
CacheDirectiveInfoProto.Builder builder =
CacheDirectiveInfoProto.newBuilder();
if (info.getId() != null) {
builder.setId(info.getId());
}
if (info.getPath() != null) {
builder.setPath(info.getPath().toUri().getPath());
}
if (info.getReplication() != null) {
builder.setReplication(info.getReplication());
}
if (info.getPool() != null) {
builder.setPool(info.getPool());
}
if (info.getExpiration() != null) {
builder.setExpiration(convert(info.getExpiration()));
}
return builder.build();
}
  public static CacheDirectiveInfo convert(CacheDirectiveInfoProto proto) {
CacheDirectiveInfo.Builder builder =
new CacheDirectiveInfo.Builder();
if (proto.hasId()) {
builder.setId(proto.getId());
}
if (proto.hasPath()) {
builder.setPath(new Path(proto.getPath()));
}
if (proto.hasReplication()) {
builder.setReplication(Shorts.checkedCast(
proto.getReplication()));
}
if (proto.hasPool()) {
builder.setPool(proto.getPool());
}
if (proto.hasExpiration()) {
builder.setExpiration(convert(proto.getExpiration()));
}
return builder.build();
}
public static CacheDirectiveInfoExpirationProto convert(
CacheDirectiveInfo.Expiration expiration) {
return CacheDirectiveInfoExpirationProto.newBuilder()
.setIsRelative(expiration.isRelative())
.setMillis(expiration.getMillis())
.build();
}
public static CacheDirectiveInfo.Expiration convert(
CacheDirectiveInfoExpirationProto proto) {
if (proto.getIsRelative()) {
return CacheDirectiveInfo.Expiration.newRelative(proto.getMillis());
}
return CacheDirectiveInfo.Expiration.newAbsolute(proto.getMillis());
}
public static CacheDirectiveStatsProto convert(CacheDirectiveStats stats) {
CacheDirectiveStatsProto.Builder builder =
CacheDirectiveStatsProto.newBuilder();
builder.setBytesNeeded(stats.getBytesNeeded());
builder.setBytesCached(stats.getBytesCached());
builder.setFilesNeeded(stats.getFilesNeeded());
builder.setFilesCached(stats.getFilesCached());
builder.setHasExpired(stats.hasExpired());
return builder.build();
}
public static CacheDirectiveStats convert(CacheDirectiveStatsProto proto) {
CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();
builder.setBytesNeeded(proto.getBytesNeeded());
builder.setBytesCached(proto.getBytesCached());
builder.setFilesNeeded(proto.getFilesNeeded());
builder.setFilesCached(proto.getFilesCached());
builder.setHasExpired(proto.getHasExpired());
return builder.build();
}
public static CacheDirectiveEntryProto convert(CacheDirectiveEntry entry) {
CacheDirectiveEntryProto.Builder builder =
CacheDirectiveEntryProto.newBuilder();
builder.setInfo(PBHelper.convert(entry.getInfo()));
builder.setStats(PBHelper.convert(entry.getStats()));
return builder.build();
}
public static CacheDirectiveEntry convert(CacheDirectiveEntryProto proto) {
CacheDirectiveInfo info = PBHelper.convert(proto.getInfo());
CacheDirectiveStats stats = PBHelper.convert(proto.getStats());
return new CacheDirectiveEntry(info, stats);
}
public static CachePoolInfoProto convert(CachePoolInfo info) {
CachePoolInfoProto.Builder builder = CachePoolInfoProto.newBuilder();
builder.setPoolName(info.getPoolName());
if (info.getOwnerName() != null) {
builder.setOwnerName(info.getOwnerName());
}
if (info.getGroupName() != null) {
builder.setGroupName(info.getGroupName());
}
if (info.getMode() != null) {
builder.setMode(info.getMode().toShort());
}
if (info.getLimit() != null) {
builder.setLimit(info.getLimit());
}
if (info.getMaxRelativeExpiryMs() != null) {
builder.setMaxRelativeExpiry(info.getMaxRelativeExpiryMs());
}
return builder.build();
}
  public static CachePoolInfo convert(CachePoolInfoProto proto) {
// Pool name is a required field, the rest are optional
String poolName = checkNotNull(proto.getPoolName());
CachePoolInfo info = new CachePoolInfo(poolName);
if (proto.hasOwnerName()) {
info.setOwnerName(proto.getOwnerName());
}
if (proto.hasGroupName()) {
info.setGroupName(proto.getGroupName());
}
if (proto.hasMode()) {
info.setMode(new FsPermission((short)proto.getMode()));
}
if (proto.hasLimit()) {
info.setLimit(proto.getLimit());
}
if (proto.hasMaxRelativeExpiry()) {
info.setMaxRelativeExpiryMs(proto.getMaxRelativeExpiry());
}
return info;
}
public static CachePoolStatsProto convert(CachePoolStats stats) {
CachePoolStatsProto.Builder builder = CachePoolStatsProto.newBuilder();
builder.setBytesNeeded(stats.getBytesNeeded());
builder.setBytesCached(stats.getBytesCached());
builder.setBytesOverlimit(stats.getBytesOverlimit());
builder.setFilesNeeded(stats.getFilesNeeded());
builder.setFilesCached(stats.getFilesCached());
return builder.build();
}
  public static CachePoolStats convert(CachePoolStatsProto proto) {
CachePoolStats.Builder builder = new CachePoolStats.Builder();
builder.setBytesNeeded(proto.getBytesNeeded());
builder.setBytesCached(proto.getBytesCached());
builder.setBytesOverlimit(proto.getBytesOverlimit());
builder.setFilesNeeded(proto.getFilesNeeded());
builder.setFilesCached(proto.getFilesCached());
return builder.build();
}
public static CachePoolEntryProto convert(CachePoolEntry entry) {
CachePoolEntryProto.Builder builder = CachePoolEntryProto.newBuilder();
builder.setInfo(PBHelper.convert(entry.getInfo()));
builder.setStats(PBHelper.convert(entry.getStats()));
return builder.build();
}
  public static CachePoolEntry convert(CachePoolEntryProto proto) {
CachePoolInfo info = PBHelper.convert(proto.getInfo());
CachePoolStats stats = PBHelper.convert(proto.getStats());
return new CachePoolEntry(info, stats);
}
public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
}
public static DatanodeLocalInfoProto convert(DatanodeLocalInfo info) {
DatanodeLocalInfoProto.Builder builder = DatanodeLocalInfoProto.newBuilder();
builder.setSoftwareVersion(info.getSoftwareVersion());
builder.setConfigVersion(info.getConfigVersion());
builder.setUptime(info.getUptime());
return builder.build();
}
public static DatanodeLocalInfo convert(DatanodeLocalInfoProto proto) {
return new DatanodeLocalInfo(proto.getSoftwareVersion(),
proto.getConfigVersion(), proto.getUptime());
}
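  /**
   * Reads a protobuf varint32 length prefix from the stream and returns an
   * ExactSizeInputStream bounded to that many bytes; callers typically hand
   * the result straight to a protobuf message's parseFrom().
   */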
public static InputStream vintPrefixed(final InputStream input)
throws IOException {
final int firstByte = input.read();
if (firstByte == -1) {
throw new EOFException("Premature EOF: no length prefix available");
}
int size = CodedInputStream.readRawVarint32(firstByte, input);
assert size >= 0;
return new ExactSizeInputStream(input, size);
}
private static AclEntryScopeProto convert(AclEntryScope v) {
return AclEntryScopeProto.valueOf(v.ordinal());
}
private static AclEntryScope convert(AclEntryScopeProto v) {
return castEnum(v, ACL_ENTRY_SCOPE_VALUES);
}
private static AclEntryTypeProto convert(AclEntryType e) {
return AclEntryTypeProto.valueOf(e.ordinal());
}
private static AclEntryType convert(AclEntryTypeProto v) {
return castEnum(v, ACL_ENTRY_TYPE_VALUES);
}
private static XAttrNamespaceProto convert(XAttr.NameSpace v) {
return XAttrNamespaceProto.valueOf(v.ordinal());
}
private static XAttr.NameSpace convert(XAttrNamespaceProto v) {
return castEnum(v, XATTR_NAMESPACE_VALUES);
}
public static FsActionProto convert(FsAction v) {
return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
}
public static FsAction convert(FsActionProto v) {
return castEnum(v, FSACTION_VALUES);
}
public static List<AclEntryProto> convertAclEntryProto(
List<AclEntry> aclSpec) {
ArrayList<AclEntryProto> r = Lists.newArrayListWithCapacity(aclSpec.size());
for (AclEntry e : aclSpec) {
AclEntryProto.Builder builder = AclEntryProto.newBuilder();
builder.setType(convert(e.getType()));
builder.setScope(convert(e.getScope()));
builder.setPermissions(convert(e.getPermission()));
if (e.getName() != null) {
builder.setName(e.getName());
}
r.add(builder.build());
}
return r;
}
public static List<AclEntry> convertAclEntry(List<AclEntryProto> aclSpec) {
ArrayList<AclEntry> r = Lists.newArrayListWithCapacity(aclSpec.size());
for (AclEntryProto e : aclSpec) {
AclEntry.Builder builder = new AclEntry.Builder();
builder.setType(convert(e.getType()));
builder.setScope(convert(e.getScope()));
builder.setPermission(convert(e.getPermissions()));
if (e.hasName()) {
builder.setName(e.getName());
}
r.add(builder.build());
}
return r;
}
public static AclStatus convert(GetAclStatusResponseProto e) {
AclStatusProto r = e.getResult();
AclStatus.Builder builder = new AclStatus.Builder();
builder.owner(r.getOwner()).group(r.getGroup()).stickyBit(r.getSticky())
.addEntries(convertAclEntry(r.getEntriesList()));
if (r.hasPermission()) {
builder.setPermission(convert(r.getPermission()));
}
return builder.build();
}
public static GetAclStatusResponseProto convert(AclStatus e) {
AclStatusProto.Builder builder = AclStatusProto.newBuilder();
builder.setOwner(e.getOwner())
.setGroup(e.getGroup()).setSticky(e.isStickyBit())
.addAllEntries(convertAclEntryProto(e.getEntries()));
if (e.getPermission() != null) {
builder.setPermission(convert(e.getPermission()));
}
AclStatusProto r = builder.build();
return GetAclStatusResponseProto.newBuilder().setResult(r).build();
}
public static XAttrProto convertXAttrProto(XAttr a) {
XAttrProto.Builder builder = XAttrProto.newBuilder();
builder.setNamespace(convert(a.getNameSpace()));
if (a.getName() != null) {
builder.setName(a.getName());
}
if (a.getValue() != null) {
builder.setValue(getByteString(a.getValue()));
}
return builder.build();
}
public static List<XAttrProto> convertXAttrProto(
List<XAttr> xAttrSpec) {
if (xAttrSpec == null) {
return Lists.newArrayListWithCapacity(0);
}
ArrayList<XAttrProto> xAttrs = Lists.newArrayListWithCapacity(
xAttrSpec.size());
for (XAttr a : xAttrSpec) {
XAttrProto.Builder builder = XAttrProto.newBuilder();
builder.setNamespace(convert(a.getNameSpace()));
if (a.getName() != null) {
builder.setName(a.getName());
}
if (a.getValue() != null) {
builder.setValue(getByteString(a.getValue()));
}
xAttrs.add(builder.build());
}
return xAttrs;
}
/**
   * The flag field in PB is a bitmask whose values are the same as the
   * enum values of XAttrSetFlag
*/
public static int convert(EnumSet<XAttrSetFlag> flag) {
int value = 0;
if (flag.contains(XAttrSetFlag.CREATE)) {
value |= XAttrSetFlagProto.XATTR_CREATE.getNumber();
}
if (flag.contains(XAttrSetFlag.REPLACE)) {
value |= XAttrSetFlagProto.XATTR_REPLACE.getNumber();
}
return value;
}
public static EnumSet<XAttrSetFlag> convert(int flag) {
EnumSet<XAttrSetFlag> result =
EnumSet.noneOf(XAttrSetFlag.class);
if ((flag & XAttrSetFlagProto.XATTR_CREATE_VALUE) ==
XAttrSetFlagProto.XATTR_CREATE_VALUE) {
result.add(XAttrSetFlag.CREATE);
}
if ((flag & XAttrSetFlagProto.XATTR_REPLACE_VALUE) ==
XAttrSetFlagProto.XATTR_REPLACE_VALUE) {
result.add(XAttrSetFlag.REPLACE);
}
return result;
}
public static XAttr convertXAttr(XAttrProto a) {
XAttr.Builder builder = new XAttr.Builder();
builder.setNameSpace(convert(a.getNamespace()));
if (a.hasName()) {
builder.setName(a.getName());
}
if (a.hasValue()) {
builder.setValue(a.getValue().toByteArray());
}
return builder.build();
}
public static List<XAttr> convertXAttrs(List<XAttrProto> xAttrSpec) {
ArrayList<XAttr> xAttrs = Lists.newArrayListWithCapacity(xAttrSpec.size());
for (XAttrProto a : xAttrSpec) {
XAttr.Builder builder = new XAttr.Builder();
builder.setNameSpace(convert(a.getNamespace()));
if (a.hasName()) {
builder.setName(a.getName());
}
if (a.hasValue()) {
builder.setValue(a.getValue().toByteArray());
}
xAttrs.add(builder.build());
}
return xAttrs;
}
public static List<XAttr> convert(GetXAttrsResponseProto a) {
List<XAttrProto> xAttrs = a.getXAttrsList();
return convertXAttrs(xAttrs);
}
public static GetXAttrsResponseProto convertXAttrsResponse(
List<XAttr> xAttrs) {
GetXAttrsResponseProto.Builder builder = GetXAttrsResponseProto
.newBuilder();
if (xAttrs != null) {
builder.addAllXAttrs(convertXAttrProto(xAttrs));
}
return builder.build();
}
public static List<XAttr> convert(ListXAttrsResponseProto a) {
final List<XAttrProto> xAttrs = a.getXAttrsList();
return convertXAttrs(xAttrs);
}
public static ListXAttrsResponseProto convertListXAttrsResponse(
List<XAttr> names) {
ListXAttrsResponseProto.Builder builder =
ListXAttrsResponseProto.newBuilder();
if (names != null) {
builder.addAllXAttrs(convertXAttrProto(names));
}
return builder.build();
}
public static EncryptionZoneProto convert(EncryptionZone zone) {
return EncryptionZoneProto.newBuilder()
.setId(zone.getId())
.setPath(zone.getPath())
.setSuite(convert(zone.getSuite()))
.setCryptoProtocolVersion(convert(zone.getVersion()))
.setKeyName(zone.getKeyName())
.build();
}
public static EncryptionZone convert(EncryptionZoneProto proto) {
return new EncryptionZone(proto.getId(), proto.getPath(),
convert(proto.getSuite()), convert(proto.getCryptoProtocolVersion()),
proto.getKeyName());
}
public static ShortCircuitShmSlotProto convert(SlotId slotId) {
return ShortCircuitShmSlotProto.newBuilder().
setShmId(convert(slotId.getShmId())).
setSlotIdx(slotId.getSlotIdx()).
build();
}
public static ShortCircuitShmIdProto convert(ShmId shmId) {
return ShortCircuitShmIdProto.newBuilder().
setHi(shmId.getHi()).
setLo(shmId.getLo()).
build();
}
public static SlotId convert(ShortCircuitShmSlotProto slotId) {
return new SlotId(PBHelper.convert(slotId.getShmId()),
slotId.getSlotIdx());
}
public static ShmId convert(ShortCircuitShmIdProto shmId) {
return new ShmId(shmId.getHi(), shmId.getLo());
}
private static Event.CreateEvent.INodeType createTypeConvert(InotifyProtos.INodeType
type) {
switch (type) {
case I_TYPE_DIRECTORY:
return Event.CreateEvent.INodeType.DIRECTORY;
case I_TYPE_FILE:
return Event.CreateEvent.INodeType.FILE;
case I_TYPE_SYMLINK:
return Event.CreateEvent.INodeType.SYMLINK;
default:
return null;
}
}
private static InotifyProtos.MetadataUpdateType metadataUpdateTypeConvert(
Event.MetadataUpdateEvent.MetadataType type) {
switch (type) {
case TIMES:
return InotifyProtos.MetadataUpdateType.META_TYPE_TIMES;
case REPLICATION:
return InotifyProtos.MetadataUpdateType.META_TYPE_REPLICATION;
case OWNER:
return InotifyProtos.MetadataUpdateType.META_TYPE_OWNER;
case PERMS:
return InotifyProtos.MetadataUpdateType.META_TYPE_PERMS;
case ACLS:
return InotifyProtos.MetadataUpdateType.META_TYPE_ACLS;
case XATTRS:
return InotifyProtos.MetadataUpdateType.META_TYPE_XATTRS;
default:
return null;
}
}
private static Event.MetadataUpdateEvent.MetadataType metadataUpdateTypeConvert(
InotifyProtos.MetadataUpdateType type) {
switch (type) {
case META_TYPE_TIMES:
return Event.MetadataUpdateEvent.MetadataType.TIMES;
case META_TYPE_REPLICATION:
return Event.MetadataUpdateEvent.MetadataType.REPLICATION;
case META_TYPE_OWNER:
return Event.MetadataUpdateEvent.MetadataType.OWNER;
case META_TYPE_PERMS:
return Event.MetadataUpdateEvent.MetadataType.PERMS;
case META_TYPE_ACLS:
return Event.MetadataUpdateEvent.MetadataType.ACLS;
case META_TYPE_XATTRS:
return Event.MetadataUpdateEvent.MetadataType.XATTRS;
default:
return null;
}
}
private static InotifyProtos.INodeType createTypeConvert(Event.CreateEvent.INodeType
type) {
switch (type) {
case DIRECTORY:
return InotifyProtos.INodeType.I_TYPE_DIRECTORY;
case FILE:
return InotifyProtos.INodeType.I_TYPE_FILE;
case SYMLINK:
return InotifyProtos.INodeType.I_TYPE_SYMLINK;
default:
return null;
}
}
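  /**
   * Converts an inotify GetEditsFromTxidResponseProto into an EventBatchList,
   * rejecting responses in the pre-batch wire format and any batch whose
   * txid (other than the -1 placeholder) falls outside the advertised
   * [firstTxid, lastTxid] range.
   */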
public static EventBatchList convert(GetEditsFromTxidResponseProto resp) throws
IOException {
final InotifyProtos.EventsListProto list = resp.getEventsList();
final long firstTxid = list.getFirstTxid();
final long lastTxid = list.getLastTxid();
List<EventBatch> batches = Lists.newArrayList();
if (list.getEventsList().size() > 0) {
throw new IOException("Can't handle old inotify server response.");
}
for (InotifyProtos.EventBatchProto bp : list.getBatchList()) {
long txid = bp.getTxid();
if ((txid != -1) && ((txid < firstTxid) || (txid > lastTxid))) {
throw new IOException("Error converting TxidResponseProto: got a " +
"transaction id " + txid + " that was outside the range of [" +
firstTxid + ", " + lastTxid + "].");
}
List<Event> events = Lists.newArrayList();
for (InotifyProtos.EventProto p : bp.getEventsList()) {
switch (p.getType()) {
case EVENT_CLOSE:
InotifyProtos.CloseEventProto close =
InotifyProtos.CloseEventProto.parseFrom(p.getContents());
events.add(new Event.CloseEvent(close.getPath(),
close.getFileSize(), close.getTimestamp()));
break;
case EVENT_CREATE:
InotifyProtos.CreateEventProto create =
InotifyProtos.CreateEventProto.parseFrom(p.getContents());
events.add(new Event.CreateEvent.Builder()
.iNodeType(createTypeConvert(create.getType()))
.path(create.getPath())
.ctime(create.getCtime())
.ownerName(create.getOwnerName())
.groupName(create.getGroupName())
.perms(convert(create.getPerms()))
.replication(create.getReplication())
.symlinkTarget(create.getSymlinkTarget().isEmpty() ? null :
create.getSymlinkTarget())
.defaultBlockSize(create.getDefaultBlockSize())
.overwrite(create.getOverwrite()).build());
break;
case EVENT_METADATA:
InotifyProtos.MetadataUpdateEventProto meta =
InotifyProtos.MetadataUpdateEventProto.parseFrom(p.getContents());
events.add(new Event.MetadataUpdateEvent.Builder()
.path(meta.getPath())
.metadataType(metadataUpdateTypeConvert(meta.getType()))
.mtime(meta.getMtime())
.atime(meta.getAtime())
.replication(meta.getReplication())
.ownerName(
meta.getOwnerName().isEmpty() ? null : meta.getOwnerName())
.groupName(
meta.getGroupName().isEmpty() ? null : meta.getGroupName())
.perms(meta.hasPerms() ? convert(meta.getPerms()) : null)
.acls(meta.getAclsList().isEmpty() ? null : convertAclEntry(
meta.getAclsList()))
.xAttrs(meta.getXAttrsList().isEmpty() ? null : convertXAttrs(
meta.getXAttrsList()))
.xAttrsRemoved(meta.getXAttrsRemoved())
.build());
break;
case EVENT_RENAME:
InotifyProtos.RenameEventProto rename =
InotifyProtos.RenameEventProto.parseFrom(p.getContents());
events.add(new Event.RenameEvent.Builder()
.srcPath(rename.getSrcPath())
.dstPath(rename.getDestPath())
.timestamp(rename.getTimestamp())
.build());
break;
case EVENT_APPEND:
InotifyProtos.AppendEventProto append =
InotifyProtos.AppendEventProto.parseFrom(p.getContents());
events.add(new Event.AppendEvent.Builder().path(append.getPath())
.newBlock(append.hasNewBlock() && append.getNewBlock())
.build());
break;
case EVENT_UNLINK:
InotifyProtos.UnlinkEventProto unlink =
InotifyProtos.UnlinkEventProto.parseFrom(p.getContents());
events.add(new Event.UnlinkEvent.Builder()
.path(unlink.getPath())
.timestamp(unlink.getTimestamp())
.build());
break;
case EVENT_TRUNCATE:
InotifyProtos.TruncateEventProto truncate =
InotifyProtos.TruncateEventProto.parseFrom(p.getContents());
events.add(new Event.TruncateEvent(truncate.getPath(),
truncate.getFileSize(), truncate.getTimestamp()));
break;
default:
throw new RuntimeException("Unexpected inotify event type: " +
p.getType());
}
}
batches.add(new EventBatch(txid, events.toArray(new Event[0])));
}
return new EventBatchList(batches, resp.getEventsList().getFirstTxid(),
resp.getEventsList().getLastTxid(), resp.getEventsList().getSyncTxid());
}
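  /**
   * Serializes an EventBatchList back into a GetEditsFromTxidResponseProto,
   * encoding each event as the matching per-type inotify proto payload.
   */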
public static GetEditsFromTxidResponseProto convertEditsResponse(EventBatchList el) {
InotifyProtos.EventsListProto.Builder builder =
InotifyProtos.EventsListProto.newBuilder();
for (EventBatch b : el.getBatches()) {
List<InotifyProtos.EventProto> events = Lists.newArrayList();
for (Event e : b.getEvents()) {
switch (e.getEventType()) {
case CLOSE:
Event.CloseEvent ce = (Event.CloseEvent) e;
events.add(InotifyProtos.EventProto.newBuilder()
.setType(InotifyProtos.EventType.EVENT_CLOSE)
.setContents(
InotifyProtos.CloseEventProto.newBuilder()
.setPath(ce.getPath())
.setFileSize(ce.getFileSize())
.setTimestamp(ce.getTimestamp()).build().toByteString()
).build());
break;
case CREATE:
Event.CreateEvent ce2 = (Event.CreateEvent) e;
events.add(InotifyProtos.EventProto.newBuilder()
.setType(InotifyProtos.EventType.EVENT_CREATE)
.setContents(
InotifyProtos.CreateEventProto.newBuilder()
.setType(createTypeConvert(ce2.getiNodeType()))
.setPath(ce2.getPath())
.setCtime(ce2.getCtime())
.setOwnerName(ce2.getOwnerName())
.setGroupName(ce2.getGroupName())
.setPerms(convert(ce2.getPerms()))
.setReplication(ce2.getReplication())
.setSymlinkTarget(ce2.getSymlinkTarget() == null ?
"" : ce2.getSymlinkTarget())
.setDefaultBlockSize(ce2.getDefaultBlockSize())
.setOverwrite(ce2.getOverwrite()).build().toByteString()
).build());
break;
case METADATA:
Event.MetadataUpdateEvent me = (Event.MetadataUpdateEvent) e;
InotifyProtos.MetadataUpdateEventProto.Builder metaB =
InotifyProtos.MetadataUpdateEventProto.newBuilder()
.setPath(me.getPath())
.setType(metadataUpdateTypeConvert(me.getMetadataType()))
.setMtime(me.getMtime())
.setAtime(me.getAtime())
.setReplication(me.getReplication())
.setOwnerName(me.getOwnerName() == null ? "" :
me.getOwnerName())
.setGroupName(me.getGroupName() == null ? "" :
me.getGroupName())
.addAllAcls(me.getAcls() == null ?
Lists.<AclEntryProto>newArrayList() :
convertAclEntryProto(me.getAcls()))
.addAllXAttrs(me.getxAttrs() == null ?
Lists.<XAttrProto>newArrayList() :
convertXAttrProto(me.getxAttrs()))
.setXAttrsRemoved(me.isxAttrsRemoved());
if (me.getPerms() != null) {
metaB.setPerms(convert(me.getPerms()));
}
events.add(InotifyProtos.EventProto.newBuilder()
.setType(InotifyProtos.EventType.EVENT_METADATA)
.setContents(metaB.build().toByteString())
.build());
break;
case RENAME:
Event.RenameEvent re = (Event.RenameEvent) e;
events.add(InotifyProtos.EventProto.newBuilder()
.setType(InotifyProtos.EventType.EVENT_RENAME)
.setContents(
InotifyProtos.RenameEventProto.newBuilder()
.setSrcPath(re.getSrcPath())
.setDestPath(re.getDstPath())
.setTimestamp(re.getTimestamp()).build().toByteString()
).build());
break;
case APPEND:
Event.AppendEvent re2 = (Event.AppendEvent) e;
events.add(InotifyProtos.EventProto.newBuilder()
.setType(InotifyProtos.EventType.EVENT_APPEND)
.setContents(InotifyProtos.AppendEventProto.newBuilder()
.setPath(re2.getPath())
.setNewBlock(re2.toNewBlock()).build().toByteString())
.build());
break;
case UNLINK:
Event.UnlinkEvent ue = (Event.UnlinkEvent) e;
events.add(InotifyProtos.EventProto.newBuilder()
.setType(InotifyProtos.EventType.EVENT_UNLINK)
.setContents(
InotifyProtos.UnlinkEventProto.newBuilder()
.setPath(ue.getPath())
.setTimestamp(ue.getTimestamp()).build().toByteString()
).build());
break;
case TRUNCATE:
Event.TruncateEvent te = (Event.TruncateEvent) e;
events.add(InotifyProtos.EventProto.newBuilder()
.setType(InotifyProtos.EventType.EVENT_TRUNCATE)
.setContents(
InotifyProtos.TruncateEventProto.newBuilder()
.setPath(te.getPath())
.setFileSize(te.getFileSize())
.setTimestamp(te.getTimestamp()).build().toByteString()
).build());
break;
default:
throw new RuntimeException("Unexpected inotify event: " + e);
}
}
builder.addBatch(InotifyProtos.EventBatchProto.newBuilder().
setTxid(b.getTxid()).
addAllEvents(events));
}
builder.setFirstTxid(el.getFirstTxid());
builder.setLastTxid(el.getLastTxid());
builder.setSyncTxid(el.getSyncTxid());
return GetEditsFromTxidResponseProto.newBuilder().setEventsList(
builder.build()).build();
}
public static CipherOptionProto convert(CipherOption option) {
if (option != null) {
CipherOptionProto.Builder builder = CipherOptionProto.
newBuilder();
if (option.getCipherSuite() != null) {
builder.setSuite(convert(option.getCipherSuite()));
}
if (option.getInKey() != null) {
builder.setInKey(ByteString.copyFrom(option.getInKey()));
}
if (option.getInIv() != null) {
builder.setInIv(ByteString.copyFrom(option.getInIv()));
}
if (option.getOutKey() != null) {
builder.setOutKey(ByteString.copyFrom(option.getOutKey()));
}
if (option.getOutIv() != null) {
builder.setOutIv(ByteString.copyFrom(option.getOutIv()));
}
return builder.build();
}
return null;
}
public static CipherOption convert(CipherOptionProto proto) {
if (proto != null) {
CipherSuite suite = null;
if (proto.getSuite() != null) {
suite = convert(proto.getSuite());
}
byte[] inKey = null;
if (proto.getInKey() != null) {
inKey = proto.getInKey().toByteArray();
}
byte[] inIv = null;
if (proto.getInIv() != null) {
inIv = proto.getInIv().toByteArray();
}
byte[] outKey = null;
if (proto.getOutKey() != null) {
outKey = proto.getOutKey().toByteArray();
}
byte[] outIv = null;
if (proto.getOutIv() != null) {
outIv = proto.getOutIv().toByteArray();
}
return new CipherOption(suite, inKey, inIv, outKey, outIv);
}
return null;
}
public static List<CipherOptionProto> convertCipherOptions(
List<CipherOption> options) {
if (options != null) {
List<CipherOptionProto> protos =
Lists.newArrayListWithCapacity(options.size());
for (CipherOption option : options) {
protos.add(convert(option));
}
return protos;
}
return null;
}
public static List<CipherOption> convertCipherOptionProtos(
List<CipherOptionProto> protos) {
if (protos != null) {
List<CipherOption> options =
Lists.newArrayListWithCapacity(protos.size());
for (CipherOptionProto proto : protos) {
options.add(convert(proto));
}
return options;
}
return null;
}
public static CipherSuiteProto convert(CipherSuite suite) {
switch (suite) {
case UNKNOWN:
return CipherSuiteProto.UNKNOWN;
case AES_CTR_NOPADDING:
return CipherSuiteProto.AES_CTR_NOPADDING;
default:
return null;
}
}
public static CipherSuite convert(CipherSuiteProto proto) {
switch (proto) {
case AES_CTR_NOPADDING:
return CipherSuite.AES_CTR_NOPADDING;
default:
// Set to UNKNOWN and stash the unknown enum value
CipherSuite suite = CipherSuite.UNKNOWN;
suite.setUnknownValue(proto.getNumber());
return suite;
}
}
public static List<CryptoProtocolVersionProto> convert(
CryptoProtocolVersion[] versions) {
List<CryptoProtocolVersionProto> protos =
Lists.newArrayListWithCapacity(versions.length);
for (CryptoProtocolVersion v: versions) {
protos.add(convert(v));
}
return protos;
}
public static CryptoProtocolVersion[] convertCryptoProtocolVersions(
List<CryptoProtocolVersionProto> protos) {
List<CryptoProtocolVersion> versions =
Lists.newArrayListWithCapacity(protos.size());
for (CryptoProtocolVersionProto p: protos) {
versions.add(convert(p));
}
return versions.toArray(new CryptoProtocolVersion[] {});
}
public static CryptoProtocolVersion convert(CryptoProtocolVersionProto
proto) {
switch(proto) {
case ENCRYPTION_ZONES:
return CryptoProtocolVersion.ENCRYPTION_ZONES;
default:
// Set to UNKNOWN and stash the unknown enum value
CryptoProtocolVersion version = CryptoProtocolVersion.UNKNOWN;
version.setUnknownValue(proto.getNumber());
return version;
}
}
public static CryptoProtocolVersionProto convert(CryptoProtocolVersion
version) {
switch(version) {
case UNKNOWN:
return CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
case ENCRYPTION_ZONES:
return CryptoProtocolVersionProto.ENCRYPTION_ZONES;
default:
return null;
}
}
public static HdfsProtos.FileEncryptionInfoProto convert(
FileEncryptionInfo info) {
if (info == null) {
return null;
}
return HdfsProtos.FileEncryptionInfoProto.newBuilder()
.setSuite(convert(info.getCipherSuite()))
.setCryptoProtocolVersion(convert(info.getCryptoProtocolVersion()))
.setKey(getByteString(info.getEncryptedDataEncryptionKey()))
.setIv(getByteString(info.getIV()))
.setEzKeyVersionName(info.getEzKeyVersionName())
.setKeyName(info.getKeyName())
.build();
}
public static HdfsProtos.PerFileEncryptionInfoProto convertPerFileEncInfo(
FileEncryptionInfo info) {
if (info == null) {
return null;
}
return HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
.setKey(getByteString(info.getEncryptedDataEncryptionKey()))
.setIv(getByteString(info.getIV()))
.setEzKeyVersionName(info.getEzKeyVersionName())
.build();
}
public static HdfsProtos.ZoneEncryptionInfoProto convert(
CipherSuite suite, CryptoProtocolVersion version, String keyName) {
if (suite == null || version == null || keyName == null) {
return null;
}
return HdfsProtos.ZoneEncryptionInfoProto.newBuilder()
.setSuite(convert(suite))
.setCryptoProtocolVersion(convert(version))
.setKeyName(keyName)
.build();
}
public static FileEncryptionInfo convert(
HdfsProtos.FileEncryptionInfoProto proto) {
if (proto == null) {
return null;
}
CipherSuite suite = convert(proto.getSuite());
CryptoProtocolVersion version = convert(proto.getCryptoProtocolVersion());
byte[] key = proto.getKey().toByteArray();
byte[] iv = proto.getIv().toByteArray();
String ezKeyVersionName = proto.getEzKeyVersionName();
String keyName = proto.getKeyName();
return new FileEncryptionInfo(suite, version, key, iv, keyName,
ezKeyVersionName);
}
public static FileEncryptionInfo convert(
HdfsProtos.PerFileEncryptionInfoProto fileProto,
CipherSuite suite, CryptoProtocolVersion version, String keyName) {
if (fileProto == null || suite == null || version == null ||
keyName == null) {
return null;
}
byte[] key = fileProto.getKey().toByteArray();
byte[] iv = fileProto.getIv().toByteArray();
String ezKeyVersionName = fileProto.getEzKeyVersionName();
return new FileEncryptionInfo(suite, version, key, iv, keyName,
ezKeyVersionName);
}
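  // Converts per-target pinning flags for a write pipeline into protobuf
  // list form: with no pinning info a single FALSE is emitted, otherwise
  // the flags from this target's index onward are forwarded.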
public static List<Boolean> convert(boolean[] targetPinnings, int idx) {
List<Boolean> pinnings = new ArrayList<Boolean>();
if (targetPinnings == null) {
pinnings.add(Boolean.FALSE);
} else {
for (; idx < targetPinnings.length; ++idx) {
pinnings.add(Boolean.valueOf(targetPinnings[idx]));
}
}
return pinnings;
}
public static boolean[] convertBooleanList(
List<Boolean> targetPinningsList) {
final boolean[] targetPinnings = new boolean[targetPinningsList.size()];
for (int i = 0; i < targetPinningsList.size(); i++) {
targetPinnings[i] = targetPinningsList.get(i);
}
return targetPinnings;
}
public static BlockReportContext convert(BlockReportContextProto proto) {
return new BlockReportContext(proto.getTotalRpcs(),
proto.getCurRpc(), proto.getId(), proto.getLeaseId());
}
public static BlockReportContextProto convert(BlockReportContext context) {
return BlockReportContextProto.newBuilder().
setTotalRpcs(context.getTotalRpcs()).
setCurRpc(context.getCurRpc()).
setId(context.getReportId()).
setLeaseId(context.getLeaseId()).
build();
}
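  // Illustrative sketch, not part of the original class: round-trips a
  // CipherOption through its protobuf form to show how the null-safe
  // convert() helpers above compose. The key/IV values here are made up.
  public static CipherOption exampleCipherOptionRoundTrip() {
    CipherOption option = new CipherOption(CipherSuite.AES_CTR_NOPADDING,
        new byte[16], new byte[16], new byte[16], new byte[16]);
    CipherOptionProto proto = convert(option); // domain object -> proto
    return convert(proto);                     // proto -> domain object
  }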
}
| 120,300 | 38.122276 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeProtocolService;
import org.apache.hadoop.ipc.ProtocolInfo;
import org.apache.hadoop.security.KerberosInfo;
@KerberosInfo(
serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
@ProtocolInfo(
protocolName = "org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol",
protocolVersion = 1)
@InterfaceAudience.Private
public interface DatanodeProtocolPB extends
DatanodeProtocolService.BlockingInterface {
}
| 1,543 | 40.72973 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocolPB;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsUpgradeFinalizedResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RegisterResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.RollEditLogResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.StartCheckpointResponseProto;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
* Implementation for protobuf service that forwards requests
* received on {@link NamenodeProtocolPB} to the
* {@link NamenodeProtocol} server implementation.
*/
public class NamenodeProtocolServerSideTranslatorPB implements
NamenodeProtocolPB {
private final NamenodeProtocol impl;
private final static ErrorReportResponseProto VOID_ERROR_REPORT_RESPONSE =
ErrorReportResponseProto.newBuilder().build();
private final static EndCheckpointResponseProto VOID_END_CHECKPOINT_RESPONSE =
EndCheckpointResponseProto.newBuilder().build();
public NamenodeProtocolServerSideTranslatorPB(NamenodeProtocol impl) {
this.impl = impl;
}
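  // Each RPC method below follows the same pattern: unwrap the protobuf
  // request, delegate to the NamenodeProtocol implementation, and rewrap
  // any IOException in a ServiceException as the protobuf RPC layer expects.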
@Override
public GetBlocksResponseProto getBlocks(RpcController unused,
GetBlocksRequestProto request) throws ServiceException {
DatanodeInfo dnInfo = new DatanodeInfo(PBHelper.convert(request
.getDatanode()));
BlocksWithLocations blocks;
try {
blocks = impl.getBlocks(dnInfo, request.getSize());
} catch (IOException e) {
throw new ServiceException(e);
}
return GetBlocksResponseProto.newBuilder()
.setBlocks(PBHelper.convert(blocks)).build();
}
@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
GetBlockKeysRequestProto request) throws ServiceException {
ExportedBlockKeys keys;
try {
keys = impl.getBlockKeys();
} catch (IOException e) {
throw new ServiceException(e);
}
GetBlockKeysResponseProto.Builder builder =
GetBlockKeysResponseProto.newBuilder();
if (keys != null) {
builder.setKeys(PBHelper.convert(keys));
}
return builder.build();
}
@Override
public GetTransactionIdResponseProto getTransactionId(RpcController unused,
GetTransactionIdRequestProto request) throws ServiceException {
long txid;
try {
txid = impl.getTransactionID();
} catch (IOException e) {
throw new ServiceException(e);
}
return GetTransactionIdResponseProto.newBuilder().setTxId(txid).build();
}
@Override
public GetMostRecentCheckpointTxIdResponseProto getMostRecentCheckpointTxId(
RpcController unused, GetMostRecentCheckpointTxIdRequestProto request)
throws ServiceException {
long txid;
try {
txid = impl.getMostRecentCheckpointTxId();
} catch (IOException e) {
throw new ServiceException(e);
}
return GetMostRecentCheckpointTxIdResponseProto.newBuilder().setTxId(txid).build();
}
@Override
public RollEditLogResponseProto rollEditLog(RpcController unused,
RollEditLogRequestProto request) throws ServiceException {
CheckpointSignature signature;
try {
signature = impl.rollEditLog();
} catch (IOException e) {
throw new ServiceException(e);
}
return RollEditLogResponseProto.newBuilder()
.setSignature(PBHelper.convert(signature)).build();
}
@Override
public ErrorReportResponseProto errorReport(RpcController unused,
ErrorReportRequestProto request) throws ServiceException {
try {
impl.errorReport(PBHelper.convert(request.getRegistration()),
request.getErrorCode(), request.getMsg());
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_ERROR_REPORT_RESPONSE;
}
@Override
public RegisterResponseProto registerSubordinateNamenode(
RpcController unused, RegisterRequestProto request)
throws ServiceException {
NamenodeRegistration reg;
try {
reg = impl.registerSubordinateNamenode(
PBHelper.convert(request.getRegistration()));
} catch (IOException e) {
throw new ServiceException(e);
}
return RegisterResponseProto.newBuilder()
.setRegistration(PBHelper.convert(reg)).build();
}
@Override
public StartCheckpointResponseProto startCheckpoint(RpcController unused,
StartCheckpointRequestProto request) throws ServiceException {
NamenodeCommand cmd;
try {
cmd = impl.startCheckpoint(PBHelper.convert(request.getRegistration()));
} catch (IOException e) {
throw new ServiceException(e);
}
return StartCheckpointResponseProto.newBuilder()
.setCommand(PBHelper.convert(cmd)).build();
}
@Override
public EndCheckpointResponseProto endCheckpoint(RpcController unused,
EndCheckpointRequestProto request) throws ServiceException {
try {
impl.endCheckpoint(PBHelper.convert(request.getRegistration()),
PBHelper.convert(request.getSignature()));
} catch (IOException e) {
throw new ServiceException(e);
}
return VOID_END_CHECKPOINT_RESPONSE;
}
@Override
public GetEditLogManifestResponseProto getEditLogManifest(
RpcController unused, GetEditLogManifestRequestProto request)
throws ServiceException {
RemoteEditLogManifest manifest;
try {
manifest = impl.getEditLogManifest(request.getSinceTxId());
} catch (IOException e) {
throw new ServiceException(e);
}
return GetEditLogManifestResponseProto.newBuilder()
.setManifest(PBHelper.convert(manifest)).build();
}
@Override
public VersionResponseProto versionRequest(RpcController controller,
VersionRequestProto request) throws ServiceException {
NamespaceInfo info;
try {
info = impl.versionRequest();
} catch (IOException e) {
throw new ServiceException(e);
}
return VersionResponseProto.newBuilder()
.setInfo(PBHelper.convert(info)).build();
}
@Override
public IsUpgradeFinalizedResponseProto isUpgradeFinalized(
RpcController controller, IsUpgradeFinalizedRequestProto request)
throws ServiceException {
boolean isUpgradeFinalized;
try {
isUpgradeFinalized = impl.isUpgradeFinalized();
} catch (IOException e) {
throw new ServiceException(e);
}
return IsUpgradeFinalizedResponseProto.newBuilder()
.setIsUpgradeFinalized(isUpgradeFinalized).build();
}
}
| 9,880 | 39.662551 | 109 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAAdmin;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.util.ToolRunner;
/**
* Class to extend HAAdmin to do a little bit of HDFS-specific configuration.
*/
public class DFSHAAdmin extends HAAdmin {
private static final Log LOG = LogFactory.getLog(DFSHAAdmin.class);
private String nameserviceId;
protected void setErrOut(PrintStream errOut) {
this.errOut = errOut;
}
protected void setOut(PrintStream out) {
this.out = out;
}
@Override
public void setConf(Configuration conf) {
if (conf != null) {
conf = addSecurityConfiguration(conf);
}
super.setConf(conf);
}
/**
* Add the requisite security principal settings to the given Configuration,
* returning a copy.
* @param conf the original config
* @return a copy with the security settings added
*/
public static Configuration addSecurityConfiguration(Configuration conf) {
// Make a copy so we don't mutate it. Also use an HdfsConfiguration to
// force loading of hdfs-site.xml.
conf = new HdfsConfiguration(conf);
String nameNodePrincipal = conf.get(
DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "");
if (LOG.isDebugEnabled()) {
LOG.debug("Using NN principal: " + nameNodePrincipal);
}
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
nameNodePrincipal);
return conf;
}
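  // Minimal usage sketch (not part of the original class; the helper name is
  // illustrative only): obtains a security-augmented copy of a configuration
  // without mutating the caller's instance.
  private static Configuration exampleSecurityConf(Configuration base) {
    Configuration copy = addSecurityConfiguration(base);
    // "base" is untouched; only "copy" carries the NN principal setting.
    return copy;
  }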
/**
* Try to map the given namenode ID to its service address.
*/
@Override
protected HAServiceTarget resolveTarget(String nnId) {
HdfsConfiguration conf = (HdfsConfiguration)getConf();
return new NNHAServiceTarget(conf, nameserviceId, nnId);
}
@Override
protected String getUsageString() {
return "Usage: haadmin [-ns <nameserviceId>]";
}
@Override
protected int runCmd(String[] argv) throws Exception {
if (argv.length < 1) {
printUsage(errOut);
return -1;
}
int i = 0;
String cmd = argv[i++];
if ("-ns".equals(cmd)) {
if (i == argv.length) {
errOut.println("Missing nameservice ID");
printUsage(errOut);
return -1;
}
nameserviceId = argv[i++];
if (i >= argv.length) {
errOut.println("Missing command");
printUsage(errOut);
return -1;
}
argv = Arrays.copyOfRange(argv, i, argv.length);
}
return super.runCmd(argv);
}
/**
   * Returns the list of all NameNode IDs for the given configuration.
*/
@Override
protected Collection<String> getTargetIds(String namenodeToActivate) {
return DFSUtilClient.getNameNodeIds(getConf(),
(nameserviceId != null) ? nameserviceId : DFSUtil.getNamenodeNameServiceId(
getConf()));
}
public static void main(String[] argv) throws Exception {
int res = ToolRunner.run(new DFSHAAdmin(), argv);
System.exit(res);
}
}
| 4,255 | 29.618705 | 115 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.google.common.util.concurrent.Uninterruptibles;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
/**
* This class implements debug operations on the HDFS command-line.
*
* These operations are only for debugging, and may change or disappear
* between HDFS versions.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DebugAdmin extends Configured implements Tool {
/**
* All the debug commands we can run.
*/
  private DebugCommand[] DEBUG_COMMANDS = {
new VerifyBlockChecksumCommand(),
new RecoverLeaseCommand(),
new HelpCommand()
};
/**
* The base class for debug commands.
*/
private abstract class DebugCommand {
final String name;
final String usageText;
final String helpText;
DebugCommand(String name, String usageText, String helpText) {
this.name = name;
this.usageText = usageText;
this.helpText = helpText;
}
abstract int run(List<String> args) throws IOException;
}
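  // Byte length of the header at the start of a block metadata file: a
  // 2-byte version followed by the 5-byte DataChecksum header (1-byte
  // checksum type + 4-byte bytesPerChecksum).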
  private static final int HEADER_LEN = 7;
/**
* The command for verifying a block metadata file and possibly block file.
*/
private class VerifyBlockChecksumCommand extends DebugCommand {
VerifyBlockChecksumCommand() {
super("verify",
"verify [-meta <metadata-file>] [-block <block-file>]",
" Verify HDFS metadata and block files. If a block file is specified, we\n" +
" will verify that the checksums in the metadata file match the block\n" +
" file.");
}
int run(List<String> args) throws IOException {
if (args.size() == 0) {
System.out.println(usageText);
System.out.println(helpText + "\n");
return 1;
}
String blockFile = StringUtils.popOptionWithArgument("-block", args);
String metaFile = StringUtils.popOptionWithArgument("-meta", args);
if (metaFile == null) {
System.err.println("You must specify a meta file with -meta");
return 1;
}
FileInputStream metaStream = null, dataStream = null;
FileChannel metaChannel = null, dataChannel = null;
DataInputStream checksumStream = null;
try {
BlockMetadataHeader header;
try {
metaStream = new FileInputStream(metaFile);
checksumStream = new DataInputStream(metaStream);
header = BlockMetadataHeader.readHeader(checksumStream);
metaChannel = metaStream.getChannel();
metaChannel.position(HEADER_LEN);
} catch (RuntimeException e) {
System.err.println("Failed to read HDFS metadata file header for " +
metaFile + ": " + StringUtils.stringifyException(e));
return 1;
} catch (IOException e) {
System.err.println("Failed to read HDFS metadata file header for " +
metaFile + ": " + StringUtils.stringifyException(e));
return 1;
}
DataChecksum checksum = header.getChecksum();
System.out.println("Checksum type: " + checksum.toString());
if (blockFile == null) {
return 0;
}
ByteBuffer metaBuf, dataBuf;
try {
dataStream = new FileInputStream(blockFile);
dataChannel = dataStream.getChannel();
final int CHECKSUMS_PER_BUF = 1024 * 32;
metaBuf = ByteBuffer.allocate(checksum.
getChecksumSize() * CHECKSUMS_PER_BUF);
dataBuf = ByteBuffer.allocate(checksum.
getBytesPerChecksum() * CHECKSUMS_PER_BUF);
} catch (IOException e) {
System.err.println("Failed to open HDFS block file for " +
blockFile + ": " + StringUtils.stringifyException(e));
return 1;
}
long offset = 0;
while (true) {
dataBuf.clear();
int dataRead = -1;
try {
dataRead = dataChannel.read(dataBuf);
if (dataRead < 0) {
break;
}
} catch (IOException e) {
System.err.println("Got I/O error reading block file " +
blockFile + "from disk at offset " + dataChannel.position() +
": " + StringUtils.stringifyException(e));
return 1;
}
try {
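            // Checksum bytes covering the data just read:
            // ceil(dataRead / bytesPerChecksum) chunks, each contributing
            // getChecksumSize() bytes from the metadata file.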
int csumToRead =
(((checksum.getBytesPerChecksum() - 1) + dataRead) /
checksum.getBytesPerChecksum()) *
checksum.getChecksumSize();
metaBuf.clear();
metaBuf.limit(csumToRead);
metaChannel.read(metaBuf);
dataBuf.flip();
metaBuf.flip();
} catch (IOException e) {
System.err.println("Got I/O error reading metadata file " +
metaFile + "from disk at offset " + metaChannel.position() +
": " + StringUtils.stringifyException(e));
return 1;
}
try {
checksum.verifyChunkedSums(dataBuf, metaBuf,
blockFile, offset);
} catch (IOException e) {
System.out.println("verifyChunkedSums error: " +
StringUtils.stringifyException(e));
return 1;
}
offset += dataRead;
}
System.out.println("Checksum verification succeeded on block file " +
blockFile);
return 0;
} finally {
IOUtils.cleanup(null, metaStream, dataStream, checksumStream);
}
}
}
/**
* The command for recovering a file lease.
*/
private class RecoverLeaseCommand extends DebugCommand {
RecoverLeaseCommand() {
super("recoverLease",
"recoverLease [-path <path>] [-retries <num-retries>]",
" Recover the lease on the specified path. The path must reside on an\n" +
" HDFS filesystem. The default number of retries is 1.");
}
private static final int TIMEOUT_MS = 5000;
int run(List<String> args) throws IOException {
if (args.size() == 0) {
System.out.println(usageText);
System.out.println(helpText + "\n");
return 1;
}
String pathStr = StringUtils.popOptionWithArgument("-path", args);
String retriesStr = StringUtils.popOptionWithArgument("-retries", args);
if (pathStr == null) {
System.err.println("You must supply a -path argument to " +
"recoverLease.");
return 1;
}
int maxRetries = 1;
if (retriesStr != null) {
try {
maxRetries = Integer.parseInt(retriesStr);
} catch (NumberFormatException e) {
System.err.println("Failed to parse the argument to -retries: " +
StringUtils.stringifyException(e));
return 1;
}
}
FileSystem fs;
try {
fs = FileSystem.newInstance(new URI(pathStr), getConf(), null);
} catch (URISyntaxException e) {
System.err.println("URISyntaxException for " + pathStr + ":" +
StringUtils.stringifyException(e));
return 1;
} catch (InterruptedException e) {
System.err.println("InterruptedException for " + pathStr + ":" +
StringUtils.stringifyException(e));
return 1;
}
DistributedFileSystem dfs = null;
try {
dfs = (DistributedFileSystem) fs;
} catch (ClassCastException e) {
System.err.println("Invalid filesystem for path " + pathStr + ": " +
"needed scheme hdfs, but got: " + fs.getScheme());
return 1;
}
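      // Retry loop: keep asking the NameNode to recover the lease until it
      // reports success, the file is missing, or maxRetries is exhausted,
      // sleeping TIMEOUT_MS between attempts.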
for (int retry = 0; true; ) {
boolean recovered = false;
IOException ioe = null;
try {
recovered = dfs.recoverLease(new Path(pathStr));
} catch (FileNotFoundException e) {
System.err.println("recoverLease got exception: " + e.getMessage());
System.err.println("Giving up on recoverLease for " + pathStr +
" after 1 try");
return 1;
} catch (IOException e) {
ioe = e;
}
if (recovered) {
System.out.println("recoverLease SUCCEEDED on " + pathStr);
return 0;
}
if (ioe != null) {
System.err.println("recoverLease got exception: " +
ioe.getMessage());
} else {
System.err.println("recoverLease returned false.");
}
retry++;
if (retry >= maxRetries) {
break;
}
System.err.println("Retrying in " + TIMEOUT_MS + " ms...");
Uninterruptibles.sleepUninterruptibly(TIMEOUT_MS,
TimeUnit.MILLISECONDS);
System.err.println("Retry #" + retry);
}
System.err.println("Giving up on recoverLease for " + pathStr + " after " +
maxRetries + (maxRetries == 1 ? " try." : " tries."));
return 1;
}
}
/**
* The command for getting help about other commands.
*/
private class HelpCommand extends DebugCommand {
HelpCommand() {
super("help",
"help [command-name]",
" Get help about a command.");
}
int run(List<String> args) {
DebugCommand command = popCommand(args);
if (command == null) {
printUsage();
return 0;
}
System.out.println(command.usageText);
System.out.println(command.helpText + "\n");
return 0;
}
}
public DebugAdmin(Configuration conf) {
super(conf);
}
private DebugCommand popCommand(List<String> args) {
String commandStr = (args.size() == 0) ? "" : args.get(0);
if (commandStr.startsWith("-")) {
commandStr = commandStr.substring(1);
}
for (DebugCommand command : DEBUG_COMMANDS) {
if (command.name.equals(commandStr)) {
args.remove(0);
return command;
}
}
return null;
}
public int run(String[] argv) {
LinkedList<String> args = new LinkedList<String>();
for (int j = 0; j < argv.length; ++j) {
args.add(argv[j]);
}
DebugCommand command = popCommand(args);
if (command == null) {
printUsage();
return 0;
}
try {
return command.run(args);
} catch (IOException e) {
System.err.println("IOException: " +
StringUtils.stringifyException(e));
return 1;
} catch (RuntimeException e) {
System.err.println("RuntimeException: " +
StringUtils.stringifyException(e));
return 1;
}
}
private void printUsage() {
System.out.println("Usage: hdfs debug <command> [arguments]\n");
for (DebugCommand command : DEBUG_COMMANDS) {
if (!command.name.equals("help")) {
System.out.println(command.usageText);
}
}
}
public static void main(String[] argsArray) throws IOException {
DebugAdmin debugAdmin = new DebugAdmin(new Configuration());
System.exit(debugAdmin.run(argsArray));
}
}
| 12,417 | 32.744565 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URL;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.HealthMonitor;
import org.apache.hadoop.ha.ZKFailoverController;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.StringUtils;
import com.google.protobuf.InvalidProtocolBufferException;
@InterfaceAudience.Private
public class DFSZKFailoverController extends ZKFailoverController {
private static final Log LOG =
LogFactory.getLog(DFSZKFailoverController.class);
private final AccessControlList adminAcl;
  /* the same as superclass's localTarget, but with the more specific NN type */
private final NNHAServiceTarget localNNTarget;
// This is used only for unit tests
private boolean isThreadDumpCaptured = false;
@Override
protected HAServiceTarget dataToTarget(byte[] data) {
ActiveNodeInfo proto;
try {
proto = ActiveNodeInfo.parseFrom(data);
} catch (InvalidProtocolBufferException e) {
throw new RuntimeException("Invalid data in ZK: " +
StringUtils.byteToHexString(data));
}
NNHAServiceTarget ret = new NNHAServiceTarget(
conf, proto.getNameserviceId(), proto.getNamenodeId());
InetSocketAddress addressFromProtobuf = new InetSocketAddress(
proto.getHostname(), proto.getPort());
if (!addressFromProtobuf.equals(ret.getAddress())) {
throw new RuntimeException("Mismatched address stored in ZK for " +
ret + ": Stored protobuf was " + proto + ", address from our own " +
"configuration for this NameNode was " + ret.getAddress());
}
ret.setZkfcPort(proto.getZkfcPort());
return ret;
}
@Override
protected byte[] targetToData(HAServiceTarget target) {
InetSocketAddress addr = target.getAddress();
return ActiveNodeInfo.newBuilder()
.setHostname(addr.getHostName())
.setPort(addr.getPort())
.setZkfcPort(target.getZKFCAddress().getPort())
.setNameserviceId(localNNTarget.getNameServiceId())
.setNamenodeId(localNNTarget.getNameNodeId())
.build()
.toByteArray();
}
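  // Illustrative sketch only (not in the original class): shows the bytes
  // this ZKFC would publish for its own NameNode using the serialization
  // defined in targetToData() above; dataToTarget() is its inverse.
  // (Assumes initRPC() has already set the local ZKFC port.)
  @VisibleForTesting
  byte[] exampleLocalActiveNodeInfo() {
    return targetToData(localNNTarget);
  }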
@Override
protected InetSocketAddress getRpcAddressToBindTo() {
int zkfcPort = getZkfcPort(conf);
return new InetSocketAddress(localTarget.getAddress().getAddress(),
zkfcPort);
}
@Override
protected PolicyProvider getPolicyProvider() {
return new HDFSPolicyProvider();
}
static int getZkfcPort(Configuration conf) {
return conf.getInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY,
DFSConfigKeys.DFS_HA_ZKFC_PORT_DEFAULT);
}
public static DFSZKFailoverController create(Configuration conf) {
Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
throw new HadoopIllegalArgumentException(
"HA is not enabled for this namenode.");
}
String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
if (nnId == null) {
String msg = "Could not get the namenode ID of this node. " +
"You may run zkfc on the node other than namenode.";
throw new HadoopIllegalArgumentException(msg);
}
NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
NNHAServiceTarget localTarget = new NNHAServiceTarget(
localNNConf, nsId, nnId);
return new DFSZKFailoverController(localNNConf, localTarget);
}
private DFSZKFailoverController(Configuration conf,
NNHAServiceTarget localTarget) {
super(conf, localTarget);
this.localNNTarget = localTarget;
// Setup ACLs
adminAcl = new AccessControlList(
conf.get(DFSConfigKeys.DFS_ADMIN, " "));
LOG.info("Failover controller configured for NameNode " +
localTarget);
}
@Override
protected void initRPC() throws IOException {
super.initRPC();
localNNTarget.setZkfcPort(rpcServer.getAddress().getPort());
}
@Override
public void loginAsFCUser() throws IOException {
InetSocketAddress socAddr = NameNode.getAddress(conf);
SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
}
@Override
protected String getScopeInsideParentNode() {
return localNNTarget.getNameServiceId();
}
public static void main(String args[])
throws Exception {
StringUtils.startupShutdownMessage(DFSZKFailoverController.class,
args, LOG);
if (DFSUtil.parseHelpArgument(args,
ZKFailoverController.USAGE, System.out, true)) {
System.exit(0);
}
GenericOptionsParser parser = new GenericOptionsParser(
new HdfsConfiguration(), args);
DFSZKFailoverController zkfc = DFSZKFailoverController.create(
parser.getConfiguration());
int retCode = 0;
try {
retCode = zkfc.run(parser.getRemainingArgs());
} catch (Throwable t) {
LOG.fatal("Got a fatal error, exiting now", t);
}
System.exit(retCode);
}
@Override
protected void checkRpcAdminAccess() throws IOException, AccessControlException {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
UserGroupInformation zkfcUgi = UserGroupInformation.getLoginUser();
if (adminAcl.isUserAllowed(ugi) ||
ugi.getShortUserName().equals(zkfcUgi.getShortUserName())) {
LOG.info("Allowed RPC access from " + ugi + " at " + Server.getRemoteAddress());
return;
}
String msg = "Disallowed RPC access from " + ugi + " at " +
Server.getRemoteAddress() + ". Not listed in " + DFSConfigKeys.DFS_ADMIN;
LOG.warn(msg);
throw new AccessControlException(msg);
}
/**
* capture local NN's thread dump and write it to ZKFC's log.
*/
private void getLocalNNThreadDump() {
isThreadDumpCaptured = false;
// We use the same timeout value for both connection establishment
// timeout and read timeout.
int httpTimeOut = conf.getInt(
DFSConfigKeys.DFS_HA_ZKFC_NN_HTTP_TIMEOUT_KEY,
DFSConfigKeys.DFS_HA_ZKFC_NN_HTTP_TIMEOUT_KEY_DEFAULT);
if (httpTimeOut == 0) {
// If timeout value is set to zero, the feature is turned off.
return;
}
try {
String stacksUrl = DFSUtil.getInfoServer(localNNTarget.getAddress(),
conf, DFSUtil.getHttpClientScheme(conf)) + "/stacks";
URL url = new URL(stacksUrl);
HttpURLConnection conn = (HttpURLConnection)url.openConnection();
conn.setReadTimeout(httpTimeOut);
conn.setConnectTimeout(httpTimeOut);
conn.connect();
ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
StringBuilder localNNThreadDumpContent =
new StringBuilder("-- Local NN thread dump -- \n");
localNNThreadDumpContent.append(out);
localNNThreadDumpContent.append("\n -- Local NN thread dump -- ");
LOG.info(localNNThreadDumpContent);
isThreadDumpCaptured = true;
} catch (IOException e) {
LOG.warn("Can't get local NN thread dump due to " + e.getMessage());
}
}
@Override
protected synchronized void setLastHealthState(HealthMonitor.State newState) {
super.setLastHealthState(newState);
// Capture local NN thread dump when the target NN health state changes.
if (getLastHealthState() == HealthMonitor.State.SERVICE_NOT_RESPONDING ||
getLastHealthState() == HealthMonitor.State.SERVICE_UNHEALTHY) {
getLocalNNThreadDump();
}
}
@VisibleForTesting
boolean isThreadDumpCaptured() {
return isThreadDumpCaptured;
}
}
| 9,843 | 36.14717 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;
import com.google.common.base.Joiner;
import com.google.common.base.Optional;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurationTaskStatus;
import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.shell.Command;
import org.apache.hadoop.fs.shell.CommandFormat;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.HAUtilClient;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
import org.apache.hadoop.ipc.RefreshResponse;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolClientSideTranslatorPB;
import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Preconditions;
/**
* This class provides some DFS administrative access shell commands.
*/
@InterfaceAudience.Private
public class DFSAdmin extends FsShell {
static {
HdfsConfiguration.init();
}
private static final Log LOG = LogFactory.getLog(DFSAdmin.class);
/**
* An abstract class for the execution of a file system command
*/
abstract private static class DFSAdminCommand extends Command {
final DistributedFileSystem dfs;
/** Constructor */
public DFSAdminCommand(FileSystem fs) {
super(fs.getConf());
if (!(fs instanceof DistributedFileSystem)) {
throw new IllegalArgumentException("FileSystem " + fs.getUri() +
" is not an HDFS file system");
}
this.dfs = (DistributedFileSystem)fs;
}
}
/** A class that supports command clearQuota */
private static class ClearQuotaCommand extends DFSAdminCommand {
private static final String NAME = "clrQuota";
private static final String USAGE = "-"+NAME+" <dirname>...<dirname>";
private static final String DESCRIPTION = USAGE + ": " +
"Clear the quota for each directory <dirName>.\n" +
"\t\tFor each directory, attempt to clear the quota. An error will be reported if\n" +
"\t\t1. the directory does not exist or is a file, or\n" +
"\t\t2. user is not an administrator.\n" +
"\t\tIt does not fault if the directory has no quota.";
/** Constructor */
ClearQuotaCommand(String[] args, int pos, FileSystem fs) {
super(fs);
CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
List<String> parameters = c.parse(args, pos);
this.args = parameters.toArray(new String[parameters.size()]);
}
/** Check if a command is the clrQuota command
*
* @param cmd A string representation of a command starting with "-"
* @return true if this is a clrQuota command; false otherwise
*/
public static boolean matches(String cmd) {
return ("-"+NAME).equals(cmd);
}
@Override
public String getCommandName() {
return NAME;
}
@Override
public void run(Path path) throws IOException {
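      // QUOTA_RESET clears the name quota; QUOTA_DONT_SET leaves the
      // directory's space quota unchanged.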
dfs.setQuota(path, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
}
}
/** A class that supports command setQuota */
private static class SetQuotaCommand extends DFSAdminCommand {
private static final String NAME = "setQuota";
private static final String USAGE =
"-"+NAME+" <quota> <dirname>...<dirname>";
private static final String DESCRIPTION =
"-setQuota <quota> <dirname>...<dirname>: " +
"Set the quota <quota> for each directory <dirName>.\n" +
"\t\tThe directory quota is a long integer that puts a hard limit\n" +
"\t\ton the number of names in the directory tree\n" +
"\t\tFor each directory, attempt to set the quota. An error will be reported if\n" +
"\t\t1. N is not a positive integer, or\n" +
"\t\t2. User is not an administrator, or\n" +
"\t\t3. The directory does not exist or is a file.\n" +
"\t\tNote: A quota of 1 would force the directory to remain empty.\n";
private final long quota; // the quota to be set
/** Constructor */
SetQuotaCommand(String[] args, int pos, FileSystem fs) {
super(fs);
CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
List<String> parameters = c.parse(args, pos);
this.quota = Long.parseLong(parameters.remove(0));
this.args = parameters.toArray(new String[parameters.size()]);
}
/** Check if a command is the setQuota command
*
* @param cmd A string representation of a command starting with "-"
* @return true if this is a count command; false otherwise
*/
public static boolean matches(String cmd) {
return ("-"+NAME).equals(cmd);
}
@Override
public String getCommandName() {
return NAME;
}
@Override
public void run(Path path) throws IOException {
dfs.setQuota(path, quota, HdfsConstants.QUOTA_DONT_SET);
}
}
/** A class that supports command clearSpaceQuota */
private static class ClearSpaceQuotaCommand extends DFSAdminCommand {
private static final String NAME = "clrSpaceQuota";
private static final String USAGE = "-"+NAME+" [-storageType <storagetype>] <dirname>...<dirname>";
private static final String DESCRIPTION = USAGE + ": " +
"Clear the space quota for each directory <dirName>.\n" +
"\t\tFor each directory, attempt to clear the quota. An error will be reported if\n" +
"\t\t1. the directory does not exist or is a file, or\n" +
"\t\t2. user is not an administrator.\n" +
"\t\tIt does not fault if the directory has no quota.\n" +
"\t\tThe storage type specific quota is cleared when -storageType option is specified.";
private StorageType type;
/** Constructor */
ClearSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
super(fs);
CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
c.addOptionWithValue("storageType");
List<String> parameters = c.parse(args, pos);
String storageTypeString = c.getOptValue("storageType");
if (storageTypeString != null) {
this.type = StorageType.parseStorageType(storageTypeString);
}
this.args = parameters.toArray(new String[parameters.size()]);
}
/** Check if a command is the clrQuota command
*
* @param cmd A string representation of a command starting with "-"
* @return true if this is a clrQuota command; false otherwise
*/
public static boolean matches(String cmd) {
return ("-"+NAME).equals(cmd);
}
@Override
public String getCommandName() {
return NAME;
}
@Override
public void run(Path path) throws IOException {
if (type != null) {
dfs.setQuotaByStorageType(path, type, HdfsConstants.QUOTA_RESET);
} else {
dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
}
}
}
/** A class that supports command setQuota */
private static class SetSpaceQuotaCommand extends DFSAdminCommand {
private static final String NAME = "setSpaceQuota";
private static final String USAGE =
"-"+NAME+" <quota> [-storageType <storagetype>] <dirname>...<dirname>";
private static final String DESCRIPTION = USAGE + ": " +
"Set the space quota <quota> for each directory <dirName>.\n" +
"\t\tThe space quota is a long integer that puts a hard limit\n" +
"\t\ton the total size of all the files under the directory tree.\n" +
"\t\tThe extra space required for replication is also counted. E.g.\n" +
"\t\ta 1GB file with replication of 3 consumes 3GB of the quota.\n\n" +
"\t\tQuota can also be specified with a binary prefix for terabytes,\n" +
"\t\tpetabytes etc (e.g. 50t is 50TB, 5m is 5MB, 3p is 3PB).\n" +
"\t\tFor each directory, attempt to set the quota. An error will be reported if\n" +
"\t\t1. N is not a positive integer, or\n" +
"\t\t2. user is not an administrator, or\n" +
"\t\t3. the directory does not exist or is a file.\n" +
"\t\tThe storage type specific quota is set when -storageType option is specified.\n";
private long quota; // the quota to be set
private StorageType type;
/** Constructor */
SetSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
super(fs);
CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
List<String> parameters = c.parse(args, pos);
String str = parameters.remove(0).trim();
try {
quota = StringUtils.TraditionalBinaryPrefix.string2long(str);
} catch (NumberFormatException nfe) {
throw new IllegalArgumentException("\"" + str + "\" is not a valid value for a quota.");
}
String storageTypeString =
StringUtils.popOptionWithArgument("-storageType", parameters);
if (storageTypeString != null) {
try {
this.type = StorageType.parseStorageType(storageTypeString);
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Storage type "
+ storageTypeString
+ " is not available. Available storage types are "
+ StorageType.getTypesSupportingQuota());
}
}
this.args = parameters.toArray(new String[parameters.size()]);
}
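    // Illustrative sketch, not part of the original command: shows how the
    // quota argument is interpreted, e.g. "50t" -> 50 * 2^40 bytes and
    // "5m" -> 5 * 2^20 bytes, using the same parser as the constructor.
    static long exampleParseQuota(String s) {
      return StringUtils.TraditionalBinaryPrefix.string2long(s.trim());
    }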
/** Check if a command is the setQuota command
*
* @param cmd A string representation of a command starting with "-"
* @return true if this is a count command; false otherwise
*/
public static boolean matches(String cmd) {
return ("-"+NAME).equals(cmd);
}
@Override
public String getCommandName() {
return NAME;
}
@Override
public void run(Path path) throws IOException {
if (type != null) {
dfs.setQuotaByStorageType(path, type, quota);
} else {
dfs.setQuota(path, HdfsConstants.QUOTA_DONT_SET, quota);
}
}
}
private static class RollingUpgradeCommand {
static final String NAME = "rollingUpgrade";
static final String USAGE = "-"+NAME+" [<query|prepare|finalize>]";
static final String DESCRIPTION = USAGE + ":\n"
+ " query: query the current rolling upgrade status.\n"
+ " prepare: prepare a new rolling upgrade.\n"
+ " finalize: finalize the current rolling upgrade.";
/** Check if a command is the rollingUpgrade command
*
* @param cmd A string representation of a command starting with "-"
* @return true if this is a clrQuota command; false otherwise
*/
static boolean matches(String cmd) {
return ("-"+NAME).equals(cmd);
}
private static void printMessage(RollingUpgradeInfo info,
PrintStream out) {
if (info != null && info.isStarted()) {
if (!info.createdRollbackImages() && !info.isFinalized()) {
out.println(
"Preparing for upgrade. Data is being saved for rollback."
+ "\nRun \"dfsadmin -rollingUpgrade query\" to check the status"
+ "\nfor proceeding with rolling upgrade");
out.println(info);
} else if (!info.isFinalized()) {
out.println("Proceed with rolling upgrade:");
out.println(info);
} else {
out.println("Rolling upgrade is finalized.");
out.println(info);
}
} else {
out.println("There is no rolling upgrade in progress or rolling " +
"upgrade has already been finalized.");
}
}
static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOException {
final RollingUpgradeAction action = RollingUpgradeAction.fromString(
argv.length >= 2? argv[1]: "");
if (action == null) {
throw new IllegalArgumentException("Failed to covert \"" + argv[1]
+"\" to " + RollingUpgradeAction.class.getSimpleName());
}
System.out.println(action + " rolling upgrade ...");
final RollingUpgradeInfo info = dfs.rollingUpgrade(action);
switch(action){
case QUERY:
break;
case PREPARE:
Preconditions.checkState(info.isStarted());
break;
case FINALIZE:
Preconditions.checkState(info == null || info.isFinalized());
break;
}
printMessage(info, System.out);
return 0;
}
}
/**
* Common usage summary shared between "hdfs dfsadmin -help" and
* "hdfs dfsadmin"
*/
private static final String commonUsageSummary =
"\t[-report [-live] [-dead] [-decommissioning]]\n" +
"\t[-safemode <enter | leave | get | wait>]\n" +
"\t[-saveNamespace]\n" +
"\t[-rollEdits]\n" +
"\t[-restoreFailedStorage true|false|check]\n" +
"\t[-refreshNodes]\n" +
"\t[" + SetQuotaCommand.USAGE + "]\n" +
"\t[" + ClearQuotaCommand.USAGE +"]\n" +
"\t[" + SetSpaceQuotaCommand.USAGE + "]\n" +
"\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
"\t[-finalizeUpgrade]\n" +
"\t[" + RollingUpgradeCommand.USAGE +"]\n" +
"\t[-refreshServiceAcl]\n" +
"\t[-refreshUserToGroupsMappings]\n" +
"\t[-refreshSuperUserGroupsConfiguration]\n" +
"\t[-refreshCallQueue]\n" +
"\t[-refresh <host:ipc_port> <key> [arg1..argn]\n" +
"\t[-reconfig <datanode|...> <host:ipc_port> <start|status|properties>]\n" +
"\t[-printTopology]\n" +
"\t[-refreshNamenodes datanode_host:ipc_port]\n"+
"\t[-deleteBlockPool datanode_host:ipc_port blockpoolId [force]]\n"+
"\t[-setBalancerBandwidth <bandwidth in bytes per second>]\n" +
"\t[-fetchImage <local directory>]\n" +
"\t[-allowSnapshot <snapshotDir>]\n" +
"\t[-disallowSnapshot <snapshotDir>]\n" +
"\t[-shutdownDatanode <datanode_host:ipc_port> [upgrade]]\n" +
"\t[-getDatanodeInfo <datanode_host:ipc_port>]\n" +
"\t[-metasave filename]\n" +
"\t[-triggerBlockReport [-incremental] <datanode_host:ipc_port>]\n" +
"\t[-help [cmd]]\n";
/**
* Construct a DFSAdmin object.
*/
public DFSAdmin() {
this(new HdfsConfiguration());
}
/**
* Construct a DFSAdmin object.
*/
public DFSAdmin(Configuration conf) {
super(conf);
}
protected DistributedFileSystem getDFS() throws IOException {
FileSystem fs = getFS();
if (!(fs instanceof DistributedFileSystem)) {
throw new IllegalArgumentException("FileSystem " + fs.getUri() +
" is not an HDFS file system");
}
return (DistributedFileSystem)fs;
}
/**
* Gives a report on how the FileSystem is doing.
* @exception IOException if the filesystem does not exist.
*/
public void report(String[] argv, int i) throws IOException {
DistributedFileSystem dfs = getDFS();
FsStatus ds = dfs.getStatus();
long capacity = ds.getCapacity();
long used = ds.getUsed();
long remaining = ds.getRemaining();
long presentCapacity = used + remaining;
boolean mode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
if (mode) {
System.out.println("Safe mode is ON");
}
System.out.println("Configured Capacity: " + capacity
+ " (" + StringUtils.byteDesc(capacity) + ")");
System.out.println("Present Capacity: " + presentCapacity
+ " (" + StringUtils.byteDesc(presentCapacity) + ")");
System.out.println("DFS Remaining: " + remaining
+ " (" + StringUtils.byteDesc(remaining) + ")");
System.out.println("DFS Used: " + used
+ " (" + StringUtils.byteDesc(used) + ")");
System.out.println("DFS Used%: "
+ StringUtils.formatPercent(used/(double)presentCapacity, 2));
    /* These counts are not always up to date. They are updated after an
     * iteration of an internal list and should be refreshed within a few
     * seconds to minutes. Use "-metasave" to list all such blocks and get
     * accurate counts.
*/
System.out.println("Under replicated blocks: " +
dfs.getUnderReplicatedBlocksCount());
System.out.println("Blocks with corrupt replicas: " +
dfs.getCorruptBlocksCount());
System.out.println("Missing blocks: " +
dfs.getMissingBlocksCount());
System.out.println("Missing blocks (with replication factor 1): " +
dfs.getMissingReplOneBlocksCount());
System.out.println();
System.out.println("-------------------------------------------------");
// Parse arguments for filtering the node list
List<String> args = Arrays.asList(argv);
// Truncate already handled arguments before parsing report()-specific ones
args = new ArrayList<String>(args.subList(i, args.size()));
final boolean listLive = StringUtils.popOption("-live", args);
final boolean listDead = StringUtils.popOption("-dead", args);
final boolean listDecommissioning =
StringUtils.popOption("-decommissioning", args);
// If no filter flags are found, then list all DN types
boolean listAll = (!listLive && !listDead && !listDecommissioning);
if (listAll || listLive) {
DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
if (live.length > 0 || listLive) {
System.out.println("Live datanodes (" + live.length + "):\n");
}
if (live.length > 0) {
for (DatanodeInfo dn : live) {
System.out.println(dn.getDatanodeReport());
System.out.println();
}
}
}
if (listAll || listDead) {
DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
if (dead.length > 0 || listDead) {
System.out.println("Dead datanodes (" + dead.length + "):\n");
}
if (dead.length > 0) {
for (DatanodeInfo dn : dead) {
System.out.println(dn.getDatanodeReport());
System.out.println();
}
}
}
if (listAll || listDecommissioning) {
DatanodeInfo[] decom =
dfs.getDataNodeStats(DatanodeReportType.DECOMMISSIONING);
if (decom.length > 0 || listDecommissioning) {
System.out.println("Decommissioning datanodes (" + decom.length
+ "):\n");
}
if (decom.length > 0) {
for (DatanodeInfo dn : decom) {
System.out.println(dn.getDatanodeReport());
System.out.println();
}
}
}
}
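  // Example (illustrative): "hdfs dfsadmin -report -live -decommissioning"
  // prints the summary above but limits the per-datanode listing to live and
  // decommissioning nodes, per the filter flags parsed in report().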
/**
* Safe mode maintenance command.
   * Usage: hdfs dfsadmin -safemode [enter | leave | get | wait]
   * @param argv List of command line parameters.
* @param idx The index of the command that is being processed.
* @exception IOException if the filesystem does not exist.
*/
public void setSafeMode(String[] argv, int idx) throws IOException {
if (idx != argv.length - 1) {
printUsage("-safemode");
return;
}
HdfsConstants.SafeModeAction action;
Boolean waitExitSafe = false;
if ("leave".equalsIgnoreCase(argv[idx])) {
action = HdfsConstants.SafeModeAction.SAFEMODE_LEAVE;
} else if ("enter".equalsIgnoreCase(argv[idx])) {
action = HdfsConstants.SafeModeAction.SAFEMODE_ENTER;
} else if ("get".equalsIgnoreCase(argv[idx])) {
action = HdfsConstants.SafeModeAction.SAFEMODE_GET;
} else if ("wait".equalsIgnoreCase(argv[idx])) {
action = HdfsConstants.SafeModeAction.SAFEMODE_GET;
waitExitSafe = true;
} else {
printUsage("-safemode");
return;
}
DistributedFileSystem dfs = getDFS();
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
if (isHaEnabled) {
String nsId = dfsUri.getHost();
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(
dfsConf, nsId, ClientProtocol.class);
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
ClientProtocol haNn = proxy.getProxy();
boolean inSafeMode = haNn.setSafeMode(action, false);
if (waitExitSafe) {
inSafeMode = waitExitSafeMode(haNn, inSafeMode);
}
System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF")
+ " in " + proxy.getAddress());
}
} else {
boolean inSafeMode = dfs.setSafeMode(action);
if (waitExitSafe) {
inSafeMode = waitExitSafeMode(dfs, inSafeMode);
}
System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF"));
}
}
private boolean waitExitSafeMode(DistributedFileSystem dfs, boolean inSafeMode)
throws IOException {
while (inSafeMode) {
try {
Thread.sleep(5000);
} catch (java.lang.InterruptedException e) {
throw new IOException("Wait Interrupted");
}
inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
}
return inSafeMode;
}
private boolean waitExitSafeMode(ClientProtocol nn, boolean inSafeMode)
throws IOException {
while (inSafeMode) {
try {
Thread.sleep(5000);
} catch (java.lang.InterruptedException e) {
throw new IOException("Wait Interrupted");
}
inSafeMode = nn.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
}
return inSafeMode;
}
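  // Usage sketch (illustrative): "hdfs dfsadmin -safemode wait" can be used in
  // a startup script to block until the NameNode is writable; the helpers
  // above poll SAFEMODE_GET every 5 seconds until safe mode is reported OFF.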
public int triggerBlockReport(String[] argv) throws IOException {
List<String> args = new LinkedList<String>();
for (int j = 1; j < argv.length; j++) {
args.add(argv[j]);
}
boolean incremental = StringUtils.popOption("-incremental", args);
String hostPort = StringUtils.popFirstNonOption(args);
if (hostPort == null) {
System.err.println("You must specify a host:port pair.");
return 1;
}
if (!args.isEmpty()) {
System.err.print("Can't understand arguments: " +
Joiner.on(" ").join(args) + "\n");
return 1;
}
ClientDatanodeProtocol dnProxy = getDataNodeProxy(hostPort);
try {
dnProxy.triggerBlockReport(
new BlockReportOptions.Factory().
setIncremental(incremental).
build());
} catch (IOException e) {
System.err.println("triggerBlockReport error: " + e);
return 1;
}
System.out.println("Triggering " +
(incremental ? "an incremental " : "a full ") +
"block report on " + hostPort + ".");
return 0;
}
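  // Example (illustrative, hypothetical address): "hdfs dfsadmin
  // -triggerBlockReport -incremental dn1.example.com:50020" asks that datanode
  // to send an incremental block report; omit -incremental for a full report.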
/**
* Allow snapshot on a directory.
* Usage: hdfs dfsadmin -allowSnapshot snapshotDir
   * @param argv List of command line parameters.
* @exception IOException
*/
public void allowSnapshot(String[] argv) throws IOException {
DistributedFileSystem dfs = getDFS();
try {
dfs.allowSnapshot(new Path(argv[1]));
} catch (SnapshotException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage());
}
System.out.println("Allowing snaphot on " + argv[1] + " succeeded");
}
/**
   * Disallow snapshot on a directory.
   * Usage: hdfs dfsadmin -disallowSnapshot snapshotDir
   * @param argv List of command line parameters.
* @exception IOException
*/
public void disallowSnapshot(String[] argv) throws IOException {
DistributedFileSystem dfs = getDFS();
try {
dfs.disallowSnapshot(new Path(argv[1]));
} catch (SnapshotException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage());
}
System.out.println("Disallowing snaphot on " + argv[1] + " succeeded");
}
/**
* Command to ask the namenode to save the namespace.
* Usage: hdfs dfsadmin -saveNamespace
* @exception IOException
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
*/
public int saveNamespace() throws IOException {
int exitCode = -1;
DistributedFileSystem dfs = getDFS();
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
if (isHaEnabled) {
String nsId = dfsUri.getHost();
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
nsId, ClientProtocol.class);
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
proxy.getProxy().saveNamespace();
System.out.println("Save namespace successful for " +
proxy.getAddress());
}
} else {
dfs.saveNamespace();
System.out.println("Save namespace successful");
}
exitCode = 0;
return exitCode;
}
public int rollEdits() throws IOException {
DistributedFileSystem dfs = getDFS();
long txid = dfs.rollEdits();
System.out.println("Successfully rolled edit logs.");
System.out.println("New segment starts at txid " + txid);
return 0;
}
/**
* Command to enable/disable/check restoring of failed storage replicas in the namenode.
* Usage: hdfs dfsadmin -restoreFailedStorage true|false|check
* @exception IOException
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)
*/
public int restoreFailedStorage(String arg) throws IOException {
int exitCode = -1;
if(!arg.equals("check") && !arg.equals("true") && !arg.equals("false")) {
System.err.println("restoreFailedStorage valid args are true|false|check");
return exitCode;
}
DistributedFileSystem dfs = getDFS();
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
if (isHaEnabled) {
String nsId = dfsUri.getHost();
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
nsId, ClientProtocol.class);
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
Boolean res = proxy.getProxy().restoreFailedStorage(arg);
System.out.println("restoreFailedStorage is set to " + res + " for "
+ proxy.getAddress());
}
} else {
Boolean res = dfs.restoreFailedStorage(arg);
System.out.println("restoreFailedStorage is set to " + res);
}
exitCode = 0;
return exitCode;
}
/**
* Command to ask the namenode to reread the hosts and excluded hosts
* file.
* Usage: hdfs dfsadmin -refreshNodes
* @exception IOException
*/
public int refreshNodes() throws IOException {
int exitCode = -1;
DistributedFileSystem dfs = getDFS();
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
if (isHaEnabled) {
String nsId = dfsUri.getHost();
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
nsId, ClientProtocol.class);
for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
proxy.getProxy().refreshNodes();
System.out.println("Refresh nodes successful for " +
proxy.getAddress());
}
} else {
dfs.refreshNodes();
System.out.println("Refresh nodes successful");
}
exitCode = 0;
return exitCode;
}
/**
* Command to ask the namenode to set the balancer bandwidth for all of the
* datanodes.
* Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth
   * @param argv List of command line parameters.
* @param idx The index of the command that is being processed.
* @exception IOException
*/
public int setBalancerBandwidth(String[] argv, int idx) throws IOException {
long bandwidth;
int exitCode = -1;
try {
bandwidth = Long.parseLong(argv[idx]);
} catch (NumberFormatException nfe) {
System.err.println("NumberFormatException: " + nfe.getMessage());
System.err.println("Usage: hdfs dfsadmin"
+ " [-setBalancerBandwidth <bandwidth in bytes per second>]");
return exitCode;
}
FileSystem fs = getFS();
if (!(fs instanceof DistributedFileSystem)) {
System.err.println("FileSystem is " + fs.getUri());
return exitCode;
}
DistributedFileSystem dfs = (DistributedFileSystem) fs;
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
if (isHaEnabled) {
String nsId = dfsUri.getHost();
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
nsId, ClientProtocol.class);
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
proxy.getProxy().setBalancerBandwidth(bandwidth);
System.out.println("Balancer bandwidth is set to " + bandwidth +
" for " + proxy.getAddress());
}
} else {
dfs.setBalancerBandwidth(bandwidth);
System.out.println("Balancer bandwidth is set to " + bandwidth);
}
exitCode = 0;
return exitCode;
}
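  // Example (illustrative): "hdfs dfsadmin -setBalancerBandwidth 104857600"
  // caps each datanode at 100 MB/s (100 * 1024 * 1024 bytes per second) during
  // balancing; as the help text notes, the new value is not persisted on the
  // DataNode across restarts.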
/**
* Download the most recent fsimage from the name node, and save it to a local
* file in the given directory.
*
* @param argv
   *          List of command line parameters.
* @param idx
* The index of the command that is being processed.
* @return an exit code indicating success or failure.
* @throws IOException
*/
public int fetchImage(final String[] argv, final int idx) throws IOException {
Configuration conf = getConf();
final URL infoServer = DFSUtil.getInfoServer(
HAUtil.getAddressOfActive(getDFS()), conf,
DFSUtil.getHttpClientScheme(conf)).toURL();
SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
new File(argv[idx]));
return null;
}
});
return 0;
}
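  // Example (illustrative): "hdfs dfsadmin -fetchImage /tmp/fsimage-backup"
  // downloads the most recent fsimage from the active NameNode's HTTP(S)
  // endpoint into the given local directory.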
private void printHelp(String cmd) {
String summary = "hdfs dfsadmin performs DFS administrative commands.\n" +
"Note: Administrative commands can only be run with superuser permission.\n" +
"The full syntax is: \n\n" +
"hdfs dfsadmin\n" +
commonUsageSummary;
String report ="-report [-live] [-dead] [-decommissioning]:\n" +
"\tReports basic filesystem information and statistics. \n" +
"\tThe dfs usage can be different from \"du\" usage, because it\n" +
"\tmeasures raw space used by replication, checksums, snapshots\n" +
"\tand etc. on all the DNs.\n" +
"\tOptional flags may be used to filter the list of displayed DNs.\n";
String safemode = "-safemode <enter|leave|get|wait>: Safe mode maintenance command.\n" +
"\t\tSafe mode is a Namenode state in which it\n" +
"\t\t\t1. does not accept changes to the name space (read-only)\n" +
"\t\t\t2. does not replicate or delete blocks.\n" +
"\t\tSafe mode is entered automatically at Namenode startup, and\n" +
"\t\tleaves safe mode automatically when the configured minimum\n" +
"\t\tpercentage of blocks satisfies the minimum replication\n" +
"\t\tcondition. Safe mode can also be entered manually, but then\n" +
"\t\tit can only be turned off manually as well.\n";
String saveNamespace = "-saveNamespace:\t" +
"Save current namespace into storage directories and reset edits log.\n" +
"\t\tRequires safe mode.\n";
String rollEdits = "-rollEdits:\t" +
"Rolls the edit log.\n";
String restoreFailedStorage = "-restoreFailedStorage:\t" +
"Set/Unset/Check flag to attempt restore of failed storage replicas if they become available.\n";
String refreshNodes = "-refreshNodes: \tUpdates the namenode with the " +
"set of datanodes allowed to connect to the namenode.\n\n" +
"\t\tNamenode re-reads datanode hostnames from the file defined by \n" +
"\t\tdfs.hosts, dfs.hosts.exclude configuration parameters.\n" +
"\t\tHosts defined in dfs.hosts are the datanodes that are part of \n" +
"\t\tthe cluster. If there are entries in dfs.hosts, only the hosts \n" +
"\t\tin it are allowed to register with the namenode.\n\n" +
"\t\tEntries in dfs.hosts.exclude are datanodes that need to be \n" +
"\t\tdecommissioned. Datanodes complete decommissioning when \n" +
"\t\tall the replicas from them are replicated to other datanodes.\n" +
"\t\tDecommissioned nodes are not automatically shutdown and \n" +
"\t\tare not chosen for writing new replicas.\n";
String finalizeUpgrade = "-finalizeUpgrade: Finalize upgrade of HDFS.\n" +
"\t\tDatanodes delete their previous version working directories,\n" +
"\t\tfollowed by Namenode doing the same.\n" +
"\t\tThis completes the upgrade process.\n";
String metaSave = "-metasave <filename>: \tSave Namenode's primary data structures\n" +
"\t\tto <filename> in the directory specified by hadoop.log.dir property.\n" +
"\t\t<filename> is overwritten if it exists.\n" +
"\t\t<filename> will contain one line for each of the following\n" +
"\t\t\t1. Datanodes heart beating with Namenode\n" +
"\t\t\t2. Blocks waiting to be replicated\n" +
"\t\t\t3. Blocks currrently being replicated\n" +
"\t\t\t4. Blocks waiting to be deleted\n";
String refreshServiceAcl = "-refreshServiceAcl: Reload the service-level authorization policy file\n" +
"\t\tNamenode will reload the authorization policy file.\n";
String refreshUserToGroupsMappings =
"-refreshUserToGroupsMappings: Refresh user-to-groups mappings\n";
String refreshSuperUserGroupsConfiguration =
"-refreshSuperUserGroupsConfiguration: Refresh superuser proxy groups mappings\n";
String refreshCallQueue = "-refreshCallQueue: Reload the call queue from config\n";
String reconfig = "-reconfig <datanode|...> <host:ipc_port> <start|status|properties>:\n" +
"\tStarts or gets the status of a reconfiguration operation, \n" +
"\tor gets a list of reconfigurable properties.\n" +
"\tThe second parameter specifies the node type.\n" +
"\tCurrently, only reloading DataNode's configuration is supported.\n";
String genericRefresh = "-refresh: Arguments are <hostname:port> <resource_identifier> [arg1..argn]\n" +
"\tTriggers a runtime-refresh of the resource specified by <resource_identifier>\n" +
"\ton <hostname:port>. All other args after are sent to the host.\n";
String printTopology = "-printTopology: Print a tree of the racks and their\n" +
"\t\tnodes as reported by the Namenode\n";
String refreshNamenodes = "-refreshNamenodes: Takes a datanodehost:port as argument,\n"+
"\t\tFor the given datanode, reloads the configuration files,\n" +
"\t\tstops serving the removed block-pools\n"+
"\t\tand starts serving new block-pools\n";
String deleteBlockPool = "-deleteBlockPool: Arguments are datanodehost:port, blockpool id\n"+
"\t\t and an optional argument \"force\". If force is passed,\n"+
"\t\t block pool directory for the given blockpool id on the given\n"+
"\t\t datanode is deleted along with its contents, otherwise\n"+
"\t\t the directory is deleted only if it is empty. The command\n" +
"\t\t will fail if datanode is still serving the block pool.\n" +
"\t\t Refer to refreshNamenodes to shutdown a block pool\n" +
"\t\t service on a datanode.\n";
String setBalancerBandwidth = "-setBalancerBandwidth <bandwidth>:\n" +
"\tChanges the network bandwidth used by each datanode during\n" +
"\tHDFS block balancing.\n\n" +
"\t\t<bandwidth> is the maximum number of bytes per second\n" +
"\t\tthat will be used by each datanode. This value overrides\n" +
"\t\tthe dfs.balance.bandwidthPerSec parameter.\n\n" +
"\t\t--- NOTE: The new value is not persistent on the DataNode.---\n";
String fetchImage = "-fetchImage <local directory>:\n" +
"\tDownloads the most recent fsimage from the Name Node and saves it in" +
"\tthe specified local directory.\n";
String allowSnapshot = "-allowSnapshot <snapshotDir>:\n" +
"\tAllow snapshots to be taken on a directory.\n";
String disallowSnapshot = "-disallowSnapshot <snapshotDir>:\n" +
"\tDo not allow snapshots to be taken on a directory any more.\n";
String shutdownDatanode = "-shutdownDatanode <datanode_host:ipc_port> [upgrade]\n"
+ "\tSubmit a shutdown request for the given datanode. If an optional\n"
+ "\t\"upgrade\" argument is specified, clients accessing the datanode\n"
+ "\twill be advised to wait for it to restart and the fast start-up\n"
+ "\tmode will be enabled. When the restart does not happen in time,\n"
+ "\tclients will timeout and ignore the datanode. In such case, the\n"
+ "\tfast start-up mode will also be disabled.\n";
String getDatanodeInfo = "-getDatanodeInfo <datanode_host:ipc_port>\n"
+ "\tGet the information about the given datanode. This command can\n"
+ "\tbe used for checking if a datanode is alive.\n";
String triggerBlockReport =
"-triggerBlockReport [-incremental] <datanode_host:ipc_port>\n"
+ "\tTrigger a block report for the datanode.\n"
+ "\tIf 'incremental' is specified, it will be an incremental\n"
+ "\tblock report; otherwise, it will be a full block report.\n";
String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" +
"\t\tis specified.\n";
if ("report".equals(cmd)) {
System.out.println(report);
} else if ("safemode".equals(cmd)) {
System.out.println(safemode);
} else if ("saveNamespace".equals(cmd)) {
System.out.println(saveNamespace);
} else if ("rollEdits".equals(cmd)) {
System.out.println(rollEdits);
} else if ("restoreFailedStorage".equals(cmd)) {
System.out.println(restoreFailedStorage);
} else if ("refreshNodes".equals(cmd)) {
System.out.println(refreshNodes);
} else if ("finalizeUpgrade".equals(cmd)) {
System.out.println(finalizeUpgrade);
} else if (RollingUpgradeCommand.matches("-"+cmd)) {
System.out.println(RollingUpgradeCommand.DESCRIPTION);
} else if ("metasave".equals(cmd)) {
System.out.println(metaSave);
} else if (SetQuotaCommand.matches("-"+cmd)) {
System.out.println(SetQuotaCommand.DESCRIPTION);
} else if (ClearQuotaCommand.matches("-"+cmd)) {
System.out.println(ClearQuotaCommand.DESCRIPTION);
} else if (SetSpaceQuotaCommand.matches("-"+cmd)) {
System.out.println(SetSpaceQuotaCommand.DESCRIPTION);
} else if (ClearSpaceQuotaCommand.matches("-"+cmd)) {
System.out.println(ClearSpaceQuotaCommand.DESCRIPTION);
} else if ("refreshServiceAcl".equals(cmd)) {
System.out.println(refreshServiceAcl);
} else if ("refreshUserToGroupsMappings".equals(cmd)) {
System.out.println(refreshUserToGroupsMappings);
} else if ("refreshSuperUserGroupsConfiguration".equals(cmd)) {
System.out.println(refreshSuperUserGroupsConfiguration);
} else if ("refreshCallQueue".equals(cmd)) {
System.out.println(refreshCallQueue);
} else if ("refresh".equals(cmd)) {
System.out.println(genericRefresh);
} else if ("reconfig".equals(cmd)) {
System.out.println(reconfig);
} else if ("printTopology".equals(cmd)) {
System.out.println(printTopology);
} else if ("refreshNamenodes".equals(cmd)) {
System.out.println(refreshNamenodes);
} else if ("deleteBlockPool".equals(cmd)) {
System.out.println(deleteBlockPool);
} else if ("setBalancerBandwidth".equals(cmd)) {
System.out.println(setBalancerBandwidth);
} else if ("fetchImage".equals(cmd)) {
System.out.println(fetchImage);
} else if ("allowSnapshot".equalsIgnoreCase(cmd)) {
System.out.println(allowSnapshot);
} else if ("disallowSnapshot".equalsIgnoreCase(cmd)) {
System.out.println(disallowSnapshot);
} else if ("shutdownDatanode".equalsIgnoreCase(cmd)) {
System.out.println(shutdownDatanode);
} else if ("getDatanodeInfo".equalsIgnoreCase(cmd)) {
System.out.println(getDatanodeInfo);
} else if ("help".equals(cmd)) {
System.out.println(help);
} else {
System.out.println(summary);
System.out.println(report);
System.out.println(safemode);
System.out.println(saveNamespace);
System.out.println(rollEdits);
System.out.println(restoreFailedStorage);
System.out.println(refreshNodes);
System.out.println(finalizeUpgrade);
System.out.println(RollingUpgradeCommand.DESCRIPTION);
System.out.println(metaSave);
System.out.println(SetQuotaCommand.DESCRIPTION);
System.out.println(ClearQuotaCommand.DESCRIPTION);
System.out.println(SetSpaceQuotaCommand.DESCRIPTION);
System.out.println(ClearSpaceQuotaCommand.DESCRIPTION);
System.out.println(refreshServiceAcl);
System.out.println(refreshUserToGroupsMappings);
System.out.println(refreshSuperUserGroupsConfiguration);
System.out.println(refreshCallQueue);
System.out.println(genericRefresh);
System.out.println(reconfig);
System.out.println(printTopology);
System.out.println(refreshNamenodes);
System.out.println(deleteBlockPool);
System.out.println(setBalancerBandwidth);
System.out.println(fetchImage);
System.out.println(allowSnapshot);
System.out.println(disallowSnapshot);
System.out.println(shutdownDatanode);
System.out.println(getDatanodeInfo);
System.out.println(triggerBlockReport);
System.out.println(help);
System.out.println();
ToolRunner.printGenericCommandUsage(System.out);
}
}
/**
* Command to ask the namenode to finalize previously performed upgrade.
* Usage: hdfs dfsadmin -finalizeUpgrade
* @exception IOException
*/
public int finalizeUpgrade() throws IOException {
DistributedFileSystem dfs = getDFS();
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaAndLogicalUri = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
if (isHaAndLogicalUri) {
// In the case of HA and logical URI, run finalizeUpgrade for all
// NNs in this nameservice.
String nsId = dfsUri.getHost();
List<ClientProtocol> namenodes =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
if (!HAUtil.isAtLeastOneActive(namenodes)) {
throw new IOException("Cannot finalize with no NameNode active");
}
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
nsId, ClientProtocol.class);
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
proxy.getProxy().finalizeUpgrade();
System.out.println("Finalize upgrade successful for " +
proxy.getAddress());
}
} else {
dfs.finalizeUpgrade();
System.out.println("Finalize upgrade successful");
}
return 0;
}
/**
* Dumps DFS data structures into specified file.
* Usage: hdfs dfsadmin -metasave filename
   * @param argv List of command line parameters.
* @param idx The index of the command that is being processed.
* @exception IOException if an error occurred while accessing
* the file or path.
*/
public int metaSave(String[] argv, int idx) throws IOException {
String pathname = argv[idx];
DistributedFileSystem dfs = getDFS();
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
if (isHaEnabled) {
String nsId = dfsUri.getHost();
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
nsId, ClientProtocol.class);
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
proxy.getProxy().metaSave(pathname);
System.out.println("Created metasave file " + pathname + " in the log "
+ "directory of namenode " + proxy.getAddress());
}
} else {
dfs.metaSave(pathname);
System.out.println("Created metasave file " + pathname + " in the log " +
"directory of namenode " + dfs.getUri());
}
return 0;
}
/**
* Display each rack and the nodes assigned to that rack, as determined
* by the NameNode, in a hierarchical manner. The nodes and racks are
* sorted alphabetically.
*
* @throws IOException If an error while getting datanode report
*/
public int printTopology() throws IOException {
DistributedFileSystem dfs = getDFS();
final DatanodeInfo[] report = dfs.getDataNodeStats();
// Build a map of rack -> nodes from the datanode report
HashMap<String, TreeSet<String> > tree = new HashMap<String, TreeSet<String>>();
for(DatanodeInfo dni : report) {
String location = dni.getNetworkLocation();
String name = dni.getName();
if(!tree.containsKey(location)) {
tree.put(location, new TreeSet<String>());
}
tree.get(location).add(name);
}
// Sort the racks (and nodes) alphabetically, display in order
ArrayList<String> racks = new ArrayList<String>(tree.keySet());
Collections.sort(racks);
for(String r : racks) {
System.out.println("Rack: " + r);
TreeSet<String> nodes = tree.get(r);
for(String n : nodes) {
System.out.print(" " + n);
String hostname = NetUtils.getHostNameOfIP(n);
if(hostname != null)
System.out.print(" (" + hostname + ")");
System.out.println();
}
System.out.println();
}
return 0;
}
private static UserGroupInformation getUGI()
throws IOException {
return UserGroupInformation.getCurrentUser();
}
/**
* Refresh the authorization policy on the {@link NameNode}.
* @return exitcode 0 on success, non-zero on failure
* @throws IOException
*/
public int refreshServiceAcl() throws IOException {
// Get the current configuration
Configuration conf = getConf();
// for security authorization
// server principal for this call
// should be NN's one.
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
DistributedFileSystem dfs = getDFS();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
if (isHaEnabled) {
// Run refreshServiceAcl for all NNs if HA is enabled
String nsId = dfsUri.getHost();
List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
RefreshAuthorizationPolicyProtocol.class);
for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
proxy.getProxy().refreshServiceAcl();
System.out.println("Refresh service acl successful for "
+ proxy.getAddress());
}
} else {
// Create the client
RefreshAuthorizationPolicyProtocol refreshProtocol =
NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
RefreshAuthorizationPolicyProtocol.class).getProxy();
// Refresh the authorization policy in-effect
refreshProtocol.refreshServiceAcl();
System.out.println("Refresh service acl successful");
}
return 0;
}
/**
* Refresh the user-to-groups mappings on the {@link NameNode}.
* @return exitcode 0 on success, non-zero on failure
* @throws IOException
*/
public int refreshUserToGroupsMappings() throws IOException {
// Get the current configuration
Configuration conf = getConf();
// for security authorization
// server principal for this call
// should be NN's one.
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
DistributedFileSystem dfs = getDFS();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
if (isHaEnabled) {
// Run refreshUserToGroupsMapings for all NNs if HA is enabled
String nsId = dfsUri.getHost();
List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
RefreshUserMappingsProtocol.class);
for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
proxy.getProxy().refreshUserToGroupsMappings();
System.out.println("Refresh user to groups mapping successful for "
+ proxy.getAddress());
}
} else {
// Create the client
RefreshUserMappingsProtocol refreshProtocol =
NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
RefreshUserMappingsProtocol.class).getProxy();
// Refresh the user-to-groups mappings
refreshProtocol.refreshUserToGroupsMappings();
System.out.println("Refresh user to groups mapping successful");
}
return 0;
}
/**
* refreshSuperUserGroupsConfiguration {@link NameNode}.
* @return exitcode 0 on success, non-zero on failure
* @throws IOException
*/
public int refreshSuperUserGroupsConfiguration() throws IOException {
// Get the current configuration
Configuration conf = getConf();
// for security authorization
// server principal for this call
// should be NAMENODE's one.
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
DistributedFileSystem dfs = getDFS();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
if (isHaEnabled) {
// Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
String nsId = dfsUri.getHost();
List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
RefreshUserMappingsProtocol.class);
for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
proxy.getProxy().refreshSuperUserGroupsConfiguration();
System.out.println("Refresh super user groups configuration " +
"successful for " + proxy.getAddress());
}
} else {
// Create the client
RefreshUserMappingsProtocol refreshProtocol =
NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
RefreshUserMappingsProtocol.class).getProxy();
      // Refresh the superuser proxy groups configuration
refreshProtocol.refreshSuperUserGroupsConfiguration();
System.out.println("Refresh super user groups configuration successful");
}
return 0;
}
public int refreshCallQueue() throws IOException {
// Get the current configuration
Configuration conf = getConf();
// for security authorization
// server principal for this call
// should be NN's one.
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
DistributedFileSystem dfs = getDFS();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
if (isHaEnabled) {
// Run refreshCallQueue for all NNs if HA is enabled
String nsId = dfsUri.getHost();
List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
RefreshCallQueueProtocol.class);
for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
proxy.getProxy().refreshCallQueue();
System.out.println("Refresh call queue successful for "
+ proxy.getAddress());
}
} else {
// Create the client
RefreshCallQueueProtocol refreshProtocol =
NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
RefreshCallQueueProtocol.class).getProxy();
// Refresh the call queue
refreshProtocol.refreshCallQueue();
System.out.println("Refresh call queue successful");
}
return 0;
}
public int reconfig(String[] argv, int i) throws IOException {
String nodeType = argv[i];
String address = argv[i + 1];
String op = argv[i + 2];
if ("start".equals(op)) {
return startReconfiguration(nodeType, address);
} else if ("status".equals(op)) {
return getReconfigurationStatus(nodeType, address, System.out, System.err);
} else if ("properties".equals(op)) {
return getReconfigurableProperties(
nodeType, address, System.out, System.err);
}
System.err.println("Unknown operation: " + op);
return -1;
}
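  // Example workflow (illustrative, hypothetical address):
  //   hdfs dfsadmin -reconfig datanode dn1.example.com:50020 properties
  //   hdfs dfsadmin -reconfig datanode dn1.example.com:50020 start
  //   hdfs dfsadmin -reconfig datanode dn1.example.com:50020 status
  // i.e. list what can be reconfigured, start a reconfiguration task on the
  // datanode, then poll its status until it reports finished.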
int startReconfiguration(String nodeType, String address) throws IOException {
if ("datanode".equals(nodeType)) {
ClientDatanodeProtocol dnProxy = getDataNodeProxy(address);
dnProxy.startReconfiguration();
System.out.println("Started reconfiguration task on DataNode " + address);
return 0;
} else {
System.err.println("Node type " + nodeType +
" does not support reconfiguration.");
return 1;
}
}
int getReconfigurationStatus(String nodeType, String address,
PrintStream out, PrintStream err) throws IOException {
if ("datanode".equals(nodeType)) {
ClientDatanodeProtocol dnProxy = getDataNodeProxy(address);
try {
ReconfigurationTaskStatus status = dnProxy.getReconfigurationStatus();
out.print("Reconfiguring status for DataNode[" + address + "]: ");
if (!status.hasTask()) {
out.println("no task was found.");
return 0;
}
out.print("started at " + new Date(status.getStartTime()));
if (!status.stopped()) {
out.println(" and is still running.");
return 0;
}
out.println(" and finished at " +
new Date(status.getEndTime()).toString() + ".");
if (status.getStatus() == null) {
// Nothing to report.
return 0;
}
for (Map.Entry<PropertyChange, Optional<String>> result :
status.getStatus().entrySet()) {
if (!result.getValue().isPresent()) {
out.printf(
"SUCCESS: Changed property %s%n\tFrom: \"%s\"%n\tTo: \"%s\"%n",
result.getKey().prop, result.getKey().oldVal,
result.getKey().newVal);
} else {
final String errorMsg = result.getValue().get();
out.printf(
"FAILED: Change property %s%n\tFrom: \"%s\"%n\tTo: \"%s\"%n",
result.getKey().prop, result.getKey().oldVal,
result.getKey().newVal);
out.println("\tError: " + errorMsg + ".");
}
}
} catch (IOException e) {
err.println("DataNode reloading configuration: " + e + ".");
return 1;
}
} else {
err.println("Node type " + nodeType +
" does not support reconfiguration.");
return 1;
}
return 0;
}
int getReconfigurableProperties(String nodeType, String address,
PrintStream out, PrintStream err) throws IOException {
if ("datanode".equals(nodeType)) {
ClientDatanodeProtocol dnProxy = getDataNodeProxy(address);
try {
List<String> properties =
dnProxy.listReconfigurableProperties();
out.println(
"Configuration properties that are allowed to be reconfigured:");
for (String name : properties) {
out.println(name);
}
} catch (IOException e) {
err.println("DataNode reconfiguration: " + e + ".");
return 1;
}
} else {
err.println("Node type " + nodeType +
" does not support reconfiguration.");
return 1;
}
return 0;
}
public int genericRefresh(String[] argv, int i) throws IOException {
String hostport = argv[i++];
String identifier = argv[i++];
String[] args = Arrays.copyOfRange(argv, i, argv.length);
// Get the current configuration
Configuration conf = getConf();
// for security authorization
// server principal for this call
// should be NN's one.
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
// Create the client
Class<?> xface = GenericRefreshProtocolPB.class;
InetSocketAddress address = NetUtils.createSocketAddr(hostport);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class);
GenericRefreshProtocolPB proxy = (GenericRefreshProtocolPB)
RPC.getProxy(xface, RPC.getProtocolVersion(xface), address,
ugi, conf, NetUtils.getDefaultSocketFactory(conf), 0);
Collection<RefreshResponse> responses = null;
try (GenericRefreshProtocolClientSideTranslatorPB xlator =
new GenericRefreshProtocolClientSideTranslatorPB(proxy);) {
// Refresh
responses = xlator.refresh(identifier, args);
int returnCode = 0;
// Print refresh responses
System.out.println("Refresh Responses:\n");
for (RefreshResponse response : responses) {
System.out.println(response.toString());
if (returnCode == 0 && response.getReturnCode() != 0) {
// This is the first non-zero return code, so we should return this
returnCode = response.getReturnCode();
} else if (returnCode != 0 && response.getReturnCode() != 0) {
// Then now we have multiple non-zero return codes,
// so we merge them into -1
          returnCode = -1;
}
}
return returnCode;
} finally {
if (responses == null) {
System.out.println("Failed to get response.\n");
return -1;
}
}
}
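  // Example (illustrative, hypothetical values): "hdfs dfsadmin -refresh
  // nn1.example.com:8020 myResourceId arg1 arg2" asks the RPC server at that
  // address to invoke the refresh handler registered under "myResourceId",
  // forwarding the trailing arguments to it.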
/**
* Displays format of commands.
* @param cmd The command that is being executed.
*/
private static void printUsage(String cmd) {
if ("-report".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-report] [-live] [-dead] [-decommissioning]");
} else if ("-safemode".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-safemode enter | leave | get | wait]");
} else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-allowSnapshot <snapshotDir>]");
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-disallowSnapshot <snapshotDir>]");
} else if ("-saveNamespace".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-saveNamespace]");
} else if ("-rollEdits".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-rollEdits]");
} else if ("-restoreFailedStorage".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-restoreFailedStorage true|false|check ]");
} else if ("-refreshNodes".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshNodes]");
} else if ("-finalizeUpgrade".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-finalizeUpgrade]");
} else if (RollingUpgradeCommand.matches(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [" + RollingUpgradeCommand.USAGE+"]");
} else if ("-metasave".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-metasave filename]");
} else if (SetQuotaCommand.matches(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [" + SetQuotaCommand.USAGE+"]");
} else if (ClearQuotaCommand.matches(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " ["+ClearQuotaCommand.USAGE+"]");
} else if (SetSpaceQuotaCommand.matches(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [" + SetSpaceQuotaCommand.USAGE+"]");
} else if (ClearSpaceQuotaCommand.matches(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " ["+ClearSpaceQuotaCommand.USAGE+"]");
} else if ("-refreshServiceAcl".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshServiceAcl]");
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshUserToGroupsMappings]");
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshSuperUserGroupsConfiguration]");
} else if ("-refreshCallQueue".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshCallQueue]");
} else if ("-reconfig".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-reconfig <datanode|...> <host:port> <start|status>]");
} else if ("-refresh".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-refresh <hostname:port> <resource_identifier> [arg1..argn]");
} else if ("-printTopology".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-printTopology]");
} else if ("-refreshNamenodes".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-refreshNamenodes datanode-host:port]");
} else if ("-deleteBlockPool".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-deleteBlockPool datanode-host:port blockpoolId [force]]");
} else if ("-setBalancerBandwidth".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-setBalancerBandwidth <bandwidth in bytes per second>]");
} else if ("-fetchImage".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-fetchImage <local directory>]");
} else if ("-shutdownDatanode".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-shutdownDatanode <datanode_host:ipc_port> [upgrade]]");
} else if ("-getDatanodeInfo".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-getDatanodeInfo <datanode_host:ipc_port>]");
} else if ("-triggerBlockReport".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-triggerBlockReport [-incremental] <datanode_host:ipc_port>]");
} else {
System.err.println("Usage: hdfs dfsadmin");
System.err.println("Note: Administrative commands can only be run as the HDFS superuser.");
System.err.println(commonUsageSummary);
ToolRunner.printGenericCommandUsage(System.err);
}
}
/**
* @param argv The parameters passed to this program.
* @exception Exception if the filesystem does not exist.
* @return 0 on success, non zero on error.
*/
@Override
public int run(String[] argv) throws Exception {
if (argv.length < 1) {
printUsage("");
return -1;
}
int exitCode = -1;
int i = 0;
String cmd = argv[i++];
//
// verify that we have enough command line parameters
//
if ("-safemode".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-report".equals(cmd)) {
if (argv.length < 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-saveNamespace".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-rollEdits".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-restoreFailedStorage".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshNodes".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-finalizeUpgrade".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if (RollingUpgradeCommand.matches(cmd)) {
if (argv.length < 1 || argv.length > 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-metasave".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshServiceAcl".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-refresh".equals(cmd)) {
if (argv.length < 3) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-printTopology".equals(cmd)) {
if(argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshNamenodes".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-reconfig".equals(cmd)) {
if (argv.length != 4) {
printUsage(cmd);
return exitCode;
}
} else if ("-deleteBlockPool".equals(cmd)) {
if ((argv.length != 3) && (argv.length != 4)) {
printUsage(cmd);
return exitCode;
}
} else if ("-setBalancerBandwidth".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-fetchImage".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-shutdownDatanode".equals(cmd)) {
if ((argv.length != 2) && (argv.length != 3)) {
printUsage(cmd);
return exitCode;
}
} else if ("-getDatanodeInfo".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-triggerBlockReport".equals(cmd)) {
if (argv.length < 1) {
printUsage(cmd);
return exitCode;
}
}
// initialize DFSAdmin
try {
init();
} catch (RPC.VersionMismatch v) {
System.err.println("Version Mismatch between client and server"
+ "... command aborted.");
return exitCode;
} catch (IOException e) {
System.err.println("Bad connection to DFS... command aborted.");
return exitCode;
}
Exception debugException = null;
exitCode = 0;
try {
if ("-report".equals(cmd)) {
report(argv, i);
} else if ("-safemode".equals(cmd)) {
setSafeMode(argv, i);
} else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
allowSnapshot(argv);
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
disallowSnapshot(argv);
} else if ("-saveNamespace".equals(cmd)) {
exitCode = saveNamespace();
} else if ("-rollEdits".equals(cmd)) {
exitCode = rollEdits();
} else if ("-restoreFailedStorage".equals(cmd)) {
exitCode = restoreFailedStorage(argv[i]);
} else if ("-refreshNodes".equals(cmd)) {
exitCode = refreshNodes();
} else if ("-finalizeUpgrade".equals(cmd)) {
exitCode = finalizeUpgrade();
} else if (RollingUpgradeCommand.matches(cmd)) {
exitCode = RollingUpgradeCommand.run(getDFS(), argv, i);
} else if ("-metasave".equals(cmd)) {
exitCode = metaSave(argv, i);
} else if (ClearQuotaCommand.matches(cmd)) {
exitCode = new ClearQuotaCommand(argv, i, getDFS()).runAll();
} else if (SetQuotaCommand.matches(cmd)) {
exitCode = new SetQuotaCommand(argv, i, getDFS()).runAll();
} else if (ClearSpaceQuotaCommand.matches(cmd)) {
exitCode = new ClearSpaceQuotaCommand(argv, i, getDFS()).runAll();
} else if (SetSpaceQuotaCommand.matches(cmd)) {
exitCode = new SetSpaceQuotaCommand(argv, i, getDFS()).runAll();
} else if ("-refreshServiceAcl".equals(cmd)) {
exitCode = refreshServiceAcl();
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
exitCode = refreshUserToGroupsMappings();
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
exitCode = refreshSuperUserGroupsConfiguration();
} else if ("-refreshCallQueue".equals(cmd)) {
exitCode = refreshCallQueue();
} else if ("-refresh".equals(cmd)) {
exitCode = genericRefresh(argv, i);
} else if ("-printTopology".equals(cmd)) {
exitCode = printTopology();
} else if ("-refreshNamenodes".equals(cmd)) {
exitCode = refreshNamenodes(argv, i);
} else if ("-deleteBlockPool".equals(cmd)) {
exitCode = deleteBlockPool(argv, i);
} else if ("-setBalancerBandwidth".equals(cmd)) {
exitCode = setBalancerBandwidth(argv, i);
} else if ("-fetchImage".equals(cmd)) {
exitCode = fetchImage(argv, i);
} else if ("-shutdownDatanode".equals(cmd)) {
exitCode = shutdownDatanode(argv, i);
} else if ("-getDatanodeInfo".equals(cmd)) {
exitCode = getDatanodeInfo(argv, i);
} else if ("-reconfig".equals(cmd)) {
exitCode = reconfig(argv, i);
} else if ("-triggerBlockReport".equals(cmd)) {
exitCode = triggerBlockReport(argv);
} else if ("-help".equals(cmd)) {
if (i < argv.length) {
printHelp(argv[i]);
} else {
printHelp("");
}
} else {
exitCode = -1;
System.err.println(cmd.substring(1) + ": Unknown command");
printUsage("");
}
} catch (IllegalArgumentException arge) {
debugException = arge;
exitCode = -1;
System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
printUsage(cmd);
} catch (RemoteException e) {
//
      // This is an error returned by the hadoop server. Print
// out the first line of the error message, ignore the stack trace.
exitCode = -1;
debugException = e;
try {
String[] content;
content = e.getLocalizedMessage().split("\n");
System.err.println(cmd.substring(1) + ": "
+ content[0]);
} catch (Exception ex) {
System.err.println(cmd.substring(1) + ": "
+ ex.getLocalizedMessage());
debugException = ex;
}
} catch (Exception e) {
exitCode = -1;
debugException = e;
System.err.println(cmd.substring(1) + ": "
+ e.getLocalizedMessage());
}
if (LOG.isDebugEnabled()) {
LOG.debug("Exception encountered:", debugException);
}
return exitCode;
}
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
throws IOException {
InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
// Get the current configuration
Configuration conf = getConf();
// For datanode proxy the server principal should be DN's one.
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));
// Create the client
ClientDatanodeProtocol dnProtocol =
DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
return dnProtocol;
}
private int deleteBlockPool(String[] argv, int i) throws IOException {
ClientDatanodeProtocol dnProxy = getDataNodeProxy(argv[i]);
boolean force = false;
if (argv.length-1 == i+2) {
if ("force".equals(argv[i+2])) {
force = true;
} else {
printUsage("-deleteBlockPool");
return -1;
}
}
dnProxy.deleteBlockPool(argv[i+1], force);
return 0;
}
private int refreshNamenodes(String[] argv, int i) throws IOException {
String datanode = argv[i];
ClientDatanodeProtocol refreshProtocol = getDataNodeProxy(datanode);
refreshProtocol.refreshNamenodes();
return 0;
}
private int shutdownDatanode(String[] argv, int i) throws IOException {
final String dn = argv[i];
ClientDatanodeProtocol dnProxy = getDataNodeProxy(dn);
boolean upgrade = false;
if (argv.length-1 == i+1) {
if ("upgrade".equalsIgnoreCase(argv[i+1])) {
upgrade = true;
} else {
printUsage("-shutdownDatanode");
return -1;
}
}
dnProxy.shutdownDatanode(upgrade);
System.out.println("Submitted a shutdown request to datanode " + dn);
return 0;
}
private int getDatanodeInfo(String[] argv, int i) throws IOException {
ClientDatanodeProtocol dnProxy = getDataNodeProxy(argv[i]);
try {
DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
System.out.println(dnInfo.getDatanodeLocalReport());
} catch (IOException ioe) {
System.err.println("Datanode unreachable.");
return -1;
}
return 0;
}
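  // Example (illustrative, hypothetical address): after "hdfs dfsadmin
  // -shutdownDatanode dn1.example.com:50020 upgrade", repeatedly running
  // "hdfs dfsadmin -getDatanodeInfo dn1.example.com:50020" until it prints
  // "Datanode unreachable." is one way to confirm the datanode has stopped.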
/**
* main() has some simple utility methods.
* @param argv Command line parameters.
* @exception Exception if the filesystem does not exist.
*/
public static void main(String[] argv) throws Exception {
int res = ToolRunner.run(new DFSAdmin(), argv);
System.exit(res);
}
}
| 77,472 | 37.6399 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.tools.TableListing;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
/**
* This class implements crypto command-line operations.
*/
@InterfaceAudience.Private
public class CryptoAdmin extends Configured implements Tool {
public CryptoAdmin() {
this(null);
}
public CryptoAdmin(Configuration conf) {
super(conf);
}
@Override
public int run(String[] args) throws IOException {
if (args.length == 0) {
AdminHelper.printUsage(false, "crypto", COMMANDS);
return 1;
}
final AdminHelper.Command command = AdminHelper.determineCommand(args[0],
COMMANDS);
if (command == null) {
System.err.println("Can't understand command '" + args[0] + "'");
if (!args[0].startsWith("-")) {
System.err.println("Command names must start with dashes.");
}
AdminHelper.printUsage(false, "crypto", COMMANDS);
return 1;
}
final List<String> argsList = new LinkedList<String>();
for (int j = 1; j < args.length; j++) {
argsList.add(args[j]);
}
try {
return command.run(getConf(), argsList);
} catch (IllegalArgumentException e) {
System.err.println(prettifyException(e));
return -1;
}
}
public static void main(String[] argsArray) throws IOException {
final CryptoAdmin cryptoAdmin = new CryptoAdmin(new Configuration());
System.exit(cryptoAdmin.run(argsArray));
}
/**
* NN exceptions contain the stack trace as part of the exception message.
* When it's a known error, pretty-print the error and squish the stack trace.
*/
private static String prettifyException(Exception e) {
return e.getClass().getSimpleName() + ": " +
e.getLocalizedMessage().split("\n")[0];
}
private static class CreateZoneCommand implements AdminHelper.Command {
@Override
public String getName() {
return "-createZone";
}
@Override
public String getShortUsage() {
return "[" + getName() + " -keyName <keyName> -path <path>]\n";
}
@Override
public String getLongUsage() {
final TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("<path>", "The path of the encryption zone to create. " +
"It must be an empty directory.");
listing.addRow("<keyName>", "Name of the key to use for the " +
"encryption zone.");
return getShortUsage() + "\n" +
"Create a new encryption zone.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
final String path = StringUtils.popOptionWithArgument("-path", args);
if (path == null) {
System.err.println("You must specify a path with -path.");
return 1;
}
final String keyName =
StringUtils.popOptionWithArgument("-keyName", args);
if (keyName == null) {
System.err.println("You must specify a key name with -keyName.");
return 1;
}
if (!args.isEmpty()) {
System.err.println("Can't understand argument: " + args.get(0));
return 1;
}
final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
try {
dfs.createEncryptionZone(new Path(path), keyName);
System.out.println("Added encryption zone " + path);
} catch (IOException e) {
System.err.println(prettifyException(e));
return 2;
}
return 0;
}
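    // Example (illustrative; assumes a key named "mykey" already exists in the
    // cluster's configured key provider): "hdfs crypto -createZone -keyName
    // mykey -path /secure" turns the empty directory /secure into an
    // encryption zone.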
}
private static class ListZonesCommand implements AdminHelper.Command {
@Override
public String getName() {
return "-listZones";
}
@Override
public String getShortUsage() {
return "[" + getName()+ "]\n";
}
@Override
public String getLongUsage() {
return getShortUsage() + "\n" +
"List all encryption zones. Requires superuser permissions.\n\n";
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
if (!args.isEmpty()) {
System.err.println("Can't understand argument: " + args.get(0));
return 1;
}
final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
try {
final TableListing listing = new TableListing.Builder()
.addField("").addField("", true)
.wrapWidth(AdminHelper.MAX_LINE_WIDTH).hideHeaders().build();
final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
while (it.hasNext()) {
EncryptionZone ez = it.next();
listing.addRow(ez.getPath(), ez.getKeyName());
}
System.out.println(listing.toString());
} catch (IOException e) {
System.err.println(prettifyException(e));
return 2;
}
return 0;
}
}
private static final AdminHelper.Command[] COMMANDS = {
new CreateZoneCommand(),
new ListZonesCommand()
};
}
| 6,181 | 30.540816 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.IOException;
import java.util.EnumSet;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.lang.WordUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolStats;
import org.apache.hadoop.tools.TableListing;
import org.apache.hadoop.tools.TableListing.Justification;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import com.google.common.base.Joiner;
/**
* This class implements command-line operations on the HDFS Cache.
*/
@InterfaceAudience.Private
public class CacheAdmin extends Configured implements Tool {
public CacheAdmin() {
this(null);
}
public CacheAdmin(Configuration conf) {
super(conf);
}
@Override
public int run(String[] args) throws IOException {
if (args.length == 0) {
AdminHelper.printUsage(false, "cacheadmin", COMMANDS);
return 1;
}
AdminHelper.Command command = AdminHelper.determineCommand(args[0],
COMMANDS);
if (command == null) {
System.err.println("Can't understand command '" + args[0] + "'");
if (!args[0].startsWith("-")) {
System.err.println("Command names must start with dashes.");
}
AdminHelper.printUsage(false, "cacheadmin", COMMANDS);
return 1;
}
List<String> argsList = new LinkedList<String>();
for (int j = 1; j < args.length; j++) {
argsList.add(args[j]);
}
try {
return command.run(getConf(), argsList);
} catch (IllegalArgumentException e) {
System.err.println(AdminHelper.prettifyException(e));
return -1;
}
}
public static void main(String[] argsArray) throws IOException {
CacheAdmin cacheAdmin = new CacheAdmin(new Configuration());
System.exit(cacheAdmin.run(argsArray));
}
private static CacheDirectiveInfo.Expiration parseExpirationString(String ttlString)
throws IOException {
CacheDirectiveInfo.Expiration ex = null;
if (ttlString != null) {
if (ttlString.equalsIgnoreCase("never")) {
ex = CacheDirectiveInfo.Expiration.NEVER;
} else {
long ttl = DFSUtil.parseRelativeTime(ttlString);
ex = CacheDirectiveInfo.Expiration.newRelative(ttl);
}
}
return ex;
}
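  // Illustrative examples (not part of the original source). The millisecond
  // value assumes DFSUtil.parseRelativeTime returns milliseconds, as implied
  // by its use with Expiration.newRelative above.
  //
  //   parseExpirationString(null);     // -> null, caller leaves expiration unset
  //   parseExpirationString("never");  // -> CacheDirectiveInfo.Expiration.NEVER
  //   parseExpirationString("30m");    // -> relative expiration of 1800000 ms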
private static class AddCacheDirectiveInfoCommand
implements AdminHelper.Command {
@Override
public String getName() {
return "-addDirective";
}
@Override
public String getShortUsage() {
return "[" + getName() +
" -path <path> -pool <pool-name> " +
"[-force] " +
"[-replication <replication>] [-ttl <time-to-live>]]\n";
}
@Override
public String getLongUsage() {
TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("<path>", "A path to cache. The path can be " +
"a directory or a file.");
listing.addRow("<pool-name>", "The pool to which the directive will be " +
"added. You must have write permission on the cache pool "
+ "in order to add new directives.");
listing.addRow("-force",
"Skips checking of cache pool resource limits.");
listing.addRow("<replication>", "The cache replication factor to use. " +
"Defaults to 1.");
listing.addRow("<time-to-live>", "How long the directive is " +
"valid. Can be specified in minutes, hours, and days, e.g. " +
"30m, 4h, 2d. Valid units are [smhd]." +
" \"never\" indicates a directive that never expires." +
" If unspecified, the directive never expires.");
return getShortUsage() + "\n" +
"Add a new cache directive.\n\n" +
listing.toString();
}
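    // Example invocation, for illustration only (the path and pool name below
    // are hypothetical):
    //
    //   hdfs cacheadmin -addDirective -path /benchmarks -pool research -replication 2 -ttl 4h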
@Override
public int run(Configuration conf, List<String> args) throws IOException {
CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder();
String path = StringUtils.popOptionWithArgument("-path", args);
if (path == null) {
System.err.println("You must specify a path with -path.");
return 1;
}
builder.setPath(new Path(path));
String poolName = StringUtils.popOptionWithArgument("-pool", args);
if (poolName == null) {
System.err.println("You must specify a pool name with -pool.");
return 1;
}
builder.setPool(poolName);
boolean force = StringUtils.popOption("-force", args);
String replicationString =
StringUtils.popOptionWithArgument("-replication", args);
if (replicationString != null) {
Short replication = Short.parseShort(replicationString);
builder.setReplication(replication);
}
String ttlString = StringUtils.popOptionWithArgument("-ttl", args);
try {
Expiration ex = parseExpirationString(ttlString);
if (ex != null) {
builder.setExpiration(ex);
}
} catch (IOException e) {
System.err.println(
"Error while parsing ttl value: " + e.getMessage());
return 1;
}
if (!args.isEmpty()) {
System.err.println("Can't understand argument: " + args.get(0));
return 1;
}
DistributedFileSystem dfs = AdminHelper.getDFS(conf);
CacheDirectiveInfo directive = builder.build();
EnumSet<CacheFlag> flags = EnumSet.noneOf(CacheFlag.class);
if (force) {
flags.add(CacheFlag.FORCE);
}
try {
long id = dfs.addCacheDirective(directive, flags);
System.out.println("Added cache directive " + id);
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
return 0;
}
}
private static class RemoveCacheDirectiveInfoCommand
implements AdminHelper.Command {
@Override
public String getName() {
return "-removeDirective";
}
@Override
public String getShortUsage() {
return "[" + getName() + " <id>]\n";
}
@Override
public String getLongUsage() {
TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("<id>", "The id of the cache directive to remove. " +
"You must have write permission on the pool of the " +
"directive in order to remove it. To see a list " +
"of cache directive IDs, use the -listDirectives command.");
return getShortUsage() + "\n" +
"Remove a cache directive.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
      String idString = StringUtils.popFirstNonOption(args);
if (idString == null) {
System.err.println("You must specify a directive ID to remove.");
return 1;
}
long id;
try {
id = Long.parseLong(idString);
} catch (NumberFormatException e) {
System.err.println("Invalid directive ID " + idString + ": expected " +
"a numeric value.");
return 1;
}
if (id <= 0) {
System.err.println("Invalid directive ID " + id + ": ids must " +
"be greater than 0.");
return 1;
}
if (!args.isEmpty()) {
System.err.println("Can't understand argument: " + args.get(0));
System.err.println("Usage is " + getShortUsage());
return 1;
}
DistributedFileSystem dfs = AdminHelper.getDFS(conf);
try {
dfs.getClient().removeCacheDirective(id);
System.out.println("Removed cached directive " + id);
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
return 0;
}
}
private static class ModifyCacheDirectiveInfoCommand
implements AdminHelper.Command {
@Override
public String getName() {
return "-modifyDirective";
}
@Override
public String getShortUsage() {
return "[" + getName() +
" -id <id> [-path <path>] [-force] [-replication <replication>] " +
"[-pool <pool-name>] [-ttl <time-to-live>]]\n";
}
@Override
public String getLongUsage() {
TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("<id>", "The ID of the directive to modify (required)");
listing.addRow("<path>", "A path to cache. The path can be " +
"a directory or a file. (optional)");
listing.addRow("-force",
"Skips checking of cache pool resource limits.");
listing.addRow("<replication>", "The cache replication factor to use. " +
"(optional)");
listing.addRow("<pool-name>", "The pool to which the directive will be " +
"added. You must have write permission on the cache pool "
+ "in order to move a directive into it. (optional)");
listing.addRow("<time-to-live>", "How long the directive is " +
"valid. Can be specified in minutes, hours, and days, e.g. " +
"30m, 4h, 2d. Valid units are [smhd]." +
" \"never\" indicates a directive that never expires.");
return getShortUsage() + "\n" +
"Modify a cache directive.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
CacheDirectiveInfo.Builder builder =
new CacheDirectiveInfo.Builder();
boolean modified = false;
String idString = StringUtils.popOptionWithArgument("-id", args);
if (idString == null) {
System.err.println("You must specify a directive ID with -id.");
return 1;
}
builder.setId(Long.parseLong(idString));
String path = StringUtils.popOptionWithArgument("-path", args);
if (path != null) {
builder.setPath(new Path(path));
modified = true;
}
boolean force = StringUtils.popOption("-force", args);
String replicationString =
StringUtils.popOptionWithArgument("-replication", args);
if (replicationString != null) {
builder.setReplication(Short.parseShort(replicationString));
modified = true;
}
String poolName =
StringUtils.popOptionWithArgument("-pool", args);
if (poolName != null) {
builder.setPool(poolName);
modified = true;
}
String ttlString = StringUtils.popOptionWithArgument("-ttl", args);
try {
Expiration ex = parseExpirationString(ttlString);
if (ex != null) {
builder.setExpiration(ex);
modified = true;
}
} catch (IOException e) {
System.err.println(
"Error while parsing ttl value: " + e.getMessage());
return 1;
}
if (!args.isEmpty()) {
System.err.println("Can't understand argument: " + args.get(0));
System.err.println("Usage is " + getShortUsage());
return 1;
}
if (!modified) {
System.err.println("No modifications were specified.");
return 1;
}
DistributedFileSystem dfs = AdminHelper.getDFS(conf);
EnumSet<CacheFlag> flags = EnumSet.noneOf(CacheFlag.class);
if (force) {
flags.add(CacheFlag.FORCE);
}
try {
dfs.modifyCacheDirective(builder.build(), flags);
System.out.println("Modified cache directive " + idString);
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
return 0;
}
}
private static class RemoveCacheDirectiveInfosCommand
implements AdminHelper.Command {
@Override
public String getName() {
return "-removeDirectives";
}
@Override
public String getShortUsage() {
return "[" + getName() + " -path <path>]\n";
}
@Override
public String getLongUsage() {
TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("-path <path>", "The path of the cache directives to remove. " +
"You must have write permission on the pool of the directive in order " +
"to remove it. To see a list of cache directives, use the " +
"-listDirectives command.");
return getShortUsage() + "\n" +
"Remove every cache directive with the specified path.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
String path = StringUtils.popOptionWithArgument("-path", args);
if (path == null) {
System.err.println("You must specify a path with -path.");
return 1;
}
if (!args.isEmpty()) {
System.err.println("Can't understand argument: " + args.get(0));
System.err.println("Usage is " + getShortUsage());
return 1;
}
int exitCode = 0;
try {
DistributedFileSystem dfs = AdminHelper.getDFS(conf);
RemoteIterator<CacheDirectiveEntry> iter =
dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder().
setPath(new Path(path)).build());
while (iter.hasNext()) {
CacheDirectiveEntry entry = iter.next();
try {
dfs.removeCacheDirective(entry.getInfo().getId());
System.out.println("Removed cache directive " +
entry.getInfo().getId());
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
exitCode = 2;
}
}
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
exitCode = 2;
}
if (exitCode == 0) {
System.out.println("Removed every cache directive with path " +
path);
}
return exitCode;
}
}
private static class ListCacheDirectiveInfoCommand
implements AdminHelper.Command {
@Override
public String getName() {
return "-listDirectives";
}
@Override
public String getShortUsage() {
return "[" + getName()
+ " [-stats] [-path <path>] [-pool <pool>] [-id <id>]\n";
}
@Override
public String getLongUsage() {
TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("-stats", "List path-based cache directive statistics.");
listing.addRow("<path>", "List only " +
"cache directives with this path. " +
"Note that if there is a cache directive for <path> " +
"in a cache pool that we don't have read access for, it " +
"will not be listed.");
listing.addRow("<pool>", "List only path cache directives in that pool.");
listing.addRow("<id>", "List the cache directive with this id.");
return getShortUsage() + "\n" +
"List cache directives.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
CacheDirectiveInfo.Builder builder =
new CacheDirectiveInfo.Builder();
String pathFilter = StringUtils.popOptionWithArgument("-path", args);
if (pathFilter != null) {
builder.setPath(new Path(pathFilter));
}
String poolFilter = StringUtils.popOptionWithArgument("-pool", args);
if (poolFilter != null) {
builder.setPool(poolFilter);
}
boolean printStats = StringUtils.popOption("-stats", args);
String idFilter = StringUtils.popOptionWithArgument("-id", args);
if (idFilter != null) {
builder.setId(Long.parseLong(idFilter));
}
if (!args.isEmpty()) {
System.err.println("Can't understand argument: " + args.get(0));
return 1;
}
TableListing.Builder tableBuilder = new TableListing.Builder().
addField("ID", Justification.RIGHT).
addField("POOL", Justification.LEFT).
addField("REPL", Justification.RIGHT).
addField("EXPIRY", Justification.LEFT).
addField("PATH", Justification.LEFT);
if (printStats) {
tableBuilder.addField("BYTES_NEEDED", Justification.RIGHT).
addField("BYTES_CACHED", Justification.RIGHT).
addField("FILES_NEEDED", Justification.RIGHT).
addField("FILES_CACHED", Justification.RIGHT);
}
TableListing tableListing = tableBuilder.build();
try {
DistributedFileSystem dfs = AdminHelper.getDFS(conf);
RemoteIterator<CacheDirectiveEntry> iter =
dfs.listCacheDirectives(builder.build());
int numEntries = 0;
while (iter.hasNext()) {
CacheDirectiveEntry entry = iter.next();
CacheDirectiveInfo directive = entry.getInfo();
CacheDirectiveStats stats = entry.getStats();
List<String> row = new LinkedList<String>();
row.add("" + directive.getId());
row.add(directive.getPool());
row.add("" + directive.getReplication());
String expiry;
// This is effectively never, round for nice printing
if (directive.getExpiration().getMillis() >
Expiration.MAX_RELATIVE_EXPIRY_MS / 2) {
expiry = "never";
} else {
expiry = directive.getExpiration().toString();
}
row.add(expiry);
row.add(directive.getPath().toUri().getPath());
if (printStats) {
row.add("" + stats.getBytesNeeded());
row.add("" + stats.getBytesCached());
row.add("" + stats.getFilesNeeded());
row.add("" + stats.getFilesCached());
}
tableListing.addRow(row.toArray(new String[row.size()]));
numEntries++;
}
System.out.print(String.format("Found %d entr%s%n",
numEntries, numEntries == 1 ? "y" : "ies"));
if (numEntries > 0) {
System.out.print(tableListing);
}
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
return 0;
}
}
private static class AddCachePoolCommand implements AdminHelper.Command {
private static final String NAME = "-addPool";
@Override
public String getName() {
return NAME;
}
@Override
public String getShortUsage() {
return "[" + NAME + " <name> [-owner <owner>] " +
"[-group <group>] [-mode <mode>] [-limit <limit>] " +
"[-maxTtl <maxTtl>]\n";
}
@Override
public String getLongUsage() {
TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("<name>", "Name of the new pool.");
listing.addRow("<owner>", "Username of the owner of the pool. " +
"Defaults to the current user.");
listing.addRow("<group>", "Group of the pool. " +
"Defaults to the primary group name of the current user.");
listing.addRow("<mode>", "UNIX-style permissions for the pool. " +
"Permissions are specified in octal, e.g. 0755. " +
"By default, this is set to " + String.format("0%03o",
FsPermission.getCachePoolDefault().toShort()) + ".");
listing.addRow("<limit>", "The maximum number of bytes that can be " +
"cached by directives in this pool, in aggregate. By default, " +
"no limit is set.");
listing.addRow("<maxTtl>", "The maximum allowed time-to-live for " +
"directives being added to the pool. This can be specified in " +
"seconds, minutes, hours, and days, e.g. 120s, 30m, 4h, 2d. " +
"Valid units are [smhd]. By default, no maximum is set. " +
"A value of \"never\" specifies that there is no limit.");
return getShortUsage() + "\n" +
"Add a new cache pool.\n\n" +
listing.toString();
}
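    // Example invocation, for illustration only (the pool name, owner and
    // limits below are hypothetical):
    //
    //   hdfs cacheadmin -addPool research -owner alice -mode 0770 -limit 1000000000 -maxTtl 30d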
@Override
public int run(Configuration conf, List<String> args) throws IOException {
String name = StringUtils.popFirstNonOption(args);
if (name == null) {
System.err.println("You must specify a name when creating a " +
"cache pool.");
return 1;
}
CachePoolInfo info = new CachePoolInfo(name);
String owner = StringUtils.popOptionWithArgument("-owner", args);
if (owner != null) {
info.setOwnerName(owner);
}
String group = StringUtils.popOptionWithArgument("-group", args);
if (group != null) {
info.setGroupName(group);
}
String modeString = StringUtils.popOptionWithArgument("-mode", args);
if (modeString != null) {
short mode = Short.parseShort(modeString, 8);
info.setMode(new FsPermission(mode));
}
String limitString = StringUtils.popOptionWithArgument("-limit", args);
Long limit = AdminHelper.parseLimitString(limitString);
if (limit != null) {
info.setLimit(limit);
}
String maxTtlString = StringUtils.popOptionWithArgument("-maxTtl", args);
try {
Long maxTtl = AdminHelper.parseTtlString(maxTtlString);
if (maxTtl != null) {
info.setMaxRelativeExpiryMs(maxTtl);
}
} catch (IOException e) {
System.err.println(
"Error while parsing maxTtl value: " + e.getMessage());
return 1;
}
if (!args.isEmpty()) {
System.err.print("Can't understand arguments: " +
Joiner.on(" ").join(args) + "\n");
System.err.println("Usage is " + getShortUsage());
return 1;
}
DistributedFileSystem dfs = AdminHelper.getDFS(conf);
try {
dfs.addCachePool(info);
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
System.out.println("Successfully added cache pool " + name + ".");
return 0;
}
}
private static class ModifyCachePoolCommand implements AdminHelper.Command {
@Override
public String getName() {
return "-modifyPool";
}
@Override
public String getShortUsage() {
return "[" + getName() + " <name> [-owner <owner>] " +
"[-group <group>] [-mode <mode>] [-limit <limit>] " +
"[-maxTtl <maxTtl>]]\n";
}
@Override
public String getLongUsage() {
TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("<name>", "Name of the pool to modify.");
listing.addRow("<owner>", "Username of the owner of the pool");
listing.addRow("<group>", "Groupname of the group of the pool.");
listing.addRow("<mode>", "Unix-style permissions of the pool in octal.");
listing.addRow("<limit>", "Maximum number of bytes that can be cached " +
"by this pool.");
listing.addRow("<maxTtl>", "The maximum allowed time-to-live for " +
"directives being added to the pool.");
return getShortUsage() + "\n" +
WordUtils.wrap("Modifies the metadata of an existing cache pool. " +
"See usage of " + AddCachePoolCommand.NAME + " for more details.",
AdminHelper.MAX_LINE_WIDTH) + "\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
String owner = StringUtils.popOptionWithArgument("-owner", args);
String group = StringUtils.popOptionWithArgument("-group", args);
String modeString = StringUtils.popOptionWithArgument("-mode", args);
Integer mode = (modeString == null) ?
null : Integer.parseInt(modeString, 8);
String limitString = StringUtils.popOptionWithArgument("-limit", args);
Long limit = AdminHelper.parseLimitString(limitString);
String maxTtlString = StringUtils.popOptionWithArgument("-maxTtl", args);
Long maxTtl;
try {
maxTtl = AdminHelper.parseTtlString(maxTtlString);
} catch (IOException e) {
System.err.println(
"Error while parsing maxTtl value: " + e.getMessage());
return 1;
}
String name = StringUtils.popFirstNonOption(args);
if (name == null) {
System.err.println("You must specify a name when creating a " +
"cache pool.");
return 1;
}
if (!args.isEmpty()) {
System.err.print("Can't understand arguments: " +
Joiner.on(" ").join(args) + "\n");
System.err.println("Usage is " + getShortUsage());
return 1;
}
boolean changed = false;
CachePoolInfo info = new CachePoolInfo(name);
if (owner != null) {
info.setOwnerName(owner);
changed = true;
}
if (group != null) {
info.setGroupName(group);
changed = true;
}
if (mode != null) {
info.setMode(new FsPermission(mode.shortValue()));
changed = true;
}
if (limit != null) {
info.setLimit(limit);
changed = true;
}
if (maxTtl != null) {
info.setMaxRelativeExpiryMs(maxTtl);
changed = true;
}
if (!changed) {
System.err.println("You must specify at least one attribute to " +
"change in the cache pool.");
return 1;
}
DistributedFileSystem dfs = AdminHelper.getDFS(conf);
try {
dfs.modifyCachePool(info);
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
System.out.print("Successfully modified cache pool " + name);
String prefix = " to have ";
if (owner != null) {
System.out.print(prefix + "owner name " + owner);
prefix = " and ";
}
if (group != null) {
System.out.print(prefix + "group name " + group);
prefix = " and ";
}
if (mode != null) {
System.out.print(prefix + "mode " + new FsPermission(mode.shortValue()));
prefix = " and ";
}
if (limit != null) {
System.out.print(prefix + "limit " + limit);
prefix = " and ";
}
if (maxTtl != null) {
System.out.print(prefix + "max time-to-live " + maxTtlString);
}
System.out.print("\n");
return 0;
}
}
private static class RemoveCachePoolCommand implements AdminHelper.Command {
@Override
public String getName() {
return "-removePool";
}
@Override
public String getShortUsage() {
return "[" + getName() + " <name>]\n";
}
@Override
public String getLongUsage() {
return getShortUsage() + "\n" +
WordUtils.wrap("Remove a cache pool. This also uncaches paths " +
"associated with the pool.\n\n", AdminHelper.MAX_LINE_WIDTH) +
"<name> Name of the cache pool to remove.\n";
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
String name = StringUtils.popFirstNonOption(args);
if (name == null) {
System.err.println("You must specify a name when deleting a " +
"cache pool.");
return 1;
}
if (!args.isEmpty()) {
System.err.print("Can't understand arguments: " +
Joiner.on(" ").join(args) + "\n");
System.err.println("Usage is " + getShortUsage());
return 1;
}
DistributedFileSystem dfs = AdminHelper.getDFS(conf);
try {
dfs.removeCachePool(name);
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
System.out.println("Successfully removed cache pool " + name + ".");
return 0;
}
}
private static class ListCachePoolsCommand implements AdminHelper.Command {
@Override
public String getName() {
return "-listPools";
}
@Override
public String getShortUsage() {
return "[" + getName() + " [-stats] [<name>]]\n";
}
@Override
public String getLongUsage() {
TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("-stats", "Display additional cache pool statistics.");
listing.addRow("<name>", "If specified, list only the named cache pool.");
return getShortUsage() + "\n" +
WordUtils.wrap("Display information about one or more cache pools, " +
"e.g. name, owner, group, permissions, etc.",
AdminHelper.MAX_LINE_WIDTH) + "\n\n" + listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
String name = StringUtils.popFirstNonOption(args);
final boolean printStats = StringUtils.popOption("-stats", args);
if (!args.isEmpty()) {
System.err.print("Can't understand arguments: " +
Joiner.on(" ").join(args) + "\n");
System.err.println("Usage is " + getShortUsage());
return 1;
}
DistributedFileSystem dfs = AdminHelper.getDFS(conf);
TableListing.Builder builder = new TableListing.Builder().
addField("NAME", Justification.LEFT).
addField("OWNER", Justification.LEFT).
addField("GROUP", Justification.LEFT).
addField("MODE", Justification.LEFT).
addField("LIMIT", Justification.RIGHT).
addField("MAXTTL", Justification.RIGHT);
if (printStats) {
builder.
addField("BYTES_NEEDED", Justification.RIGHT).
addField("BYTES_CACHED", Justification.RIGHT).
addField("BYTES_OVERLIMIT", Justification.RIGHT).
addField("FILES_NEEDED", Justification.RIGHT).
addField("FILES_CACHED", Justification.RIGHT);
}
TableListing listing = builder.build();
int numResults = 0;
try {
RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
while (iter.hasNext()) {
CachePoolEntry entry = iter.next();
CachePoolInfo info = entry.getInfo();
LinkedList<String> row = new LinkedList<String>();
if (name == null || info.getPoolName().equals(name)) {
row.add(info.getPoolName());
row.add(info.getOwnerName());
row.add(info.getGroupName());
row.add(info.getMode() != null ? info.getMode().toString() : null);
Long limit = info.getLimit();
String limitString;
if (limit != null && limit.equals(CachePoolInfo.LIMIT_UNLIMITED)) {
limitString = "unlimited";
} else {
limitString = "" + limit;
}
row.add(limitString);
Long maxTtl = info.getMaxRelativeExpiryMs();
String maxTtlString = null;
if (maxTtl != null) {
if (maxTtl == CachePoolInfo.RELATIVE_EXPIRY_NEVER) {
maxTtlString = "never";
} else {
maxTtlString = DFSUtil.durationToString(maxTtl);
}
}
row.add(maxTtlString);
if (printStats) {
CachePoolStats stats = entry.getStats();
row.add(Long.toString(stats.getBytesNeeded()));
row.add(Long.toString(stats.getBytesCached()));
row.add(Long.toString(stats.getBytesOverlimit()));
row.add(Long.toString(stats.getFilesNeeded()));
row.add(Long.toString(stats.getFilesCached()));
}
listing.addRow(row.toArray(new String[row.size()]));
++numResults;
if (name != null) {
break;
}
}
}
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
System.out.print(String.format("Found %d result%s.%n", numResults,
(numResults == 1 ? "" : "s")));
if (numResults > 0) {
System.out.print(listing);
}
// If list pools succeed, we return 0 (success exit code)
return 0;
}
}
private static final AdminHelper.Command[] COMMANDS = {
new AddCacheDirectiveInfoCommand(),
new ModifyCacheDirectiveInfoCommand(),
new ListCacheDirectiveInfoCommand(),
new RemoveCacheDirectiveInfoCommand(),
new RemoveCacheDirectiveInfosCommand(),
new AddCachePoolCommand(),
new ModifyCachePoolCommand(),
new RemoveCachePoolCommand(),
new ListCachePoolsCommand()
};
}
| 33,809 | 35.083244 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.tools.GetGroupsBase;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.util.ToolRunner;
/**
* HDFS implementation of a tool for getting the groups which a given user
* belongs to.
*/
@InterfaceAudience.Private
public class GetGroups extends GetGroupsBase {
private static final Log LOG = LogFactory.getLog(GetGroups.class);
static final String USAGE = "Usage: hdfs groups [username ...]";
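  // Example invocation, for illustration only (the user names are hypothetical):
  //
  //   hdfs groups alice bob
  //
  // prints each user's group membership as resolved through the NameNode's
  // GetUserMappingsProtocol.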
  static {
HdfsConfiguration.init();
}
public GetGroups(Configuration conf) {
super(conf);
}
public GetGroups(Configuration conf, PrintStream out) {
super(conf, out);
}
@Override
protected InetSocketAddress getProtocolAddress(Configuration conf)
throws IOException {
return NameNode.getAddress(conf);
}
@Override
public void setConf(Configuration conf) {
conf = new HdfsConfiguration(conf);
String nameNodePrincipal = conf.get(
DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "");
if (LOG.isDebugEnabled()) {
LOG.debug("Using NN principal: " + nameNodePrincipal);
}
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
nameNodePrincipal);
super.setConf(conf);
}
@Override
protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
return NameNodeProxies.createProxy(getConf(), FileSystem.getDefaultUri(getConf()),
GetUserMappingsProtocol.class).getProxy();
}
public static void main(String[] argv) throws Exception {
if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
System.exit(0);
}
int res = ToolRunner.run(new GetGroups(new HdfsConfiguration()), argv);
System.exit(res);
}
}
| 3,207 | 31.40404 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* Tool for getting configuration information from a configuration file.
*
* Adding more options:
* <ul>
* <li>
* If adding a simple option to get a value corresponding to a key in the
* configuration, use regular {@link GetConf.CommandHandler}.
* See {@link GetConf.Command#EXCLUDE_FILE} example.
* </li>
* <li>
 * If adding an option that does not return a value for a key, add
* a subclass of {@link GetConf.CommandHandler} and set it up in
* {@link GetConf.Command}.
*
* See {@link GetConf.Command#NAMENODE} for example.
*
 * For the new option added, add a map entry with the corresponding
* {@link GetConf.CommandHandler}.
* </ul>
*/
public class GetConf extends Configured implements Tool {
  private static final String DESCRIPTION = "hdfs getconf is a utility for "
+ "getting configuration information from the config file.\n";
enum Command {
NAMENODE("-namenodes", "gets list of namenodes in the cluster."),
SECONDARY("-secondaryNameNodes",
"gets list of secondary namenodes in the cluster."),
BACKUP("-backupNodes", "gets list of backup nodes in the cluster."),
INCLUDE_FILE("-includeFile",
"gets the include file path that defines the datanodes " +
"that can join the cluster."),
EXCLUDE_FILE("-excludeFile",
"gets the exclude file path that defines the datanodes " +
"that need to decommissioned."),
NNRPCADDRESSES("-nnRpcAddresses", "gets the namenode rpc addresses"),
CONFKEY("-confKey [key]", "gets a specific key from the configuration");
private static final Map<String, CommandHandler> map;
static {
map = new HashMap<String, CommandHandler>();
map.put(StringUtils.toLowerCase(NAMENODE.getName()),
new NameNodesCommandHandler());
map.put(StringUtils.toLowerCase(SECONDARY.getName()),
new SecondaryNameNodesCommandHandler());
map.put(StringUtils.toLowerCase(BACKUP.getName()),
new BackupNodesCommandHandler());
map.put(StringUtils.toLowerCase(INCLUDE_FILE.getName()),
new CommandHandler(DFSConfigKeys.DFS_HOSTS));
map.put(StringUtils.toLowerCase(EXCLUDE_FILE.getName()),
new CommandHandler(DFSConfigKeys.DFS_HOSTS_EXCLUDE));
map.put(StringUtils.toLowerCase(NNRPCADDRESSES.getName()),
new NNRpcAddressesCommandHandler());
map.put(StringUtils.toLowerCase(CONFKEY.getName()),
new PrintConfKeyCommandHandler());
}
private final String cmd;
private final String description;
Command(String cmd, String description) {
this.cmd = cmd;
this.description = description;
}
public String getName() {
return cmd.split(" ")[0];
}
public String getUsage() {
return cmd;
}
public String getDescription() {
return description;
}
public static CommandHandler getHandler(String cmd) {
return map.get(StringUtils.toLowerCase(cmd));
}
}
static final String USAGE;
static {
HdfsConfiguration.init();
/* Initialize USAGE based on Command values */
StringBuilder usage = new StringBuilder(DESCRIPTION);
usage.append("\nhadoop getconf \n");
for (Command cmd : Command.values()) {
usage.append("\t[" + cmd.getUsage() + "]\t\t\t" + cmd.getDescription()
+ "\n");
}
USAGE = usage.toString();
}
/**
* Handler to return value for key corresponding to the {@link Command}
*/
static class CommandHandler {
String key; // Configuration key to lookup
CommandHandler() {
this(null);
}
CommandHandler(String key) {
this.key = key;
}
final int doWork(GetConf tool, String[] args) {
try {
checkArgs(args);
return doWorkInternal(tool, args);
} catch (Exception e) {
tool.printError(e.getMessage());
}
return -1;
}
protected void checkArgs(String args[]) {
if (args.length > 0) {
throw new HadoopIllegalArgumentException(
"Did not expect argument: " + args[0]);
}
}
    /** Method to be overridden by subclasses for specific behavior. */
int doWorkInternal(GetConf tool, String[] args) throws Exception {
String value = tool.getConf().getTrimmed(key);
if (value != null) {
tool.printOut(value);
return 0;
}
tool.printError("Configuration " + key + " is missing.");
return -1;
}
}
/**
* Handler for {@link Command#NAMENODE}
*/
static class NameNodesCommandHandler extends CommandHandler {
@Override
int doWorkInternal(GetConf tool, String []args) throws IOException {
tool.printMap(DFSUtil.getNNServiceRpcAddressesForCluster(tool.getConf()));
return 0;
}
}
/**
* Handler for {@link Command#BACKUP}
*/
static class BackupNodesCommandHandler extends CommandHandler {
@Override
public int doWorkInternal(GetConf tool, String []args) throws IOException {
tool.printMap(DFSUtil.getBackupNodeAddresses(tool.getConf()));
return 0;
}
}
/**
* Handler for {@link Command#SECONDARY}
*/
static class SecondaryNameNodesCommandHandler extends CommandHandler {
@Override
public int doWorkInternal(GetConf tool, String []args) throws IOException {
tool.printMap(DFSUtil.getSecondaryNameNodeAddresses(tool.getConf()));
return 0;
}
}
/**
* Handler for {@link Command#NNRPCADDRESSES}
   * If RPC addresses are defined in the configuration, they are printed one
   * per line. Otherwise, an error is printed and -1 is returned.
*/
static class NNRpcAddressesCommandHandler extends CommandHandler {
@Override
public int doWorkInternal(GetConf tool, String []args) throws IOException {
Configuration config = tool.getConf();
List<ConfiguredNNAddress> cnnlist = DFSUtil.flattenAddressMap(
DFSUtil.getNNServiceRpcAddressesForCluster(config));
if (!cnnlist.isEmpty()) {
for (ConfiguredNNAddress cnn : cnnlist) {
InetSocketAddress rpc = cnn.getAddress();
tool.printOut(rpc.getHostName()+":"+rpc.getPort());
}
return 0;
}
tool.printError("Did not get namenode service rpc addresses.");
return -1;
}
}
static class PrintConfKeyCommandHandler extends CommandHandler {
@Override
protected void checkArgs(String[] args) {
if (args.length != 1) {
throw new HadoopIllegalArgumentException(
"usage: " + Command.CONFKEY.getUsage());
}
}
@Override
int doWorkInternal(GetConf tool, String[] args) throws Exception {
this.key = args[0];
return super.doWorkInternal(tool, args);
}
}
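  // Illustrative sketch only, not part of the tool: following the recipe in
  // the class javadoc, a simple value-returning option would reuse
  // CommandHandler with a configuration key, while an option with custom
  // behavior would subclass it. The option name and key below are hypothetical.
  //
  //   // in enum Command:
  //   FOO("-fooOption", "gets the hypothetical foo setting"),
  //   // in the static initializer:
  //   map.put(StringUtils.toLowerCase(FOO.getName()),
  //       new CommandHandler("dfs.hypothetical.foo.key"));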
private final PrintStream out; // Stream for printing command output
private final PrintStream err; // Stream for printing error
GetConf(Configuration conf) {
this(conf, System.out, System.err);
}
GetConf(Configuration conf, PrintStream out, PrintStream err) {
super(conf);
this.out = out;
this.err = err;
}
void printError(String message) {
err.println(message);
}
void printOut(String message) {
out.println(message);
}
void printMap(Map<String, Map<String, InetSocketAddress>> map) {
StringBuilder buffer = new StringBuilder();
List<ConfiguredNNAddress> cnns = DFSUtil.flattenAddressMap(map);
for (ConfiguredNNAddress cnn : cnns) {
InetSocketAddress address = cnn.getAddress();
if (buffer.length() > 0) {
buffer.append(" ");
}
buffer.append(address.getHostName());
}
printOut(buffer.toString());
}
private void printUsage() {
printError(USAGE);
}
/**
* Main method that runs the tool for given arguments.
* @param args arguments
* @return return status of the command
*/
private int doWork(String[] args) {
if (args.length >= 1) {
CommandHandler handler = Command.getHandler(args[0]);
if (handler != null) {
return handler.doWork(this,
Arrays.copyOfRange(args, 1, args.length));
}
}
printUsage();
return -1;
}
@Override
public int run(final String[] args) throws Exception {
try {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<Integer>() {
@Override
public Integer run() throws Exception {
return doWork(args);
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
}
public static void main(String[] args) throws Exception {
if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
System.exit(0);
}
int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args);
System.exit(res);
}
}
| 10,390 | 29.925595 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.tools.TableListing;
import java.io.IOException;
import java.util.List;
/**
* Helper methods for CacheAdmin/CryptoAdmin/StoragePolicyAdmin
*/
public class AdminHelper {
/**
* Maximum length for printed lines
*/
static final int MAX_LINE_WIDTH = 80;
static final String HELP_COMMAND_NAME = "-help";
static DistributedFileSystem getDFS(Configuration conf)
throws IOException {
FileSystem fs = FileSystem.get(conf);
if (!(fs instanceof DistributedFileSystem)) {
throw new IllegalArgumentException("FileSystem " + fs.getUri() +
" is not an HDFS file system");
}
return (DistributedFileSystem)fs;
}
/**
* NN exceptions contain the stack trace as part of the exception message.
* When it's a known error, pretty-print the error and squish the stack trace.
*/
static String prettifyException(Exception e) {
return e.getClass().getSimpleName() + ": "
+ e.getLocalizedMessage().split("\n")[0];
}
static TableListing getOptionDescriptionListing() {
return new TableListing.Builder()
.addField("").addField("", true)
.wrapWidth(MAX_LINE_WIDTH).hideHeaders().build();
}
/**
* Parses a time-to-live value from a string
* @return The ttl in milliseconds
* @throws IOException if it could not be parsed
*/
static Long parseTtlString(String maxTtlString) throws IOException {
Long maxTtl = null;
if (maxTtlString != null) {
if (maxTtlString.equalsIgnoreCase("never")) {
maxTtl = CachePoolInfo.RELATIVE_EXPIRY_NEVER;
} else {
maxTtl = DFSUtil.parseRelativeTime(maxTtlString);
}
}
return maxTtl;
}
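  // Illustrative examples (not part of the original source). The millisecond
  // value assumes DFSUtil.parseRelativeTime("2d") yields
  // 2 * 24 * 60 * 60 * 1000 = 172800000 ms.
  //
  //   parseTtlString(null);     // -> null
  //   parseTtlString("never");  // -> CachePoolInfo.RELATIVE_EXPIRY_NEVER
  //   parseTtlString("2d");     // -> 172800000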
static Long parseLimitString(String limitString) {
Long limit = null;
if (limitString != null) {
if (limitString.equalsIgnoreCase("unlimited")) {
limit = CachePoolInfo.LIMIT_UNLIMITED;
} else {
limit = Long.parseLong(limitString);
}
}
return limit;
}
static Command determineCommand(String commandName, Command[] commands) {
Preconditions.checkNotNull(commands);
if (HELP_COMMAND_NAME.equals(commandName)) {
return new HelpCommand(commands);
}
for (Command command : commands) {
if (command.getName().equals(commandName)) {
return command;
}
}
return null;
}
static void printUsage(boolean longUsage, String toolName,
Command[] commands) {
Preconditions.checkNotNull(commands);
System.err.println("Usage: bin/hdfs " + toolName + " [COMMAND]");
final HelpCommand helpCommand = new HelpCommand(commands);
for (AdminHelper.Command command : commands) {
if (longUsage) {
System.err.print(command.getLongUsage());
} else {
System.err.print(" " + command.getShortUsage());
}
}
System.err.print(longUsage ? helpCommand.getLongUsage() :
(" " + helpCommand.getShortUsage()));
System.err.println();
}
interface Command {
String getName();
String getShortUsage();
String getLongUsage();
int run(Configuration conf, List<String> args) throws IOException;
}
static class HelpCommand implements Command {
private final Command[] commands;
public HelpCommand(Command[] commands) {
      Preconditions.checkNotNull(commands);
this.commands = commands;
}
@Override
public String getName() {
return HELP_COMMAND_NAME;
}
@Override
public String getShortUsage() {
return "[-help <command-name>]\n";
}
@Override
public String getLongUsage() {
final TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("<command-name>", "The command for which to get " +
"detailed help. If no command is specified, print detailed help for " +
"all commands");
return getShortUsage() + "\n" +
"Get detailed help about a command.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
if (args.size() == 0) {
for (AdminHelper.Command command : commands) {
System.err.println(command.getLongUsage());
}
return 0;
}
if (args.size() != 1) {
System.out.println("You must give exactly one argument to -help.");
return 0;
}
final String commandName = args.get(0);
// prepend a dash to match against the command names
final AdminHelper.Command command = AdminHelper
.determineCommand("-" + commandName, commands);
if (command == null) {
System.err.print("Unknown command '" + commandName + "'.\n");
System.err.print("Valid help command names are:\n");
String separator = "";
for (AdminHelper.Command c : commands) {
System.err.print(separator + c.getName().substring(1));
separator = ", ";
}
System.err.print("\n");
return 1;
}
System.err.print(command.getLongUsage());
return 0;
}
}
}
| 6,219 | 31.227979 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.net.InetSocketAddress;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.BadFencingConfigurationException;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.NodeFencer;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetUtils;
import com.google.common.base.Preconditions;
/**
 * One of the NameNodes acting as the target of an administrative command
* (e.g. failover).
*/
@InterfaceAudience.Private
public class NNHAServiceTarget extends HAServiceTarget {
// Keys added to the fencing script environment
private static final String NAMESERVICE_ID_KEY = "nameserviceid";
private static final String NAMENODE_ID_KEY = "namenodeid";
private final InetSocketAddress addr;
private InetSocketAddress zkfcAddr;
private NodeFencer fencer;
private BadFencingConfigurationException fenceConfigError;
private final String nnId;
private final String nsId;
private final boolean autoFailoverEnabled;
public NNHAServiceTarget(Configuration conf,
String nsId, String nnId) {
Preconditions.checkNotNull(nnId);
if (nsId == null) {
nsId = DFSUtil.getOnlyNameServiceIdOrNull(conf);
if (nsId == null) {
throw new IllegalArgumentException(
"Unable to determine the nameservice id.");
}
}
assert nsId != null;
// Make a copy of the conf, and override configs based on the
// target node -- not the node we happen to be running on.
HdfsConfiguration targetConf = new HdfsConfiguration(conf);
NameNode.initializeGenericKeys(targetConf, nsId, nnId);
String serviceAddr =
DFSUtil.getNamenodeServiceAddr(targetConf, nsId, nnId);
if (serviceAddr == null) {
throw new IllegalArgumentException(
"Unable to determine service address for namenode '" + nnId + "'");
}
this.addr = NetUtils.createSocketAddr(serviceAddr,
NameNode.DEFAULT_PORT);
this.autoFailoverEnabled = targetConf.getBoolean(
DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
if (autoFailoverEnabled) {
int port = DFSZKFailoverController.getZkfcPort(targetConf);
if (port != 0) {
setZkfcPort(port);
}
}
try {
this.fencer = NodeFencer.create(targetConf,
DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
} catch (BadFencingConfigurationException e) {
this.fenceConfigError = e;
}
this.nnId = nnId;
this.nsId = nsId;
}
/**
* @return the NN's IPC address.
*/
@Override
public InetSocketAddress getAddress() {
return addr;
}
@Override
public InetSocketAddress getZKFCAddress() {
Preconditions.checkState(autoFailoverEnabled,
"ZKFC address not relevant when auto failover is off");
assert zkfcAddr != null;
return zkfcAddr;
}
void setZkfcPort(int port) {
assert autoFailoverEnabled;
this.zkfcAddr = new InetSocketAddress(addr.getAddress(), port);
}
@Override
public void checkFencingConfigured() throws BadFencingConfigurationException {
if (fenceConfigError != null) {
throw fenceConfigError;
}
if (fencer == null) {
throw new BadFencingConfigurationException(
"No fencer configured for " + this);
}
}
@Override
public NodeFencer getFencer() {
return fencer;
}
@Override
public String toString() {
return "NameNode at " + addr;
}
public String getNameServiceId() {
return this.nsId;
}
public String getNameNodeId() {
return this.nnId;
}
@Override
protected void addFencingParameters(Map<String, String> ret) {
super.addFencingParameters(ret);
ret.put(NAMESERVICE_ID_KEY, getNameServiceId());
ret.put(NAMENODE_ID_KEY, getNameNodeId());
}
@Override
public boolean isAutoFailoverEnabled() {
return autoFailoverEnabled;
}
}
| 5,034 | 28.970238 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Pattern;
import javax.management.AttributeNotFoundException;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.ExitUtil;
/**
 * Tool to get data from NameNode or DataNode using MBeans. Currently the
 * following MBeans are available (under the hadoop domain):
* hadoop:service=NameNode,name=FSNamesystemState (static)
* hadoop:service=NameNode,name=NameNodeActivity (dynamic)
* hadoop:service=NameNode,name=RpcActivityForPort9000 (dynamic)
* hadoop:service=DataNode,name=RpcActivityForPort50020 (dynamic)
 * hadoop:service=DataNode,name=FSDatasetState-UndefinedStorageId663800459
* (static)
* hadoop:service=DataNode,name=DataNodeActivity-UndefinedStorageId-520845215
* (dynamic)
*
*
 * Implementation note: all logging is sent to System.err (since it is a
 * command-line tool).
*/
@InterfaceAudience.Private
public class JMXGet {
private static final String format = "%s=%s%n";
private ArrayList<ObjectName> hadoopObjectNames;
private MBeanServerConnection mbsc;
private String service = "NameNode", port = "", server = "localhost";
private String localVMUrl = null;
public JMXGet() {
}
public void setService(String service) {
this.service = service;
}
public void setPort(String port) {
this.port = port;
}
public void setServer(String server) {
this.server = server;
}
public void setLocalVMUrl(String url) {
this.localVMUrl = url;
}
/**
* print all attributes' values
*/
public void printAllValues() throws Exception {
err("List of all the available keys:");
Object val = null;
for (ObjectName oname : hadoopObjectNames) {
err(">>>>>>>>jmx name: " + oname.getCanonicalKeyPropertyListString());
MBeanInfo mbinfo = mbsc.getMBeanInfo(oname);
MBeanAttributeInfo[] mbinfos = mbinfo.getAttributes();
for (MBeanAttributeInfo mb : mbinfos) {
val = mbsc.getAttribute(oname, mb.getName());
System.out.format(format, mb.getName(), (val==null)?"":val.toString());
}
}
}
public void printAllMatchedAttributes(String attrRegExp) throws Exception {
err("List of the keys matching " + attrRegExp + " :");
Object val = null;
Pattern p = Pattern.compile(attrRegExp);
for (ObjectName oname : hadoopObjectNames) {
err(">>>>>>>>jmx name: " + oname.getCanonicalKeyPropertyListString());
MBeanInfo mbinfo = mbsc.getMBeanInfo(oname);
MBeanAttributeInfo[] mbinfos = mbinfo.getAttributes();
for (MBeanAttributeInfo mb : mbinfos) {
if (p.matcher(mb.getName()).lookingAt()) {
val = mbsc.getAttribute(oname, mb.getName());
System.out.format(format, mb.getName(), (val == null) ? "" : val.toString());
}
}
}
}
/**
* get single value by key
*/
public String getValue(String key) throws Exception {
Object val = null;
for (ObjectName oname : hadoopObjectNames) {
try {
val = mbsc.getAttribute(oname, key);
} catch (AttributeNotFoundException anfe) {
/* just go to the next */
continue;
} catch (ReflectionException re) {
if (re.getCause() instanceof NoSuchMethodException) {
continue;
}
}
err("Info: key = " + key + "; val = " +
(val == null ? "null" : val.getClass()) + ":" + val);
break;
}
return (val == null) ? "" : val.toString();
}
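  // Illustrative programmatic use (not part of the original source); the host,
  // port and attribute name below are hypothetical.
  //
  //   JMXGet jmx = new JMXGet();
  //   jmx.setService("NameNode");
  //   jmx.setServer("nn.example.com");
  //   jmx.setPort("8004");
  //   jmx.init();
  //   String value = jmx.getValue("MissingBlocks");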
/**
* @throws Exception
* initializes MBeanServer
*/
public void init() throws Exception {
err("init: server=" + server + ";port=" + port + ";service=" + service
+ ";localVMUrl=" + localVMUrl);
String url_string = null;
// build connection url
if (localVMUrl != null) {
// use
// jstat -snap <vmpid> | grep sun.management.JMXConnectorServer.address
// to get url
url_string = localVMUrl;
err("url string for local pid = " + localVMUrl + " = " + url_string);
} else if (!port.isEmpty() && !server.isEmpty()) {
// using server and port
url_string = "service:jmx:rmi:///jndi/rmi://" + server + ":" + port
+ "/jmxrmi";
} // else url stays null
// Create an RMI connector client and
// connect it to the RMI connector server
if (url_string == null) { // assume local vm (for example for Testing)
mbsc = ManagementFactory.getPlatformMBeanServer();
} else {
JMXServiceURL url = new JMXServiceURL(url_string);
err("Create RMI connector and connect to the RMI connector server" + url);
JMXConnector jmxc = JMXConnectorFactory.connect(url, null);
// Get an MBeanServerConnection
//
err("\nGet an MBeanServerConnection");
mbsc = jmxc.getMBeanServerConnection();
}
// Get domains from MBeanServer
//
err("\nDomains:");
String domains[] = mbsc.getDomains();
Arrays.sort(domains);
for (String domain : domains) {
err("\tDomain = " + domain);
}
// Get MBeanServer's default domain
//
err("\nMBeanServer default domain = " + mbsc.getDefaultDomain());
// Get MBean count
//
err("\nMBean count = " + mbsc.getMBeanCount());
    // Query MBean names for the specific "Hadoop" domain and service
ObjectName query = new ObjectName("Hadoop:service=" + service + ",*");
hadoopObjectNames = new ArrayList<ObjectName>(5);
err("\nQuery MBeanServer MBeans:");
Set<ObjectName> names = new TreeSet<ObjectName>(mbsc
.queryNames(query, null));
for (ObjectName name : names) {
hadoopObjectNames.add(name);
err("Hadoop service: " + name);
}
}
/**
* Print JMXGet usage information
*/
static void printUsage(Options opts) {
HelpFormatter formatter = new HelpFormatter();
formatter.printHelp("jmxget options are: ", opts);
}
/**
* @param msg error message
*/
private static void err(String msg) {
System.err.println(msg);
}
/**
* parse args
*/
private static CommandLine parseArgs(Options opts, String... args)
throws IllegalArgumentException {
OptionBuilder.withArgName("NameNode|DataNode");
OptionBuilder.hasArg();
OptionBuilder.withDescription("specify jmx service (NameNode by default)");
Option jmx_service = OptionBuilder.create("service");
OptionBuilder.withArgName("mbean server");
OptionBuilder.hasArg();
OptionBuilder
.withDescription("specify mbean server (localhost by default)");
Option jmx_server = OptionBuilder.create("server");
OptionBuilder.withDescription("print help");
Option jmx_help = OptionBuilder.create("help");
OptionBuilder.withArgName("mbean server port");
OptionBuilder.hasArg();
OptionBuilder.withDescription("specify mbean server port, "
+ "if missing - it will try to connect to MBean Server in the same VM");
Option jmx_port = OptionBuilder.create("port");
OptionBuilder.withArgName("VM's connector url");
OptionBuilder.hasArg();
OptionBuilder.withDescription("connect to the VM on the same machine;"
+ "\n use:\n jstat -J-Djstat.showUnsupported=true -snap <vmpid> | "
+ "grep sun.management.JMXConnectorServer.address\n "
+ "to find the url");
Option jmx_localVM = OptionBuilder.create("localVM");
opts.addOption(jmx_server);
opts.addOption(jmx_help);
opts.addOption(jmx_service);
opts.addOption(jmx_port);
opts.addOption(jmx_localVM);
CommandLine commandLine = null;
CommandLineParser parser = new GnuParser();
try {
commandLine = parser.parse(opts, args, true);
} catch (ParseException e) {
printUsage(opts);
throw new IllegalArgumentException("invalid args: " + e.getMessage());
}
return commandLine;
}
public static void main(String[] args) {
int res = -1;
// parse arguments
Options opts = new Options();
CommandLine commandLine = null;
try {
commandLine = parseArgs(opts, args);
} catch (IllegalArgumentException iae) {
commandLine = null;
}
if (commandLine == null) {
// invalid arguments
err("Invalid args");
printUsage(opts);
ExitUtil.terminate(-1);
}
JMXGet jm = new JMXGet();
if (commandLine.hasOption("port")) {
jm.setPort(commandLine.getOptionValue("port"));
}
if (commandLine.hasOption("service")) {
jm.setService(commandLine.getOptionValue("service"));
}
if (commandLine.hasOption("server")) {
jm.setServer(commandLine.getOptionValue("server"));
}
if (commandLine.hasOption("localVM")) {
// from the file /tmp/hsperfdata*
jm.setLocalVMUrl(commandLine.getOptionValue("localVM"));
}
if (commandLine.hasOption("help")) {
printUsage(opts);
ExitUtil.terminate(0);
}
// rest of args
args = commandLine.getArgs();
try {
jm.init();
if (args.length == 0) {
jm.printAllValues();
} else {
for (String key : args) {
err("key = " + key);
String val = jm.getValue(key);
if (val != null)
System.out.format(JMXGet.format, key, val);
}
}
res = 0;
} catch (Exception re) {
re.printStackTrace();
res = -1;
}
ExitUtil.terminate(res);
}
}
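// A minimal usage sketch for the tool above, driving JMXGet programmatically.
// The JMX RMI port (8004) and the attribute name ("MissingBlocks") are
// illustrative assumptions; they depend on how the target NameNode JVM and
// its metrics are configured.
class JMXGetExample {
  public static void main(String[] args) throws Exception {
    JMXGet jm = new JMXGet();
    jm.setService("NameNode");   // or "DataNode"
    jm.setServer("localhost");
    jm.setPort("8004");          // JMX RMI port of the target JVM (assumed)
    jm.init();                   // connect and cache the Hadoop:* MBean names
    // printAllValues() would dump every attribute; here we read a single key.
    System.out.println("MissingBlocks = " + jm.getValue("MissingBlocks"));
  }
}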
| 10,981 | 29.421053 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.net.URLEncoder;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* This class provides rudimentary checking of DFS volumes for errors and
* sub-optimal conditions.
* <p>The tool scans all files and directories, starting from an indicated
* root path. The following abnormal conditions are detected and handled:</p>
* <ul>
* <li>files with blocks that are completely missing from all datanodes.<br/>
* In this case the tool can perform one of the following actions:
* <ul>
* <li>none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})</li>
* <li>move corrupted files to /lost+found directory on DFS
 *      ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as
 *      block chains, representing the longest consecutive series of valid blocks.</li>
* <li>delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})</li>
* </ul>
* </li>
* <li>detect files with under-replicated or over-replicated blocks</li>
* </ul>
 *  Additionally, the tool collects detailed overall DFS statistics, and
 *  optionally can print detailed statistics on block locations and replication
 *  factors of each file.
 *  The tool also provides an option to filter open files during the scan.
*
*/
@InterfaceAudience.Private
public class DFSck extends Configured implements Tool {
static{
HdfsConfiguration.init();
}
private static final String USAGE = "Usage: hdfs fsck <path> "
+ "[-list-corruptfileblocks | "
+ "[-move | -delete | -openforwrite] "
+ "[-files [-blocks [-locations | -racks | -replicaDetails]]]] "
+ "[-includeSnapshots] "
+ "[-storagepolicies] [-blockId <blk_Id>]\n"
+ "\t<path>\tstart checking from this path\n"
+ "\t-move\tmove corrupted files to /lost+found\n"
+ "\t-delete\tdelete corrupted files\n"
+ "\t-files\tprint out files being checked\n"
+ "\t-openforwrite\tprint out files opened for write\n"
+ "\t-includeSnapshots\tinclude snapshot data if the given path"
+ " indicates a snapshottable directory or there are "
+ "snapshottable directories under it\n"
+ "\t-list-corruptfileblocks\tprint out list of missing "
+ "blocks and files they belong to\n"
+ "\t-files -blocks\tprint out block report\n"
+ "\t-files -blocks -locations\tprint out locations for every block\n"
+ "\t-files -blocks -racks"
+ "\tprint out network topology for data-node locations\n"
+ "\t-files -blocks -replicaDetails\tprint out each replica details \n"
+ "\t-storagepolicies\tprint out storage policy summary for the blocks\n"
+ "\t-blockId\tprint out which file this blockId belongs to, locations"
+ " (nodes, racks) of this block, and other diagnostics info"
+ " (under replicated, corrupted or not, etc)\n\n"
+ "Please Note:\n"
+ "\t1. By default fsck ignores files opened for write, "
+ "use -openforwrite to report such files. They are usually "
+ " tagged CORRUPT or HEALTHY depending on their block "
+ "allocation status\n"
+ "\t2. Option -includeSnapshots should not be used for comparing stats,"
+ " should be used only for HEALTH check, as this may contain duplicates"
+ " if the same file present in both original fs tree "
+ "and inside snapshots.";
private final UserGroupInformation ugi;
private final PrintStream out;
private final URLConnectionFactory connectionFactory;
private final boolean isSpnegoEnabled;
/**
* Filesystem checker.
* @param conf current Configuration
*/
public DFSck(Configuration conf) throws IOException {
this(conf, System.out);
}
public DFSck(Configuration conf, PrintStream out) throws IOException {
super(conf);
this.ugi = UserGroupInformation.getCurrentUser();
this.out = out;
this.connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
}
/**
* Print fsck usage information
*/
static void printUsage(PrintStream out) {
out.println(USAGE + "\n");
ToolRunner.printGenericCommandUsage(out);
}
@Override
public int run(final String[] args) throws IOException {
if (args.length == 0) {
printUsage(System.err);
return -1;
}
try {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<Integer>() {
@Override
public Integer run() throws Exception {
return doWork(args);
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
}
/*
* To get the list, we need to call iteratively until the server says
* there is no more left.
*/
private Integer listCorruptFileBlocks(String dir, String baseUrl)
throws IOException {
int errCode = -1;
int numCorrupt = 0;
int cookie = 0;
final String noCorruptLine = "has no CORRUPT files";
final String noMoreCorruptLine = "has no more CORRUPT files";
final String cookiePrefix = "Cookie:";
boolean allDone = false;
while (!allDone) {
final StringBuffer url = new StringBuffer(baseUrl);
if (cookie > 0) {
url.append("&startblockafter=").append(String.valueOf(cookie));
}
URL path = new URL(url.toString());
URLConnection connection;
try {
connection = connectionFactory.openConnection(path, isSpnegoEnabled);
} catch (AuthenticationException e) {
throw new IOException(e);
}
InputStream stream = connection.getInputStream();
BufferedReader input = new BufferedReader(new InputStreamReader(
stream, "UTF-8"));
try {
String line = null;
while ((line = input.readLine()) != null) {
if (line.startsWith(cookiePrefix)){
try{
cookie = Integer.parseInt(line.split("\t")[1]);
} catch (Exception e){
allDone = true;
break;
}
continue;
}
if ((line.endsWith(noCorruptLine)) ||
(line.endsWith(noMoreCorruptLine)) ||
(line.endsWith(NamenodeFsck.NONEXISTENT_STATUS))) {
allDone = true;
break;
}
if ((line.isEmpty())
|| (line.startsWith("FSCK started by"))
|| (line.startsWith("The filesystem under path")))
continue;
numCorrupt++;
if (numCorrupt == 1) {
out.println("The list of corrupt files under path '"
+ dir + "' are:");
}
out.println(line);
}
} finally {
input.close();
}
}
out.println("The filesystem under path '" + dir + "' has "
+ numCorrupt + " CORRUPT files");
if (numCorrupt == 0)
errCode = 0;
return errCode;
}
private Path getResolvedPath(String dir) throws IOException {
Configuration conf = getConf();
Path dirPath = new Path(dir);
FileSystem fs = dirPath.getFileSystem(conf);
return fs.resolvePath(dirPath);
}
/**
* Derive the namenode http address from the current file system,
* either default or as set by "-fs" in the generic options.
* @return Returns http address or null if failure.
* @throws IOException if we can't determine the active NN address
*/
private URI getCurrentNamenodeAddress(Path target) throws IOException {
//String nnAddress = null;
Configuration conf = getConf();
//get the filesystem object to verify it is an HDFS system
final FileSystem fs = target.getFileSystem(conf);
if (!(fs instanceof DistributedFileSystem)) {
System.err.println("FileSystem is " + fs.getUri());
return null;
}
return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
DFSUtil.getHttpClientScheme(conf));
}
private int doWork(final String[] args) throws IOException {
final StringBuilder url = new StringBuilder();
url.append("/fsck?ugi=").append(ugi.getShortUserName());
String dir = null;
boolean doListCorruptFileBlocks = false;
for (int idx = 0; idx < args.length; idx++) {
if (args[idx].equals("-move")) { url.append("&move=1"); }
else if (args[idx].equals("-delete")) { url.append("&delete=1"); }
else if (args[idx].equals("-files")) { url.append("&files=1"); }
else if (args[idx].equals("-openforwrite")) { url.append("&openforwrite=1"); }
else if (args[idx].equals("-blocks")) { url.append("&blocks=1"); }
else if (args[idx].equals("-locations")) { url.append("&locations=1"); }
else if (args[idx].equals("-racks")) { url.append("&racks=1"); }
else if (args[idx].equals("-replicaDetails")) {
url.append("&replicadetails=1");
}
else if (args[idx].equals("-storagepolicies")) { url.append("&storagepolicies=1"); }
else if (args[idx].equals("-list-corruptfileblocks")) {
url.append("&listcorruptfileblocks=1");
doListCorruptFileBlocks = true;
} else if (args[idx].equals("-includeSnapshots")) {
url.append("&includeSnapshots=1");
} else if (args[idx].equals("-blockId")) {
StringBuilder sb = new StringBuilder();
idx++;
while(idx < args.length && !args[idx].startsWith("-")){
sb.append(args[idx]);
sb.append(" ");
idx++;
}
url.append("&blockId=").append(URLEncoder.encode(sb.toString(), "UTF-8"));
} else if (!args[idx].startsWith("-")) {
if (null == dir) {
dir = args[idx];
} else {
System.err.println("fsck: can only operate on one path at a time '"
+ args[idx] + "'");
printUsage(System.err);
return -1;
}
} else {
System.err.println("fsck: Illegal option '" + args[idx] + "'");
printUsage(System.err);
return -1;
}
}
if (null == dir) {
dir = "/";
}
Path dirpath = null;
URI namenodeAddress = null;
try {
dirpath = getResolvedPath(dir);
namenodeAddress = getCurrentNamenodeAddress(dirpath);
} catch (IOException ioe) {
System.err.println("FileSystem is inaccessible due to:\n"
+ StringUtils.stringifyException(ioe));
}
if (namenodeAddress == null) {
//Error message already output in {@link #getCurrentNamenodeAddress()}
System.err.println("DFSck exiting.");
return 0;
}
url.insert(0, namenodeAddress.toString());
url.append("&path=").append(URLEncoder.encode(
Path.getPathWithoutSchemeAndAuthority(dirpath).toString(), "UTF-8"));
System.err.println("Connecting to namenode via " + url.toString());
if (doListCorruptFileBlocks) {
return listCorruptFileBlocks(dir, url.toString());
}
URL path = new URL(url.toString());
URLConnection connection;
try {
connection = connectionFactory.openConnection(path, isSpnegoEnabled);
} catch (AuthenticationException e) {
throw new IOException(e);
}
InputStream stream = connection.getInputStream();
BufferedReader input = new BufferedReader(new InputStreamReader(
stream, "UTF-8"));
String line = null;
String lastLine = null;
int errCode = -1;
try {
while ((line = input.readLine()) != null) {
out.println(line);
lastLine = line;
}
} finally {
input.close();
}
if (lastLine.endsWith(NamenodeFsck.HEALTHY_STATUS)) {
errCode = 0;
} else if (lastLine.endsWith(NamenodeFsck.CORRUPT_STATUS)) {
errCode = 1;
} else if (lastLine.endsWith(NamenodeFsck.NONEXISTENT_STATUS)) {
errCode = 0;
} else if (lastLine.contains("Incorrect blockId format:")) {
errCode = 0;
} else if (lastLine.endsWith(NamenodeFsck.DECOMMISSIONED_STATUS)) {
errCode = 2;
} else if (lastLine.endsWith(NamenodeFsck.DECOMMISSIONING_STATUS)) {
errCode = 3;
}
return errCode;
}
public static void main(String[] args) throws Exception {
// -files option is also used by GenericOptionsParser
// Make sure that is not the first argument for fsck
int res = -1;
if ((args.length == 0) || ("-files".equals(args[0]))) {
printUsage(System.err);
} else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
res = 0;
} else {
res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
}
System.exit(res);
}
}
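// A minimal sketch of running fsck programmatically, roughly equivalent to
// "hdfs fsck /user/data -files -blocks" on the command line. The path is
// illustrative; the exit code follows the mapping computed in doWork() above
// (0 = healthy, 1 = corrupt, 2 = decommissioned, 3 = decommissioning).
class DFSckExample {
  public static void main(String[] args) throws Exception {
    int rc = ToolRunner.run(new DFSck(new HdfsConfiguration()),
        new String[] {"/user/data", "-files", "-blocks"});
    System.exit(rc);
  }
}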
| 14,502 | 36.572539 | 119 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/HDFSConcat.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
@InterfaceAudience.Private
public class HDFSConcat {
private final static String def_uri = "hdfs://localhost:9000";
public static void main(String... args) throws IOException {
if(args.length < 2) {
System.err.println("Usage HDFSConcat target srcs..");
System.exit(0);
}
Configuration conf = new Configuration();
String uri = conf.get("fs.default.name", def_uri);
Path path = new Path(uri);
DistributedFileSystem dfs =
(DistributedFileSystem)FileSystem.get(path.toUri(), conf);
Path [] srcs = new Path[args.length-1];
for(int i=1; i<args.length; i++) {
srcs[i-1] = new Path(args[i]);
}
dfs.concat(new Path(args[0]), srcs);
}
}
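// A minimal sketch of the same operation through the public API, assuming the
// default file system is HDFS. Paths are illustrative; concat() appends the
// source files onto the target file in order.
class HDFSConcatExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(conf);
    dfs.concat(new Path("/data/part-0"),
        new Path[] {new Path("/data/part-1"), new Path("/data/part-2")});
  }
}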
| 1,820 | 32.109091 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.tools.TableListing;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
/**
* This class implements block storage policy operations.
*/
public class StoragePolicyAdmin extends Configured implements Tool {
public static void main(String[] argsArray) throws Exception {
final StoragePolicyAdmin admin = new StoragePolicyAdmin(new
Configuration());
System.exit(admin.run(argsArray));
}
public StoragePolicyAdmin(Configuration conf) {
super(conf);
}
@Override
public int run(String[] args) throws Exception {
if (args.length == 0) {
AdminHelper.printUsage(false, "storagepolicies", COMMANDS);
return 1;
}
final AdminHelper.Command command = AdminHelper.determineCommand(args[0],
COMMANDS);
if (command == null) {
System.err.println("Can't understand command '" + args[0] + "'");
if (!args[0].startsWith("-")) {
System.err.println("Command names must start with dashes.");
}
AdminHelper.printUsage(false, "storagepolicies", COMMANDS);
return 1;
}
final List<String> argsList = new LinkedList<>();
argsList.addAll(Arrays.asList(args).subList(1, args.length));
try {
return command.run(getConf(), argsList);
} catch (IllegalArgumentException e) {
System.err.println(AdminHelper.prettifyException(e));
return -1;
}
}
/** Command to list all the existing storage policies */
private static class ListStoragePoliciesCommand
implements AdminHelper.Command {
@Override
public String getName() {
return "-listPolicies";
}
@Override
public String getShortUsage() {
return "[" + getName() + "]\n";
}
@Override
public String getLongUsage() {
return getShortUsage() + "\n" +
"List all the existing block storage policies.\n";
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
try {
Collection<BlockStoragePolicy> policies = dfs.getAllStoragePolicies();
System.out.println("Block Storage Policies:");
for (BlockStoragePolicy policy : policies) {
if (policy != null) {
System.out.println("\t" + policy);
}
}
} catch (IOException e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
return 0;
}
}
/** Command to get the storage policy of a file/directory */
private static class GetStoragePolicyCommand implements AdminHelper.Command {
@Override
public String getName() {
return "-getStoragePolicy";
}
@Override
public String getShortUsage() {
return "[" + getName() + " -path <path>]\n";
}
@Override
public String getLongUsage() {
final TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("<path>",
"The path of the file/directory for getting the storage policy");
return getShortUsage() + "\n" +
"Get the storage policy of a file/directory.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
final String path = StringUtils.popOptionWithArgument("-path", args);
if (path == null) {
System.err.println("Please specify the path with -path.\nUsage:" +
getLongUsage());
return 1;
}
final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
try {
HdfsFileStatus status = dfs.getClient().getFileInfo(path);
if (status == null) {
System.err.println("File/Directory does not exist: " + path);
return 2;
}
byte storagePolicyId = status.getStoragePolicy();
if (storagePolicyId == HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED) {
System.out.println("The storage policy of " + path + " is unspecified");
return 0;
}
Collection<BlockStoragePolicy> policies = dfs.getAllStoragePolicies();
for (BlockStoragePolicy p : policies) {
if (p.getId() == storagePolicyId) {
System.out.println("The storage policy of " + path + ":\n" + p);
return 0;
}
}
} catch (Exception e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
System.err.println("Cannot identify the storage policy for " + path);
return 2;
}
}
/** Command to set the storage policy to a file/directory */
private static class SetStoragePolicyCommand implements AdminHelper.Command {
@Override
public String getName() {
return "-setStoragePolicy";
}
@Override
public String getShortUsage() {
return "[" + getName() + " -path <path> -policy <policy>]\n";
}
@Override
public String getLongUsage() {
TableListing listing = AdminHelper.getOptionDescriptionListing();
listing.addRow("<path>", "The path of the file/directory to set storage" +
" policy");
listing.addRow("<policy>", "The name of the block storage policy");
return getShortUsage() + "\n" +
"Set the storage policy to a file/directory.\n\n" +
listing.toString();
}
@Override
public int run(Configuration conf, List<String> args) throws IOException {
final String path = StringUtils.popOptionWithArgument("-path", args);
if (path == null) {
System.err.println("Please specify the path for setting the storage " +
"policy.\nUsage: " + getLongUsage());
return 1;
}
final String policyName = StringUtils.popOptionWithArgument("-policy",
args);
if (policyName == null) {
System.err.println("Please specify the policy name.\nUsage: " +
getLongUsage());
return 1;
}
final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
try {
dfs.setStoragePolicy(new Path(path), policyName);
System.out.println("Set storage policy " + policyName + " on " + path);
} catch (Exception e) {
System.err.println(AdminHelper.prettifyException(e));
return 2;
}
return 0;
}
}
private static final AdminHelper.Command[] COMMANDS = {
new ListStoragePoliciesCommand(),
new SetStoragePolicyCommand(),
new GetStoragePolicyCommand()
};
}
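// A minimal sketch of driving the admin tool programmatically, equivalent to
// "hdfs storagepolicies -setStoragePolicy -path /data/cold -policy COLD".
// The path is illustrative; COLD is one of the built-in block storage policies.
class StoragePolicyAdminExample {
  public static void main(String[] args) throws Exception {
    StoragePolicyAdmin admin = new StoragePolicyAdmin(new Configuration());
    int rc = admin.run(new String[] {
        "-setStoragePolicy", "-path", "/data/cold", "-policy", "COLD"});
    System.exit(rc);
  }
}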
| 7,838 | 32.788793 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.Date;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Options;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.GenericOptionsParser;
import com.google.common.base.Charsets;
/**
* Fetch a DelegationToken from the current Namenode and store it in the
* specified file.
*/
@InterfaceAudience.Private
public class DelegationTokenFetcher {
private static final Log LOG =
LogFactory.getLog(DelegationTokenFetcher.class);
private static final String WEBSERVICE = "webservice";
private static final String RENEWER = "renewer";
private static final String CANCEL = "cancel";
private static final String RENEW = "renew";
private static final String PRINT = "print";
private static final String HELP = "help";
private static final String HELP_SHORT = "h";
private static void printUsage(PrintStream err) {
err.println("fetchdt retrieves delegation tokens from the NameNode");
err.println();
err.println("fetchdt <opts> <token file>");
err.println("Options:");
err.println(" --webservice <url> Url to contact NN on");
err.println(" --renewer <name> Name of the delegation token renewer");
err.println(" --cancel Cancel the delegation token");
err.println(" --renew Renew the delegation token. Delegation "
+ "token must have been fetched using the --renewer <name> option.");
err.println(" --print Print the delegation token");
err.println();
GenericOptionsParser.printGenericCommandUsage(err);
ExitUtil.terminate(1);
}
private static Collection<Token<?>> readTokens(Path file, Configuration conf)
throws IOException {
Credentials creds = Credentials.readTokenStorageFile(file, conf);
return creds.getAllTokens();
}
/**
* Command-line interface
*/
public static void main(final String[] args) throws Exception {
final Configuration conf = new HdfsConfiguration();
Options fetcherOptions = new Options();
fetcherOptions.addOption(WEBSERVICE, true,
"HTTP url to reach the NameNode at");
fetcherOptions.addOption(RENEWER, true,
"Name of the delegation token renewer");
fetcherOptions.addOption(CANCEL, false, "cancel the token");
fetcherOptions.addOption(RENEW, false, "renew the token");
fetcherOptions.addOption(PRINT, false, "print the token");
fetcherOptions.addOption(HELP_SHORT, HELP, false, "print out help information");
GenericOptionsParser parser = new GenericOptionsParser(conf,
fetcherOptions, args);
CommandLine cmd = parser.getCommandLine();
// get options
final String webUrl = cmd.hasOption(WEBSERVICE) ? cmd
.getOptionValue(WEBSERVICE) : null;
final String renewer = cmd.hasOption(RENEWER) ?
cmd.getOptionValue(RENEWER) : null;
final boolean cancel = cmd.hasOption(CANCEL);
final boolean renew = cmd.hasOption(RENEW);
final boolean print = cmd.hasOption(PRINT);
final boolean help = cmd.hasOption(HELP);
String[] remaining = parser.getRemainingArgs();
// check option validity
if (help) {
printUsage(System.out);
System.exit(0);
}
if (cancel && renew || cancel && print || renew && print || cancel && renew
&& print) {
System.err.println("ERROR: Only specify cancel, renew or print.");
printUsage(System.err);
}
if (remaining.length != 1 || remaining[0].charAt(0) == '-') {
System.err.println("ERROR: Must specify exacltly one token file");
printUsage(System.err);
}
// default to using the local file system
FileSystem local = FileSystem.getLocal(conf);
final Path tokenFile = new Path(local.getWorkingDirectory(), remaining[0]);
final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;
// Login the current user
UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
if (print) {
DelegationTokenIdentifier id = new DelegationTokenSecretManager(
0, 0, 0, 0, null).createIdentifier();
for (Token<?> token : readTokens(tokenFile, conf)) {
DataInputStream in = new DataInputStream(
new ByteArrayInputStream(token.getIdentifier()));
id.readFields(in);
System.out.println("Token (" + id + ") for " + token.getService());
}
return null;
}
if (renew) {
for (Token<?> token : readTokens(tokenFile, conf)) {
if (token.isManaged()) {
long result = token.renew(conf);
if (LOG.isDebugEnabled()) {
LOG.debug("Renewed token for " + token.getService()
+ " until: " + new Date(result));
}
}
}
} else if (cancel) {
for(Token<?> token: readTokens(tokenFile, conf)) {
if (token.isManaged()) {
token.cancel(conf);
if (LOG.isDebugEnabled()) {
LOG.debug("Cancelled token for " + token.getService());
}
}
}
} else {
// otherwise we are fetching
if (webUrl != null) {
Credentials creds = getDTfromRemote(connectionFactory, new URI(
webUrl), renewer, null);
creds.writeTokenStorageFile(tokenFile, conf);
for (Token<?> token : creds.getAllTokens()) {
System.out.println("Fetched token via " + webUrl + " for "
+ token.getService() + " into " + tokenFile);
}
} else {
FileSystem fs = FileSystem.get(conf);
Credentials cred = new Credentials();
Token<?> tokens[] = fs.addDelegationTokens(renewer, cred);
cred.writeTokenStorageFile(tokenFile, conf);
for (Token<?> token : tokens) {
System.out.println("Fetched token for " + token.getService()
+ " into " + tokenFile);
}
}
}
return null;
}
});
}
static public Credentials getDTfromRemote(URLConnectionFactory factory,
URI nnUri, String renewer, String proxyUser) throws IOException {
StringBuilder buf = new StringBuilder(nnUri.toString())
.append(GetDelegationTokenServlet.PATH_SPEC);
String separator = "?";
if (renewer != null) {
buf.append("?").append(GetDelegationTokenServlet.RENEWER).append("=")
.append(renewer);
separator = "&";
}
if (proxyUser != null) {
buf.append(separator).append("doas=").append(proxyUser);
}
boolean isHttps = nnUri.getScheme().equals("https");
HttpURLConnection conn = null;
DataInputStream dis = null;
InetSocketAddress serviceAddr = NetUtils.createSocketAddr(nnUri
.getAuthority());
try {
if(LOG.isDebugEnabled()) {
LOG.debug("Retrieving token from: " + buf);
}
conn = run(factory, new URL(buf.toString()));
InputStream in = conn.getInputStream();
Credentials ts = new Credentials();
dis = new DataInputStream(in);
ts.readFields(dis);
for (Token<?> token : ts.getAllTokens()) {
token.setKind(isHttps ? WebHdfsConstants.HSFTP_TOKEN_KIND : WebHdfsConstants.HFTP_TOKEN_KIND);
SecurityUtil.setTokenService(token, serviceAddr);
}
return ts;
} catch (Exception e) {
throw new IOException("Unable to obtain remote token", e);
} finally {
IOUtils.cleanup(LOG, dis);
if (conn != null) {
conn.disconnect();
}
}
}
/**
* Cancel a Delegation Token.
* @param nnAddr the NameNode's address
* @param tok the token to cancel
* @throws IOException
* @throws AuthenticationException
*/
static public void cancelDelegationToken(URLConnectionFactory factory,
URI nnAddr, Token<DelegationTokenIdentifier> tok) throws IOException,
AuthenticationException {
StringBuilder buf = new StringBuilder(nnAddr.toString())
.append(CancelDelegationTokenServlet.PATH_SPEC).append("?")
.append(CancelDelegationTokenServlet.TOKEN).append("=")
.append(tok.encodeToUrlString());
HttpURLConnection conn = run(factory, new URL(buf.toString()));
conn.disconnect();
}
/**
* Renew a Delegation Token.
* @param nnAddr the NameNode's address
* @param tok the token to renew
* @return the Date that the token will expire next.
* @throws IOException
* @throws AuthenticationException
*/
static public long renewDelegationToken(URLConnectionFactory factory,
URI nnAddr, Token<DelegationTokenIdentifier> tok) throws IOException,
AuthenticationException {
StringBuilder buf = new StringBuilder(nnAddr.toString())
.append(RenewDelegationTokenServlet.PATH_SPEC).append("?")
.append(RenewDelegationTokenServlet.TOKEN).append("=")
.append(tok.encodeToUrlString());
HttpURLConnection connection = null;
BufferedReader in = null;
try {
connection = run(factory, new URL(buf.toString()));
in = new BufferedReader(new InputStreamReader(
connection.getInputStream(), Charsets.UTF_8));
long result = Long.parseLong(in.readLine());
return result;
} catch (IOException ie) {
LOG.info("error in renew over HTTP", ie);
IOException e = getExceptionFromResponse(connection);
if (e != null) {
LOG.info("rethrowing exception from HTTP request: "
+ e.getLocalizedMessage());
throw e;
}
throw ie;
} finally {
IOUtils.cleanup(LOG, in);
if (connection != null) {
connection.disconnect();
}
}
}
// parse the message and extract the name of the exception and the message
static private IOException getExceptionFromResponse(HttpURLConnection con) {
IOException e = null;
String resp;
if(con == null)
return null;
try {
resp = con.getResponseMessage();
} catch (IOException ie) { return null; }
if(resp == null || resp.isEmpty())
return null;
String exceptionClass = "", exceptionMsg = "";
String[] rs = resp.split(";");
if(rs.length < 2)
return null;
exceptionClass = rs[0];
exceptionMsg = rs[1];
LOG.info("Error response from HTTP request=" + resp +
";ec=" + exceptionClass + ";em="+exceptionMsg);
if(exceptionClass == null || exceptionClass.isEmpty())
return null;
// recreate exception objects
try {
Class<? extends Exception> ec =
Class.forName(exceptionClass).asSubclass(Exception.class);
// we are interested in constructor with String arguments
java.lang.reflect.Constructor<? extends Exception> constructor =
ec.getConstructor (new Class[] {String.class});
// create an instance
e = (IOException) constructor.newInstance (exceptionMsg);
} catch (Exception ee) {
LOG.warn("failed to create object of this class", ee);
}
if(e == null)
return null;
e.setStackTrace(new StackTraceElement[0]); // local stack is not relevant
LOG.info("Exception from HTTP response=" + e.getLocalizedMessage());
return e;
}
private static HttpURLConnection run(URLConnectionFactory factory, URL url)
throws IOException, AuthenticationException {
HttpURLConnection conn = null;
try {
conn = (HttpURLConnection) factory.openConnection(url, true);
if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) {
String msg = conn.getResponseMessage();
throw new IOException("Error when dealing remote token: " + msg);
}
} catch (IOException ie) {
LOG.info("Error when dealing remote token:", ie);
IOException e = getExceptionFromResponse(conn);
if (e != null) {
LOG.info("rethrowing exception from HTTP request: "
+ e.getLocalizedMessage());
throw e;
}
throw ie;
}
return conn;
}
}
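// A minimal sketch of the plain "fetch" path above (no --webservice URL):
// obtain delegation tokens for the default FileSystem and persist them to a
// local token file. The renewer name and the output path are illustrative.
class DelegationTokenFetcherExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    FileSystem fs = FileSystem.get(conf);
    Credentials creds = new Credentials();
    Token<?>[] tokens = fs.addDelegationTokens("yarn", creds);
    Path tokenFile = new Path("file:///tmp/my.token");
    creds.writeTokenStorageFile(tokenFile, conf);
    for (Token<?> token : tokens) {
      System.out.println("Fetched token for " + token.getService()
          + " into " + tokenFile);
    }
  }
}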
| 14,801 | 36.953846 | 106 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;
import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.util.LimitInputStream;
import com.google.common.base.Preconditions;
/**
* This is the tool for analyzing file sizes in the namespace image. In order to
* run the tool one should define a range of integers <tt>[0, maxSize]</tt> by
* specifying <tt>maxSize</tt> and a <tt>step</tt>. The range of integers is
* divided into segments of size <tt>step</tt>:
* <tt>[0, s<sub>1</sub>, ..., s<sub>n-1</sub>, maxSize]</tt>, and the visitor
* calculates how many files in the system fall into each segment
* <tt>[s<sub>i-1</sub>, s<sub>i</sub>)</tt>. Note that files larger than
* <tt>maxSize</tt> always fall into the very last segment.
*
* <h3>Input.</h3>
* <ul>
* <li><tt>filename</tt> specifies the location of the image file;</li>
* <li><tt>maxSize</tt> determines the range <tt>[0, maxSize]</tt> of files
* sizes considered by the visitor;</li>
* <li><tt>step</tt> the range is divided into segments of size step.</li>
* </ul>
*
 * <h3>Output.</h3> The output file is formatted as a tab separated two column
 * table: Size and NumFiles, where Size represents the start of the segment and
 * NumFiles is the number of files from the image whose size falls in this
 * segment.
*
*/
final class FileDistributionCalculator {
private final static long MAX_SIZE_DEFAULT = 0x2000000000L; // 1/8 TB = 2^37
private final static int INTERVAL_DEFAULT = 0x200000; // 2 MB = 2^21
private final static int MAX_INTERVALS = 0x8000000; // 128 M = 2^27
private final Configuration conf;
private final long maxSize;
private final int steps;
private final PrintStream out;
private final int[] distribution;
private int totalFiles;
private int totalDirectories;
private int totalBlocks;
private long totalSpace;
private long maxFileSize;
FileDistributionCalculator(Configuration conf, long maxSize, int steps,
PrintStream out) {
this.conf = conf;
this.maxSize = maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize;
this.steps = steps == 0 ? INTERVAL_DEFAULT : steps;
this.out = out;
long numIntervals = this.maxSize / this.steps;
// avoid OutOfMemoryError when allocating an array
Preconditions.checkState(numIntervals <= MAX_INTERVALS,
"Too many distribution intervals (maxSize/step): " + numIntervals +
", should be less than " + (MAX_INTERVALS+1) + ".");
this.distribution = new int[1 + (int) (numIntervals)];
}
void visit(RandomAccessFile file) throws IOException {
if (!FSImageUtil.checkFileFormat(file)) {
throw new IOException("Unrecognized FSImage");
}
FileSummary summary = FSImageUtil.loadSummary(file);
try (FileInputStream in = new FileInputStream(file.getFD())) {
for (FileSummary.Section s : summary.getSectionsList()) {
if (SectionName.fromString(s.getName()) != SectionName.INODE) {
continue;
}
in.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
summary.getCodec(), new BufferedInputStream(new LimitInputStream(
in, s.getLength())));
run(is);
output();
}
}
}
private void run(InputStream in) throws IOException {
INodeSection s = INodeSection.parseDelimitedFrom(in);
for (int i = 0; i < s.getNumInodes(); ++i) {
INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
if (p.getType() == INodeSection.INode.Type.FILE) {
++totalFiles;
INodeSection.INodeFile f = p.getFile();
totalBlocks += f.getBlocksCount();
long fileSize = 0;
for (BlockProto b : f.getBlocksList()) {
fileSize += b.getNumBytes();
}
maxFileSize = Math.max(fileSize, maxFileSize);
totalSpace += fileSize * f.getReplication();
int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math
.ceil((double)fileSize / steps);
++distribution[bucket];
} else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
++totalDirectories;
}
if (i % (1 << 20) == 0) {
out.println("Processed " + i + " inodes.");
}
}
}
private void output() {
// write the distribution into the output file
out.print("Size\tNumFiles\n");
for (int i = 0; i < distribution.length; i++) {
if (distribution[i] != 0) {
out.print(((long) i * steps) + "\t" + distribution[i]);
out.print('\n');
}
}
out.print("totalFiles = " + totalFiles + "\n");
out.print("totalDirectories = " + totalDirectories + "\n");
out.print("totalBlocks = " + totalBlocks + "\n");
out.print("totalSpace = " + totalSpace + "\n");
out.print("maxFileSize = " + maxFileSize + "\n");
}
}
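// A small worked example of the bucketing rule used in run() above, with the
// default step of 2 MB: a 5 MB file maps to bucket ceil(5 MB / 2 MB) = 3,
// i.e. the segment (4 MB, 6 MB]. Values are purely illustrative.
class FileDistributionBucketExample {
  public static void main(String[] args) {
    long step = 0x200000L;                // INTERVAL_DEFAULT, 2 MB
    long fileSize = 5L * 1024 * 1024;     // 5 MB
    int bucket = (int) Math.ceil((double) fileSize / step);
    System.out.println("bucket = " + bucket);  // prints "bucket = 3"
  }
}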
| 6,198 | 37.987421 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink;
import java.io.IOException;
import java.io.PrintStream;
import java.text.SimpleDateFormat;
import java.util.Date;
/**
* A PBImageDelimitedTextWriter generates a text representation of the PB fsimage,
* with each element separated by a delimiter string. All of the elements
* common to both inodes and inodes-under-construction are included. When
* processing an fsimage with a layout version that did not include an
* element, such as AccessTime, the output file will include a column
* for the value, but no value will be included.
*
* Individual block information for each file is not currently included.
*
* The default delimiter is tab, as this is an unlikely value to be included in
 * an inode path or other text metadata. The delimiter value can be specified
 * via the constructor.
*/
public class PBImageDelimitedTextWriter extends PBImageTextWriter {
static final String DEFAULT_DELIMITER = "\t";
private static final String DATE_FORMAT="yyyy-MM-dd HH:mm";
private final SimpleDateFormat dateFormatter =
new SimpleDateFormat(DATE_FORMAT);
private final String delimiter;
PBImageDelimitedTextWriter(PrintStream out, String delimiter, String tempPath)
throws IOException {
super(out, tempPath);
this.delimiter = delimiter;
}
private String formatDate(long date) {
return dateFormatter.format(new Date(date));
}
private void append(StringBuffer buffer, int field) {
buffer.append(delimiter);
buffer.append(field);
}
private void append(StringBuffer buffer, long field) {
buffer.append(delimiter);
buffer.append(field);
}
private void append(StringBuffer buffer, String field) {
buffer.append(delimiter);
buffer.append(field);
}
@Override
public String getEntry(String parent, INode inode) {
StringBuffer buffer = new StringBuffer();
String inodeName = inode.getName().toStringUtf8();
Path path = new Path(parent.isEmpty() ? "/" : parent,
inodeName.isEmpty() ? "/" : inodeName);
buffer.append(path.toString());
PermissionStatus p = null;
switch (inode.getType()) {
case FILE:
INodeFile file = inode.getFile();
p = getPermission(file.getPermission());
append(buffer, file.getReplication());
append(buffer, formatDate(file.getModificationTime()));
append(buffer, formatDate(file.getAccessTime()));
append(buffer, file.getPreferredBlockSize());
append(buffer, file.getBlocksCount());
append(buffer, FSImageLoader.getFileSize(file));
append(buffer, 0); // NS_QUOTA
append(buffer, 0); // DS_QUOTA
break;
case DIRECTORY:
INodeDirectory dir = inode.getDirectory();
p = getPermission(dir.getPermission());
append(buffer, 0); // Replication
append(buffer, formatDate(dir.getModificationTime()));
append(buffer, formatDate(0)); // Access time.
append(buffer, 0); // Block size.
append(buffer, 0); // Num blocks.
append(buffer, 0); // Num bytes.
append(buffer, dir.getNsQuota());
append(buffer, dir.getDsQuota());
break;
case SYMLINK:
INodeSymlink s = inode.getSymlink();
p = getPermission(s.getPermission());
append(buffer, 0); // Replication
append(buffer, formatDate(s.getModificationTime()));
append(buffer, formatDate(s.getAccessTime()));
append(buffer, 0); // Block size.
append(buffer, 0); // Num blocks.
append(buffer, 0); // Num bytes.
append(buffer, 0); // NS_QUOTA
append(buffer, 0); // DS_QUOTA
break;
default:
break;
}
assert p != null;
append(buffer, p.getPermission().toString());
append(buffer, p.getUserName());
append(buffer, p.getGroupName());
return buffer.toString();
}
}
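// With the default tab delimiter, a FILE row built by getEntry() above carries
// the columns: path, replication, modification time, access time, preferred
// block size, block count, file size, NS_QUOTA, DS_QUOTA, permission, user and
// group. An illustrative row (all values made up):
//   /user/alice/data.txt  3  2015-06-01 10:15  2015-06-01 10:15  134217728  1  42  0  0  rw-r--r--  alice  hadoop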
| 5,061 | 36.496296 | 87 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.IOException;
import java.util.LinkedList;
/**
* File size distribution visitor.
*
* <h3>Description.</h3>
* This is the tool for analyzing file sizes in the namespace image.
* In order to run the tool one should define a range of integers
* <tt>[0, maxSize]</tt> by specifying <tt>maxSize</tt> and a <tt>step</tt>.
* The range of integers is divided into segments of size <tt>step</tt>:
* <tt>[0, s<sub>1</sub>, ..., s<sub>n-1</sub>, maxSize]</tt>,
* and the visitor calculates how many files in the system fall into
* each segment <tt>[s<sub>i-1</sub>, s<sub>i</sub>)</tt>.
* Note that files larger than <tt>maxSize</tt> always fall into
* the very last segment.
*
* <h3>Input.</h3>
* <ul>
* <li><tt>filename</tt> specifies the location of the image file;</li>
* <li><tt>maxSize</tt> determines the range <tt>[0, maxSize]</tt> of files
* sizes considered by the visitor;</li>
* <li><tt>step</tt> the range is divided into segments of size step.</li>
* </ul>
*
* <h3>Output.</h3>
 * The output file is formatted as a tab separated two column table:
 * Size and NumFiles, where Size represents the start of the segment
 * and NumFiles is the number of files from the image whose size falls in
 * this segment.
*/
class FileDistributionVisitor extends TextWriterImageVisitor {
final private LinkedList<ImageElement> elemS = new LinkedList<ImageElement>();
private final static long MAX_SIZE_DEFAULT = 0x2000000000L; // 1/8 TB = 2^37
private final static int INTERVAL_DEFAULT = 0x200000; // 2 MB = 2^21
private int[] distribution;
private long maxSize;
private int step;
private int totalFiles;
private int totalDirectories;
private int totalBlocks;
private long totalSpace;
private long maxFileSize;
private FileContext current;
private boolean inInode = false;
/**
* File or directory information.
*/
private static class FileContext {
String path;
long fileSize;
int numBlocks;
int replication;
}
public FileDistributionVisitor(String filename,
long maxSize,
int step) throws IOException {
super(filename, false);
this.maxSize = (maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize);
this.step = (step == 0 ? INTERVAL_DEFAULT : step);
long numIntervals = this.maxSize / this.step;
if(numIntervals >= Integer.MAX_VALUE)
throw new IOException("Too many distribution intervals " + numIntervals);
this.distribution = new int[1 + (int)(numIntervals)];
this.totalFiles = 0;
this.totalDirectories = 0;
this.totalBlocks = 0;
this.totalSpace = 0;
this.maxFileSize = 0;
}
@Override
void start() throws IOException {}
@Override
void finish() throws IOException {
output();
super.finish();
}
@Override
void finishAbnormally() throws IOException {
System.out.println("*** Image processing finished abnormally. Ending ***");
output();
super.finishAbnormally();
}
private void output() throws IOException {
// write the distribution into the output file
write("Size\tNumFiles\n");
for(int i = 0; i < distribution.length; i++)
write(((long)i * step) + "\t" + distribution[i] + "\n");
System.out.println("totalFiles = " + totalFiles);
System.out.println("totalDirectories = " + totalDirectories);
System.out.println("totalBlocks = " + totalBlocks);
System.out.println("totalSpace = " + totalSpace);
System.out.println("maxFileSize = " + maxFileSize);
}
@Override
void leaveEnclosingElement() throws IOException {
ImageElement elem = elemS.pop();
if(elem != ImageElement.INODE &&
elem != ImageElement.INODE_UNDER_CONSTRUCTION)
return;
inInode = false;
if(current.numBlocks < 0) {
totalDirectories ++;
return;
}
totalFiles++;
totalBlocks += current.numBlocks;
totalSpace += current.fileSize * current.replication;
if(maxFileSize < current.fileSize)
maxFileSize = current.fileSize;
int high;
if(current.fileSize > maxSize)
high = distribution.length-1;
else
high = (int)Math.ceil((double)current.fileSize / step);
distribution[high]++;
if(totalFiles % 1000000 == 1)
System.out.println("Files processed: " + totalFiles
+ " Current: " + current.path);
}
@Override
void visit(ImageElement element, String value) throws IOException {
if(inInode) {
switch(element) {
case INODE_PATH:
current.path = (value.equals("") ? "/" : value);
break;
case REPLICATION:
current.replication = Integer.parseInt(value);
break;
case NUM_BYTES:
current.fileSize += Long.parseLong(value);
break;
default:
break;
}
}
}
@Override
void visitEnclosingElement(ImageElement element) throws IOException {
elemS.push(element);
if(element == ImageElement.INODE ||
element == ImageElement.INODE_UNDER_CONSTRUCTION) {
current = new FileContext();
inInode = true;
}
}
@Override
void visitEnclosingElement(ImageElement element,
ImageElement key, String value) throws IOException {
elemS.push(element);
if(element == ImageElement.INODE ||
element == ImageElement.INODE_UNDER_CONSTRUCTION)
inInode = true;
else if(element == ImageElement.BLOCKS)
current.numBlocks = Integer.parseInt(value);
}
}
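// This visitor backs the FileDistribution processor of the legacy offline
// image viewer. A typical invocation (option names assumed from the
// maxSize/step parameters above, 2^37 bytes and 2 MB being the defaults):
//   hdfs oiv_legacy -p FileDistribution -maxSize 137438953472 -step 2097152 \
//       -i fsimage -o fsimage.dist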
| 6,326 | 31.613402 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.IOException;
/**
 * An implementation of ImageVisitor can traverse the structure of a
 * Hadoop fsimage and respond to each of the structures within the file.
*/
abstract class ImageVisitor {
/**
* Structural elements of an FSImage that may be encountered within the
* file. ImageVisitors are able to handle processing any of these elements.
*/
public enum ImageElement {
FS_IMAGE,
IMAGE_VERSION,
NAMESPACE_ID,
IS_COMPRESSED,
COMPRESS_CODEC,
LAYOUT_VERSION,
NUM_INODES,
GENERATION_STAMP,
GENERATION_STAMP_V2,
GENERATION_STAMP_V1_LIMIT,
LAST_ALLOCATED_BLOCK_ID,
INODES,
INODE,
INODE_PATH,
REPLICATION,
MODIFICATION_TIME,
ACCESS_TIME,
BLOCK_SIZE,
NUM_BLOCKS,
BLOCKS,
BLOCK,
BLOCK_ID,
NUM_BYTES,
NS_QUOTA,
DS_QUOTA,
PERMISSIONS,
SYMLINK,
NUM_INODES_UNDER_CONSTRUCTION,
INODES_UNDER_CONSTRUCTION,
INODE_UNDER_CONSTRUCTION,
PREFERRED_BLOCK_SIZE,
CLIENT_NAME,
CLIENT_MACHINE,
USER_NAME,
GROUP_NAME,
PERMISSION_STRING,
CURRENT_DELEGATION_KEY_ID,
NUM_DELEGATION_KEYS,
DELEGATION_KEYS,
DELEGATION_KEY,
DELEGATION_TOKEN_SEQUENCE_NUMBER,
NUM_DELEGATION_TOKENS,
DELEGATION_TOKENS,
DELEGATION_TOKEN_IDENTIFIER,
DELEGATION_TOKEN_IDENTIFIER_KIND,
DELEGATION_TOKEN_IDENTIFIER_SEQNO,
DELEGATION_TOKEN_IDENTIFIER_OWNER,
DELEGATION_TOKEN_IDENTIFIER_RENEWER,
DELEGATION_TOKEN_IDENTIFIER_REALUSER,
DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE,
DELEGATION_TOKEN_IDENTIFIER_MAX_DATE,
DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME,
DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID,
TRANSACTION_ID,
LAST_INODE_ID,
INODE_ID,
SNAPSHOT_COUNTER,
NUM_SNAPSHOTS_TOTAL,
NUM_SNAPSHOTS,
SNAPSHOTS,
SNAPSHOT,
SNAPSHOT_ID,
SNAPSHOT_ROOT,
SNAPSHOT_QUOTA,
NUM_SNAPSHOT_DIR_DIFF,
SNAPSHOT_DIR_DIFFS,
SNAPSHOT_DIR_DIFF,
SNAPSHOT_DIFF_SNAPSHOTID,
SNAPSHOT_DIR_DIFF_CHILDREN_SIZE,
SNAPSHOT_INODE_FILE_ATTRIBUTES,
SNAPSHOT_INODE_DIRECTORY_ATTRIBUTES,
SNAPSHOT_DIR_DIFF_CREATEDLIST,
SNAPSHOT_DIR_DIFF_CREATEDLIST_SIZE,
SNAPSHOT_DIR_DIFF_CREATED_INODE,
SNAPSHOT_DIR_DIFF_DELETEDLIST,
SNAPSHOT_DIR_DIFF_DELETEDLIST_SIZE,
SNAPSHOT_DIR_DIFF_DELETED_INODE,
IS_SNAPSHOTTABLE_DIR,
IS_WITHSNAPSHOT_DIR,
SNAPSHOT_FILE_DIFFS,
SNAPSHOT_FILE_DIFF,
NUM_SNAPSHOT_FILE_DIFF,
SNAPSHOT_FILE_SIZE,
SNAPSHOT_DST_SNAPSHOT_ID,
SNAPSHOT_LAST_SNAPSHOT_ID,
SNAPSHOT_REF_INODE_ID,
SNAPSHOT_REF_INODE,
CACHE_NEXT_ENTRY_ID,
CACHE_NUM_POOLS,
CACHE_POOL_NAME,
CACHE_POOL_OWNER_NAME,
CACHE_POOL_GROUP_NAME,
CACHE_POOL_PERMISSION_STRING,
CACHE_POOL_WEIGHT,
CACHE_NUM_ENTRIES,
CACHE_ENTRY_PATH,
CACHE_ENTRY_REPLICATION,
CACHE_ENTRY_POOL_NAME
}
/**
* Begin visiting the fsimage structure. Opportunity to perform
* any initialization necessary for the implementing visitor.
*/
abstract void start() throws IOException;
/**
* Finish visiting the fsimage structure. Opportunity to perform any
* clean up necessary for the implementing visitor.
*/
abstract void finish() throws IOException;
/**
* Finish visiting the fsimage structure after an error has occurred
* during the processing. Opportunity to perform any clean up necessary
* for the implementing visitor.
*/
abstract void finishAbnormally() throws IOException;
/**
   * Visit a non-enclosing element of the fsimage with the specified value.
*
* @param element FSImage element
* @param value Element's value
*/
abstract void visit(ImageElement element, String value) throws IOException;
// Convenience methods to automatically convert numeric value types to strings
void visit(ImageElement element, int value) throws IOException {
visit(element, Integer.toString(value));
}
void visit(ImageElement element, long value) throws IOException {
visit(element, Long.toString(value));
}
/**
* Begin visiting an element that encloses another element, such as
* the beginning of the list of blocks that comprise a file.
*
* @param element Element being visited
*/
abstract void visitEnclosingElement(ImageElement element)
throws IOException;
/**
* Begin visiting an element that encloses another element, such as
* the beginning of the list of blocks that comprise a file.
*
* Also provide an additional key and value for the element, such as the
   * number of items within the element.
*
* @param element Element being visited
* @param key Key describing the element being visited
* @param value Value associated with element being visited
*/
abstract void visitEnclosingElement(ImageElement element,
ImageElement key, String value) throws IOException;
// Convenience methods to automatically convert value types to strings
void visitEnclosingElement(ImageElement element,
ImageElement key, int value)
throws IOException {
visitEnclosingElement(element, key, Integer.toString(value));
}
void visitEnclosingElement(ImageElement element,
ImageElement key, long value)
throws IOException {
visitEnclosingElement(element, key, Long.toString(value));
}
/**
* Leave current enclosing element. Called, for instance, at the end of
   * processing the blocks that comprise a file.
*/
abstract void leaveEnclosingElement() throws IOException;
}
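// A minimal sketch (not part of the Hadoop source) of a concrete visitor that simply
// counts INODE elements, to show which callbacks a subclass must supply. It assumes
// it is compiled into this package so the package-private base class and the
// ImageElement enum are visible; the class name is illustrative only.
class InodeCountingVisitor extends ImageVisitor {
  private long inodes = 0;

  @Override
  void start() throws IOException {} // nothing to initialize

  @Override
  void finish() throws IOException {
    System.out.println("inodes seen: " + inodes); // report on a clean finish
  }

  @Override
  void finishAbnormally() throws IOException {
    finish(); // still report the partial count
  }

  @Override
  void visit(ImageElement element, String value) throws IOException {}

  @Override
  void visitEnclosingElement(ImageElement element) throws IOException {
    if (element == ImageElement.INODE) {
      inodes++; // one per inode encountered
    }
  }

  @Override
  void visitEnclosingElement(ImageElement element, ImageElement key, String value)
      throws IOException {
    visitEnclosingElement(element); // same counting rule applies
  }

  @Override
  void leaveEnclosingElement() throws IOException {}
}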
| 6,374 | 28.929577 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import com.google.common.base.Charsets;
/**
 * TextWriterImageVisitor mixes in the ability for ImageVisitor
* implementations to easily write their output to a text file.
*
* Implementing classes should be sure to call the super methods for the
* constructors, finish and finishAbnormally methods, in order that the
* underlying file may be opened and closed correctly.
*
* Note, this class does not add newlines to text written to file or (if
* enabled) screen. This is the implementing class' responsibility.
*/
abstract class TextWriterImageVisitor extends ImageVisitor {
private boolean printToScreen = false;
private boolean okToWrite = false;
final private OutputStreamWriter fw;
/**
* Create a processor that writes to the file named.
*
* @param filename Name of file to write output to
*/
public TextWriterImageVisitor(String filename) throws IOException {
this(filename, false);
}
/**
* Create a processor that writes to the file named and may or may not
* also output to the screen, as specified.
*
* @param filename Name of file to write output to
* @param printToScreen Mirror output to screen?
*/
public TextWriterImageVisitor(String filename, boolean printToScreen)
throws IOException {
super();
this.printToScreen = printToScreen;
fw = new OutputStreamWriter(new FileOutputStream(filename), Charsets.UTF_8);
okToWrite = true;
}
/* (non-Javadoc)
* @see org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor#finish()
*/
@Override
void finish() throws IOException {
close();
}
/* (non-Javadoc)
* @see org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor#finishAbnormally()
*/
@Override
void finishAbnormally() throws IOException {
close();
}
/**
* Close output stream and prevent further writing
*/
private void close() throws IOException {
fw.close();
okToWrite = false;
}
/**
* Write parameter to output file (and possibly screen).
*
* @param toWrite Text to write to file
*/
protected void write(String toWrite) throws IOException {
if(!okToWrite)
throw new IOException("file not open for writing.");
if(printToScreen)
System.out.print(toWrite);
try {
fw.write(toWrite);
} catch (IOException e) {
okToWrite = false;
throw e;
}
}
}
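// A minimal sketch (not part of the Hadoop source) of the expected lifecycle for a
// TextWriterImageVisitor subclass: the constructor opens the file, write() is called
// with explicit newlines, and finish()/finishAbnormally() close the stream. The class
// name and the output it produces are illustrative only.
class HeaderFooterTextVisitorExample extends TextWriterImageVisitor {
  HeaderFooterTextVisitorExample(String filename) throws IOException {
    super(filename, true); // also mirror output to the screen
  }

  @Override
  void start() throws IOException {
    write("--- begin image dump ---\n"); // the newline is this class' responsibility
  }

  @Override
  void finish() throws IOException {
    write("--- end image dump ---\n");
    super.finish(); // closes the underlying writer
  }

  @Override
  void visit(ImageElement element, String value) throws IOException {
    write(element + " = " + value + "\n");
  }

  @Override
  void visitEnclosingElement(ImageElement element) throws IOException {}

  @Override
  void visitEnclosingElement(ImageElement element, ImageElement key, String value)
      throws IOException {}

  @Override
  void leaveEnclosingElement() throws IOException {}
}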
| 3,348 | 29.445455 | 89 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.EOFException;
import java.io.IOException;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
/**
 * OfflineImageViewerPB dumps the contents of a Hadoop image file to XML or
 * the console. Main entry point into the utility, either via the command line
 * or programmatically.
*/
@InterfaceAudience.Private
public class OfflineImageViewerPB {
public static final Log LOG = LogFactory.getLog(OfflineImageViewerPB.class);
  private final static String usage = "Usage: bin/hdfs oiv [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n"
      + "Offline Image Viewer\n"
      + "View a Hadoop fsimage INPUTFILE using the specified PROCESSOR,\n"
      + "saving the results in OUTPUTFILE.\n"
      + "\n"
      + "The oiv utility will attempt to parse correctly formed image files\n"
      + "and will abort if given a malformed image file.\n"
+ "\n"
+ "The tool works offline and does not require a running cluster in\n"
+ "order to process an image file.\n"
+ "\n"
+ "The following image processors are available:\n"
+ " * XML: This processor creates an XML document with all elements of\n"
+ " the fsimage enumerated, suitable for further analysis by XML\n"
+ " tools.\n"
+ " * FileDistribution: This processor analyzes the file size\n"
+ " distribution in the image.\n"
+ " -maxSize specifies the range [0, maxSize] of file sizes to be\n"
+ " analyzed (128GB by default).\n"
+ " -step defines the granularity of the distribution. (2MB by default)\n"
+ " * Web: Run a viewer to expose read-only WebHDFS API.\n"
+ " -addr specifies the address to listen. (localhost:5978 by default)\n"
+ " * Delimited (experimental): Generate a text file with all of the elements common\n"
+ " to both inodes and inodes-under-construction, separated by a\n"
+ " delimiter. The default delimiter is \\t, though this may be\n"
+ " changed via the -delimiter argument.\n"
+ "\n"
+ "Required command line arguments:\n"
+ "-i,--inputFile <arg> FSImage file to process.\n"
+ "\n"
+ "Optional command line arguments:\n"
+ "-o,--outputFile <arg> Name of output file. If the specified\n"
+ " file exists, it will be overwritten.\n"
+ " (output to stdout by default)\n"
+ "-p,--processor <arg> Select which type of processor to apply\n"
+ " against image file. (XML|FileDistribution|Web|Delimited)\n"
+ " (Web by default)\n"
+ "-delimiter <arg> Delimiting string to use with Delimited processor. \n"
+ "-t,--temp <arg> Use temporary dir to cache intermediate result to generate\n"
+ " Delimited outputs. If not set, Delimited processor constructs\n"
+ " the namespace in memory before outputting text.\n"
+ "-h,--help Display usage information and exit\n";
/**
* Build command-line options and descriptions
*/
private static Options buildOptions() {
Options options = new Options();
    // Build the input file argument, which is required; there is no
    // addOption method that can mark an option as required
OptionBuilder.isRequired();
OptionBuilder.hasArgs();
OptionBuilder.withLongOpt("inputFile");
options.addOption(OptionBuilder.create("i"));
options.addOption("o", "outputFile", true, "");
options.addOption("p", "processor", true, "");
options.addOption("h", "help", false, "");
options.addOption("maxSize", true, "");
options.addOption("step", true, "");
options.addOption("addr", true, "");
options.addOption("delimiter", true, "");
options.addOption("t", "temp", true, "");
return options;
}
/**
   * Entry point to command-line-driven operation. The user may specify options
   * and start the fsimage viewer from the command line. The program will process
   * the image file and exit cleanly or, if an error is encountered, inform the
   * user and exit.
*
* @param args
* Command line options
* @throws IOException
*/
public static void main(String[] args) throws Exception {
int status = run(args);
System.exit(status);
}
public static int run(String[] args) throws Exception {
Options options = buildOptions();
if (args.length == 0) {
printUsage();
return 0;
}
CommandLineParser parser = new PosixParser();
CommandLine cmd;
try {
cmd = parser.parse(options, args);
} catch (ParseException e) {
System.out.println("Error parsing command-line options: ");
printUsage();
return -1;
}
if (cmd.hasOption("h")) { // print help and exit
printUsage();
return 0;
}
String inputFile = cmd.getOptionValue("i");
String processor = cmd.getOptionValue("p", "Web");
String outputFile = cmd.getOptionValue("o", "-");
String delimiter = cmd.getOptionValue("delimiter",
PBImageDelimitedTextWriter.DEFAULT_DELIMITER);
String tempPath = cmd.getOptionValue("t", "");
Configuration conf = new Configuration();
try (PrintStream out = outputFile.equals("-") ?
System.out : new PrintStream(outputFile, "UTF-8")) {
switch (processor) {
case "FileDistribution":
long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
new FileDistributionCalculator(conf, maxSize, step, out).visit(
new RandomAccessFile(inputFile, "r"));
break;
case "XML":
new PBImageXmlWriter(conf, out).visit(
new RandomAccessFile(inputFile, "r"));
break;
case "Web":
String addr = cmd.getOptionValue("addr", "localhost:5978");
try (WebImageViewer viewer = new WebImageViewer(
NetUtils.createSocketAddr(addr))) {
viewer.start(inputFile);
}
break;
case "Delimited":
try (PBImageDelimitedTextWriter writer =
new PBImageDelimitedTextWriter(out, delimiter, tempPath)) {
writer.visit(new RandomAccessFile(inputFile, "r"));
}
break;
}
return 0;
} catch (EOFException e) {
System.err.println("Input file ended unexpectedly. Exiting");
} catch (IOException e) {
System.err.println("Encountered exception. Exiting: " + e.getMessage());
}
return -1;
}
/**
* Print application usage instructions.
*/
private static void printUsage() {
System.out.println(usage);
}
}
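// A minimal sketch (not part of the Hadoop source) of driving the PB-based viewer
// programmatically rather than from the shell, mirroring
// "hdfs oiv -i <fsimage> -p XML -o <out>". The file names are placeholders.
class OfflineImageViewerPBExample {
  public static void main(String[] args) throws Exception {
    int status = OfflineImageViewerPB.run(new String[] {
        "-i", "fsimage_0000000000000000042", // hypothetical input image
        "-p", "XML",                         // XML processor instead of the Web default
        "-o", "fsimage.xml" });              // overwritten if it already exists
    System.exit(status);
  }
}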
| 8,068 | 38.945545 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.PositionTrackingInputStream;
/**
 * OfflineImageViewer dumps the contents of a Hadoop image file to XML
 * or the console. Main entry point into the utility, either via the
 * command line or programmatically.
*/
@InterfaceAudience.Private
public class OfflineImageViewer {
public static final Log LOG = LogFactory.getLog(OfflineImageViewer.class);
  private final static String usage =
    "Usage: bin/hdfs oiv_legacy [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n" +
    "Offline Image Viewer\n" +
    "View a Hadoop fsimage INPUTFILE using the specified PROCESSOR,\n" +
    "saving the results in OUTPUTFILE.\n" +
    "\n" +
    "The oiv utility will attempt to parse correctly formed image files\n" +
    "and will abort if given a malformed image file.\n" +
"\n" +
"The tool works offline and does not require a running cluster in\n" +
"order to process an image file.\n" +
"\n" +
"The following image processors are available:\n" +
" * Ls: The default image processor generates an lsr-style listing\n" +
" of the files in the namespace, with the same fields in the same\n" +
" order. Note that in order to correctly determine file sizes,\n" +
" this formatter cannot skip blocks and will override the\n" +
" -skipBlocks option.\n" +
" * Indented: This processor enumerates over all of the elements in\n" +
" the fsimage file, using levels of indentation to delineate\n" +
" sections within the file.\n" +
" * Delimited: Generate a text file with all of the elements common\n" +
" to both inodes and inodes-under-construction, separated by a\n" +
" delimiter. The default delimiter is \u0001, though this may be\n" +
" changed via the -delimiter argument. This processor also overrides\n" +
" the -skipBlocks option for the same reason as the Ls processor\n" +
" * XML: This processor creates an XML document with all elements of\n" +
" the fsimage enumerated, suitable for further analysis by XML\n" +
" tools.\n" +
" * FileDistribution: This processor analyzes the file size\n" +
" distribution in the image.\n" +
" -maxSize specifies the range [0, maxSize] of file sizes to be\n" +
" analyzed (128GB by default).\n" +
" -step defines the granularity of the distribution. (2MB by default)\n" +
" * NameDistribution: This processor analyzes the file names\n" +
" in the image and prints total number of file names and how frequently\n" +
" file names are reused.\n" +
"\n" +
"Required command line arguments:\n" +
"-i,--inputFile <arg> FSImage file to process.\n" +
"-o,--outputFile <arg> Name of output file. If the specified\n" +
" file exists, it will be overwritten.\n" +
"\n" +
"Optional command line arguments:\n" +
"-p,--processor <arg> Select which type of processor to apply\n" +
" against image file." +
" (Ls|XML|Delimited|Indented|FileDistribution).\n" +
"-h,--help Display usage information and exit\n" +
"-printToScreen For processors that write to a file, also\n" +
" output to screen. On large image files this\n" +
" will dramatically increase processing time.\n" +
"-skipBlocks Skip inodes' blocks information. May\n" +
" significantly decrease output.\n" +
" (default = false).\n" +
"-delimiter <arg> Delimiting string to use with Delimited processor\n";
private final boolean skipBlocks;
private final String inputFile;
private final ImageVisitor processor;
public OfflineImageViewer(String inputFile, ImageVisitor processor,
boolean skipBlocks) {
this.inputFile = inputFile;
this.processor = processor;
this.skipBlocks = skipBlocks;
}
/**
* Process image file.
*/
public void go() throws IOException {
DataInputStream in = null;
PositionTrackingInputStream tracker = null;
ImageLoader fsip = null;
boolean done = false;
try {
tracker = new PositionTrackingInputStream(new BufferedInputStream(
new FileInputStream(new File(inputFile))));
in = new DataInputStream(tracker);
int imageVersionFile = findImageVersion(in);
fsip = ImageLoader.LoaderFactory.getLoader(imageVersionFile);
if(fsip == null)
throw new IOException("No image processor to read version " +
imageVersionFile + " is available.");
fsip.loadImage(in, processor, skipBlocks);
done = true;
} finally {
if (!done) {
if (tracker != null) {
LOG.error("image loading failed at offset " + tracker.getPos());
} else {
LOG.error("Failed to load image file.");
}
}
IOUtils.cleanup(LOG, in, tracker);
}
}
/**
* Check an fsimage datainputstream's version number.
*
* The datainput stream is returned at the same point as it was passed in;
* this method has no effect on the datainputstream's read pointer.
*
* @param in Datainputstream of fsimage
* @return Filesystem layout version of fsimage represented by stream
* @throws IOException If problem reading from in
*/
private int findImageVersion(DataInputStream in) throws IOException {
in.mark(42); // arbitrary amount, resetting immediately
int version = in.readInt();
in.reset();
return version;
}
/**
* Build command-line options and descriptions
*/
public static Options buildOptions() {
Options options = new Options();
// Build in/output file arguments, which are required, but there is no
// addOption method that can specify this
OptionBuilder.isRequired();
OptionBuilder.hasArgs();
OptionBuilder.withLongOpt("outputFile");
options.addOption(OptionBuilder.create("o"));
OptionBuilder.isRequired();
OptionBuilder.hasArgs();
OptionBuilder.withLongOpt("inputFile");
options.addOption(OptionBuilder.create("i"));
options.addOption("p", "processor", true, "");
options.addOption("h", "help", false, "");
options.addOption("skipBlocks", false, "");
options.addOption("printToScreen", false, "");
options.addOption("delimiter", true, "");
return options;
}
/**
   * Entry point to command-line-driven operation. The user may specify
   * options and start the fsimage viewer from the command line. The program
   * will process the image file and exit cleanly or, if an error is
   * encountered, inform the user and exit.
*
* @param args Command line options
* @throws IOException
*/
public static void main(String[] args) throws IOException {
Options options = buildOptions();
if(args.length == 0) {
printUsage();
return;
}
CommandLineParser parser = new PosixParser();
CommandLine cmd;
try {
cmd = parser.parse(options, args);
} catch (ParseException e) {
System.out.println("Error parsing command-line options: ");
printUsage();
return;
}
if(cmd.hasOption("h")) { // print help and exit
printUsage();
return;
}
boolean skipBlocks = cmd.hasOption("skipBlocks");
boolean printToScreen = cmd.hasOption("printToScreen");
String inputFile = cmd.getOptionValue("i");
String processor = cmd.getOptionValue("p", "Ls");
String outputFile = cmd.getOptionValue("o");
String delimiter = cmd.getOptionValue("delimiter");
if( !(delimiter == null || processor.equals("Delimited")) ) {
System.out.println("Can only specify -delimiter with Delimited processor");
printUsage();
return;
}
ImageVisitor v;
if(processor.equals("Indented")) {
v = new IndentedImageVisitor(outputFile, printToScreen);
} else if (processor.equals("XML")) {
v = new XmlImageVisitor(outputFile, printToScreen);
} else if (processor.equals("Delimited")) {
v = delimiter == null ?
new DelimitedImageVisitor(outputFile, printToScreen) :
new DelimitedImageVisitor(outputFile, printToScreen, delimiter);
skipBlocks = false;
} else if (processor.equals("FileDistribution")) {
long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
v = new FileDistributionVisitor(outputFile, maxSize, step);
} else if (processor.equals("NameDistribution")) {
v = new NameDistributionVisitor(outputFile, printToScreen);
} else {
v = new LsImageVisitor(outputFile, printToScreen);
skipBlocks = false;
}
try {
OfflineImageViewer d = new OfflineImageViewer(inputFile, v, skipBlocks);
d.go();
} catch (EOFException e) {
System.err.println("Input file ended unexpectedly. Exiting");
} catch(IOException e) {
System.err.println("Encountered exception. Exiting: " + e.getMessage());
}
}
/**
* Print application usage instructions.
*/
private static void printUsage() {
System.out.println(usage);
}
}
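// A minimal sketch (not part of the Hadoop source) of the legacy viewer used as a
// library: pick a visitor (here the XML visitor constructed exactly as in main()
// above), wrap it in an OfflineImageViewer and call go(). The paths are placeholders.
class OfflineImageViewerExample {
  public static void main(String[] args) throws IOException {
    ImageVisitor v = new XmlImageVisitor("fsimage.xml", false); // write XML, no screen echo
    new OfflineImageViewer("fsimage_legacy", v, false).go();    // false = do not skip blocks
  }
}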
| 10,742 | 37.505376 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DepthCounter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Utility class for tracking the depth of descent into the structure being
 * processed by a visitor class (ImageVisitor, EditsVisitor, etc.)
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class DepthCounter {
private int depth = 0;
public void incLevel() { depth++; }
public void decLevel() { if(depth >= 1) depth--; }
public int getLevel() { return depth; }
}
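// A minimal sketch (not part of the Hadoop source) of how a visitor can lean on
// DepthCounter for indentation: bump the level when entering an enclosing element,
// drop it when leaving, and indent output by the current level.
class DepthCounterExample {
  public static void main(String[] args) {
    DepthCounter depth = new DepthCounter();
    depth.incLevel(); // entered <INODES>
    depth.incLevel(); // entered <INODE>
    StringBuilder indent = new StringBuilder();
    for (int i = 0; i < depth.getLevel(); i++) {
      indent.append("  "); // two spaces per level
    }
    System.out.println(indent + "INODE_PATH = /foo");
    depth.decLevel(); // left <INODE>
    System.out.println("depth after leaving: " + depth.getLevel());
  }
}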
| 1,355 | 35.648649 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;
import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.CacheManagerSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeReferenceSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.StringTableSection;
import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.util.LimitInputStream;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
/**
* PBImageXmlWriter walks over an fsimage structure and writes out
* an equivalent XML document that contains the fsimage's components.
*/
@InterfaceAudience.Private
public final class PBImageXmlWriter {
private final Configuration conf;
private final PrintStream out;
private String[] stringTable;
public PBImageXmlWriter(Configuration conf, PrintStream out) {
this.conf = conf;
this.out = out;
}
public void visit(RandomAccessFile file) throws IOException {
if (!FSImageUtil.checkFileFormat(file)) {
throw new IOException("Unrecognized FSImage");
}
FileSummary summary = FSImageUtil.loadSummary(file);
try (FileInputStream fin = new FileInputStream(file.getFD())) {
out.print("<?xml version=\"1.0\"?>\n<fsimage>");
ArrayList<FileSummary.Section> sections = Lists.newArrayList(summary
.getSectionsList());
Collections.sort(sections, new Comparator<FileSummary.Section>() {
@Override
public int compare(FileSummary.Section s1, FileSummary.Section s2) {
SectionName n1 = SectionName.fromString(s1.getName());
SectionName n2 = SectionName.fromString(s2.getName());
if (n1 == null) {
return n2 == null ? 0 : -1;
} else if (n2 == null) {
            return 1;
} else {
return n1.ordinal() - n2.ordinal();
}
}
});
for (FileSummary.Section s : sections) {
fin.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
summary.getCodec(), new BufferedInputStream(new LimitInputStream(
fin, s.getLength())));
switch (SectionName.fromString(s.getName())) {
case NS_INFO:
dumpNameSection(is);
break;
case STRING_TABLE:
loadStringTable(is);
break;
case INODE:
dumpINodeSection(is);
break;
case INODE_REFERENCE:
dumpINodeReferenceSection(is);
break;
case INODE_DIR:
dumpINodeDirectorySection(is);
break;
case FILES_UNDERCONSTRUCTION:
dumpFileUnderConstructionSection(is);
break;
case SNAPSHOT:
dumpSnapshotSection(is);
break;
case SNAPSHOT_DIFF:
dumpSnapshotDiffSection(is);
break;
case SECRET_MANAGER:
dumpSecretManagerSection(is);
break;
case CACHE_MANAGER:
dumpCacheManagerSection(is);
break;
default:
break;
}
}
out.print("</fsimage>\n");
}
}
private void dumpCacheManagerSection(InputStream is) throws IOException {
out.print("<CacheManagerSection>");
CacheManagerSection s = CacheManagerSection.parseDelimitedFrom(is);
o("nextDirectiveId", s.getNextDirectiveId());
for (int i = 0; i < s.getNumPools(); ++i) {
CachePoolInfoProto p = CachePoolInfoProto.parseDelimitedFrom(is);
out.print("<pool>");
o("poolName", p.getPoolName()).o("ownerName", p.getOwnerName())
.o("groupName", p.getGroupName()).o("mode", p.getMode())
.o("limit", p.getLimit())
.o("maxRelativeExpiry", p.getMaxRelativeExpiry());
out.print("</pool>\n");
}
for (int i = 0; i < s.getNumDirectives(); ++i) {
CacheDirectiveInfoProto p = CacheDirectiveInfoProto
.parseDelimitedFrom(is);
out.print("<directive>");
o("id", p.getId()).o("path", p.getPath())
.o("replication", p.getReplication()).o("pool", p.getPool());
out.print("<expiration>");
CacheDirectiveInfoExpirationProto e = p.getExpiration();
o("millis", e.getMillis()).o("relatilve", e.getIsRelative());
out.print("</expiration>\n");
out.print("</directive>\n");
}
out.print("</CacheManagerSection>\n");
}
private void dumpFileUnderConstructionSection(InputStream in)
throws IOException {
out.print("<FileUnderConstructionSection>");
while (true) {
FileUnderConstructionEntry e = FileUnderConstructionEntry
.parseDelimitedFrom(in);
if (e == null) {
break;
}
out.print("<inode>");
o("id", e.getInodeId()).o("path", e.getFullPath());
out.print("</inode>\n");
}
out.print("</FileUnderConstructionSection>\n");
}
private void dumpINodeDirectory(INodeDirectory d) {
o("mtime", d.getModificationTime()).o("permission",
dumpPermission(d.getPermission()));
dumpAcls(d.getAcl());
if (d.hasDsQuota() && d.hasNsQuota()) {
o("nsquota", d.getNsQuota()).o("dsquota", d.getDsQuota());
}
}
private void dumpINodeDirectorySection(InputStream in) throws IOException {
out.print("<INodeDirectorySection>");
while (true) {
INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry
.parseDelimitedFrom(in);
      // note that in is a LimitInputStream
if (e == null) {
break;
}
out.print("<directory>");
o("parent", e.getParent());
for (long id : e.getChildrenList()) {
o("inode", id);
}
for (int refId : e.getRefChildrenList()) {
o("inodereference-index", refId);
}
out.print("</directory>\n");
}
out.print("</INodeDirectorySection>\n");
}
private void dumpINodeReferenceSection(InputStream in) throws IOException {
out.print("<INodeReferenceSection>");
while (true) {
INodeReferenceSection.INodeReference e = INodeReferenceSection
.INodeReference.parseDelimitedFrom(in);
if (e == null) {
break;
}
dumpINodeReference(e);
}
out.print("</INodeReferenceSection>");
}
private void dumpINodeReference(INodeReferenceSection.INodeReference r) {
out.print("<ref>");
o("referredId", r.getReferredId()).o("name", r.getName().toStringUtf8())
.o("dstSnapshotId", r.getDstSnapshotId())
.o("lastSnapshotId", r.getLastSnapshotId());
out.print("</ref>\n");
}
private void dumpINodeFile(INodeSection.INodeFile f) {
o("replication", f.getReplication()).o("mtime", f.getModificationTime())
.o("atime", f.getAccessTime())
.o("perferredBlockSize", f.getPreferredBlockSize())
.o("permission", dumpPermission(f.getPermission()));
dumpAcls(f.getAcl());
if (f.getBlocksCount() > 0) {
out.print("<blocks>");
for (BlockProto b : f.getBlocksList()) {
out.print("<block>");
o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
b.getNumBytes());
out.print("</block>\n");
}
out.print("</blocks>\n");
}
if (f.hasFileUC()) {
INodeSection.FileUnderConstructionFeature u = f.getFileUC();
out.print("<file-under-construction>");
o("clientName", u.getClientName()).o("clientMachine",
u.getClientMachine());
out.print("</file-under-construction>\n");
}
}
private void dumpAcls(AclFeatureProto aclFeatureProto) {
ImmutableList<AclEntry> aclEntryList = FSImageFormatPBINode.Loader
.loadAclEntries(aclFeatureProto, stringTable);
if (aclEntryList.size() > 0) {
out.print("<acls>");
for (AclEntry aclEntry : aclEntryList) {
o("acl", aclEntry.toString());
}
out.print("</acls>");
}
}
private void dumpINodeSection(InputStream in) throws IOException {
INodeSection s = INodeSection.parseDelimitedFrom(in);
out.print("<INodeSection>");
o("lastInodeId", s.getLastInodeId());
for (int i = 0; i < s.getNumInodes(); ++i) {
INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
out.print("<inode>");
o("id", p.getId()).o("type", p.getType()).o("name",
p.getName().toStringUtf8());
if (p.hasFile()) {
dumpINodeFile(p.getFile());
} else if (p.hasDirectory()) {
dumpINodeDirectory(p.getDirectory());
} else if (p.hasSymlink()) {
dumpINodeSymlink(p.getSymlink());
}
out.print("</inode>\n");
}
out.print("</INodeSection>\n");
}
private void dumpINodeSymlink(INodeSymlink s) {
o("permission", dumpPermission(s.getPermission()))
.o("target", s.getTarget().toStringUtf8())
.o("mtime", s.getModificationTime()).o("atime", s.getAccessTime());
}
private void dumpNameSection(InputStream in) throws IOException {
NameSystemSection s = NameSystemSection.parseDelimitedFrom(in);
out.print("<NameSection>\n");
o("genstampV1", s.getGenstampV1()).o("genstampV2", s.getGenstampV2())
.o("genstampV1Limit", s.getGenstampV1Limit())
.o("lastAllocatedBlockId", s.getLastAllocatedBlockId())
.o("txid", s.getTransactionId());
out.print("</NameSection>\n");
}
private String dumpPermission(long permission) {
return FSImageFormatPBINode.Loader.loadPermission(permission, stringTable)
.toString();
}
private void dumpSecretManagerSection(InputStream is) throws IOException {
out.print("<SecretManagerSection>");
SecretManagerSection s = SecretManagerSection.parseDelimitedFrom(is);
o("currentId", s.getCurrentId()).o("tokenSequenceNumber",
s.getTokenSequenceNumber());
out.print("</SecretManagerSection>");
}
private void dumpSnapshotDiffSection(InputStream in) throws IOException {
out.print("<SnapshotDiffSection>");
while (true) {
SnapshotDiffSection.DiffEntry e = SnapshotDiffSection.DiffEntry
.parseDelimitedFrom(in);
if (e == null) {
break;
}
out.print("<diff>");
o("inodeid", e.getInodeId());
switch (e.getType()) {
case FILEDIFF: {
for (int i = 0; i < e.getNumOfDiff(); ++i) {
out.print("<filediff>");
SnapshotDiffSection.FileDiff f = SnapshotDiffSection.FileDiff
.parseDelimitedFrom(in);
o("snapshotId", f.getSnapshotId()).o("size", f.getFileSize()).o(
"name", f.getName().toStringUtf8());
out.print("</filediff>\n");
}
}
break;
case DIRECTORYDIFF: {
for (int i = 0; i < e.getNumOfDiff(); ++i) {
out.print("<dirdiff>");
SnapshotDiffSection.DirectoryDiff d = SnapshotDiffSection.DirectoryDiff
.parseDelimitedFrom(in);
o("snapshotId", d.getSnapshotId())
.o("isSnapshotroot", d.getIsSnapshotRoot())
.o("childrenSize", d.getChildrenSize())
.o("name", d.getName().toStringUtf8());
for (int j = 0; j < d.getCreatedListSize(); ++j) {
SnapshotDiffSection.CreatedListEntry ce = SnapshotDiffSection.CreatedListEntry
.parseDelimitedFrom(in);
out.print("<created>");
o("name", ce.getName().toStringUtf8());
out.print("</created>\n");
}
for (long did : d.getDeletedINodeList()) {
out.print("<deleted>");
o("inode", did);
out.print("</deleted>\n");
}
for (int dRefid : d.getDeletedINodeRefList()) {
out.print("<deleted>");
o("inodereference-index", dRefid);
out.print("</deleted>\n");
}
out.print("</dirdiff>\n");
}
}
break;
default:
break;
}
out.print("</diff>");
}
out.print("</SnapshotDiffSection>\n");
}
private void dumpSnapshotSection(InputStream in) throws IOException {
out.print("<SnapshotSection>");
SnapshotSection s = SnapshotSection.parseDelimitedFrom(in);
o("snapshotCounter", s.getSnapshotCounter());
if (s.getSnapshottableDirCount() > 0) {
out.print("<snapshottableDir>");
for (long id : s.getSnapshottableDirList()) {
o("dir", id);
}
out.print("</snapshottableDir>\n");
}
for (int i = 0; i < s.getNumSnapshots(); ++i) {
SnapshotSection.Snapshot pbs = SnapshotSection.Snapshot
.parseDelimitedFrom(in);
o("snapshot", pbs.getSnapshotId());
}
out.print("</SnapshotSection>\n");
}
private void loadStringTable(InputStream in) throws IOException {
StringTableSection s = StringTableSection.parseDelimitedFrom(in);
stringTable = new String[s.getNumEntry() + 1];
for (int i = 0; i < s.getNumEntry(); ++i) {
StringTableSection.Entry e = StringTableSection.Entry
.parseDelimitedFrom(in);
stringTable[e.getId()] = e.getStr();
}
}
private PBImageXmlWriter o(final String e, final Object v) {
out.print("<" + e + ">" +
XMLUtils.mangleXmlString(v.toString(), true) + "</" + e + ">");
return this;
}
}
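// A minimal sketch (not part of the Hadoop source) of using PBImageXmlWriter on its
// own: open the protobuf-based fsimage read-only and stream the XML to a file. The
// file names are placeholders.
class PBImageXmlWriterExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    try (PrintStream out = new PrintStream("fsimage.xml", "UTF-8");
         RandomAccessFile file =
             new RandomAccessFile("fsimage_0000000000000000042", "r")) {
      new PBImageXmlWriter(conf, out).visit(file); // throws IOException on a bad image
    }
  }
}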
| 15,898 | 36.147196 | 116 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
/**
 * An ImageLoader can accept a DataInputStream to a Hadoop FSImage file
* and walk over its structure using the supplied ImageVisitor.
*
* Each implementation of ImageLoader is designed to rapidly process an
* image file. As long as minor changes are made from one layout version
* to another, it is acceptable to tweak one implementation to read the next.
* However, if the layout version changes enough that it would make a
* processor slow or difficult to read, another processor should be created.
* This allows each processor to quickly read an image without getting
* bogged down in dealing with significant differences between layout versions.
*/
interface ImageLoader {
/**
   * @param in DataInputStream pointing to a Hadoop FSImage file
   * @param v Visitor to apply to the FSImage file
* @param enumerateBlocks Should visitor visit each of the file blocks?
*/
public void loadImage(DataInputStream in, ImageVisitor v,
boolean enumerateBlocks) throws IOException;
/**
* Can this processor handle the specified version of FSImage file?
*
   * @param version FSImage file layout version
* @return True if this instance can process the file
*/
public boolean canLoadVersion(int version);
/**
* Factory for obtaining version of image loader that can read
* a particular image format.
*/
@InterfaceAudience.Private
public class LoaderFactory {
// Java doesn't support static methods on interfaces, which necessitates
// this factory class
/**
* Find an image loader capable of interpreting the specified
* layout version number. If none, return null;
*
* @param version fsimage layout version number to be processed
* @return ImageLoader that can interpret specified version, or null
*/
static public ImageLoader getLoader(int version) {
// Easy to add more image processors as they are written
ImageLoader[] loaders = { new ImageLoaderCurrent() };
for (ImageLoader l : loaders) {
if (l.canLoadVersion(version))
return l;
}
return null;
}
}
}
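// A minimal sketch (not part of the Hadoop source) of the loader factory in use: ask
// for a loader that understands a given layout version and bail out if none exists.
// The version number below is illustrative only.
class ImageLoaderFactoryExample {
  public static void main(String[] args) {
    int layoutVersion = -51; // hypothetical fsimage layout version
    ImageLoader loader = ImageLoader.LoaderFactory.getLoader(layoutVersion);
    if (loader == null) {
      System.err.println("No image processor for version " + layoutVersion);
    } else {
      System.out.println("Using " + loader.getClass().getSimpleName());
    }
  }
}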
| 3,097 | 35.880952 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/NameDistributionVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map.Entry;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* File name distribution visitor.
* <p>
* It analyzes file names in fsimage and prints the following information:
* <li>Number of unique file names</li>
 * <li>Number of file names and the corresponding range of how many files
 * reuse each of these names</li>
* <li>Heap saved if the file name objects are reused</li>
*/
@InterfaceAudience.Private
public class NameDistributionVisitor extends TextWriterImageVisitor {
HashMap<String, Integer> counts = new HashMap<String, Integer>();
public NameDistributionVisitor(String filename, boolean printToScreen)
throws IOException {
super(filename, printToScreen);
}
@Override
void finish() throws IOException {
final int BYTEARRAY_OVERHEAD = 24;
write("Total unique file names " + counts.size());
// Columns: Frequency of file occurrence, savings in heap, total files using
// the name and number of file names
final long stats[][] = { { 100000, 0, 0, 0 },
{ 10000, 0, 0, 0 },
{ 1000, 0, 0, 0 },
{ 100, 0, 0, 0 },
{ 10, 0, 0, 0 },
{ 5, 0, 0, 0 },
{ 4, 0, 0, 0 },
{ 3, 0, 0, 0 },
{ 2, 0, 0, 0 }};
int highbound = Integer.MIN_VALUE;
for (Entry<String, Integer> entry : counts.entrySet()) {
highbound = Math.max(highbound, entry.getValue());
for (int i = 0; i < stats.length; i++) {
if (entry.getValue() >= stats[i][0]) {
stats[i][1] += (BYTEARRAY_OVERHEAD + entry.getKey().length())
* (entry.getValue() - 1);
stats[i][2] += entry.getValue();
stats[i][3]++;
break;
}
}
}
long lowbound = 0;
long totalsavings = 0;
for (long[] stat : stats) {
lowbound = stat[0];
totalsavings += stat[1];
String range = lowbound == highbound ? " " + lowbound :
" between " + lowbound + "-" + highbound;
write("\n" + stat[3] + " names are used by " + stat[2] + " files"
+ range + " times. Heap savings ~" + stat[1] + " bytes.");
highbound = (int) stat[0] - 1;
}
    write("\n\nTotal saved heap ~" + totalsavings + " bytes.\n");
super.finish();
}
@Override
void visit(ImageElement element, String value) throws IOException {
if (element == ImageElement.INODE_PATH) {
String filename = value.substring(value.lastIndexOf("/") + 1);
if (counts.containsKey(filename)) {
counts.put(filename, counts.get(filename) + 1);
} else {
counts.put(filename, 1);
}
}
}
@Override
void leaveEnclosingElement() throws IOException {
}
@Override
void start() throws IOException {
}
@Override
void visitEnclosingElement(ImageElement element) throws IOException {
}
@Override
void visitEnclosingElement(ImageElement element, ImageElement key,
String value) throws IOException {
}
}
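// A minimal sketch (not part of the Hadoop source) of the heap-savings arithmetic used
// in finish() above: a name of length L that occurs N times would save roughly
// (24 + L) * (N - 1) bytes if the name object were reused, where 24 is the assumed
// byte-array overhead. The sample name and count are made up.
class NameSavingsExample {
  public static void main(String[] args) {
    final int BYTEARRAY_OVERHEAD = 24;   // same constant as the visitor above
    String name = "part-00000";          // hypothetical frequently reused file name
    int occurrences = 1000;
    long saved = (long) (BYTEARRAY_OVERHEAD + name.length()) * (occurrences - 1);
    System.out.println("~" + saved + " bytes saved by reusing \"" + name + "\"");
  }
}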
| 4,035 | 32.915966 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION;
import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH;
import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE;
import static io.netty.handler.codec.http.HttpHeaderValues.CLOSE;
import static io.netty.handler.codec.http.HttpResponseStatus.BAD_REQUEST;
import static io.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN;
import static io.netty.handler.codec.http.HttpResponseStatus.INTERNAL_SERVER_ERROR;
import static io.netty.handler.codec.http.HttpResponseStatus.METHOD_NOT_ALLOWED;
import static io.netty.handler.codec.http.HttpResponseStatus.NOT_FOUND;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.APPLICATION_JSON_UTF8;
import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.WEBHDFS_PREFIX;
import static org.apache.hadoop.hdfs.server.datanode.web.webhdfs.WebHdfsHandler.WEBHDFS_PREFIX_LENGTH;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.group.ChannelGroup;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.QueryStringDecoder;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Charsets;
/**
* Implement the read-only WebHDFS API for fsimage.
*/
class FSImageHandler extends SimpleChannelInboundHandler<HttpRequest> {
public static final Log LOG = LogFactory.getLog(FSImageHandler.class);
private final FSImageLoader image;
private final ChannelGroup activeChannels;
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
activeChannels.add(ctx.channel());
}
FSImageHandler(FSImageLoader image, ChannelGroup activeChannels) throws IOException {
this.image = image;
this.activeChannels = activeChannels;
}
@Override
public void channelRead0(ChannelHandlerContext ctx, HttpRequest request)
throws Exception {
if (request.method() != HttpMethod.GET) {
DefaultHttpResponse resp = new DefaultHttpResponse(HTTP_1_1,
METHOD_NOT_ALLOWED);
resp.headers().set(CONNECTION, CLOSE);
ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
return;
}
QueryStringDecoder decoder = new QueryStringDecoder(request.uri());
final String op = getOp(decoder);
final String content;
String path = getPath(decoder);
switch (op) {
case "GETFILESTATUS":
content = image.getFileStatus(path);
break;
case "LISTSTATUS":
content = image.listStatus(path);
break;
case "GETACLSTATUS":
content = image.getAclStatus(path);
break;
case "GETXATTRS":
List<String> names = getXattrNames(decoder);
String encoder = getEncoder(decoder);
content = image.getXAttrs(path, names, encoder);
break;
case "LISTXATTRS":
content = image.listXAttrs(path);
break;
default:
throw new IllegalArgumentException("Invalid value for webhdfs parameter"
+ " \"op\"");
}
LOG.info("op=" + op + " target=" + path);
DefaultFullHttpResponse resp = new DefaultFullHttpResponse(HTTP_1_1,
HttpResponseStatus.OK, Unpooled.wrappedBuffer(content
.getBytes(Charsets.UTF_8)));
resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
resp.headers().set(CONNECTION, CLOSE);
ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
ctx.flush();
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
throws Exception {
Exception e = cause instanceof Exception ? (Exception) cause : new
Exception(cause);
final String output = JsonUtil.toJsonString(e);
ByteBuf content = Unpooled.wrappedBuffer(output.getBytes(Charsets.UTF_8));
final DefaultFullHttpResponse resp = new DefaultFullHttpResponse(
HTTP_1_1, INTERNAL_SERVER_ERROR, content);
resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
if (e instanceof IllegalArgumentException) {
resp.setStatus(BAD_REQUEST);
} else if (e instanceof FileNotFoundException) {
resp.setStatus(NOT_FOUND);
} else if (e instanceof IOException) {
resp.setStatus(FORBIDDEN);
}
resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes());
resp.headers().set(CONNECTION, CLOSE);
ctx.write(resp).addListener(ChannelFutureListener.CLOSE);
}
private static String getOp(QueryStringDecoder decoder) {
Map<String, List<String>> parameters = decoder.parameters();
return parameters.containsKey("op")
? StringUtils.toUpperCase(parameters.get("op").get(0)) : null;
}
private static List<String> getXattrNames(QueryStringDecoder decoder) {
Map<String, List<String>> parameters = decoder.parameters();
return parameters.get("xattr.name");
}
private static String getEncoder(QueryStringDecoder decoder) {
Map<String, List<String>> parameters = decoder.parameters();
return parameters.containsKey("encoding") ? parameters.get("encoding").get(
0) : null;
}
private static String getPath(QueryStringDecoder decoder)
throws FileNotFoundException {
String path = decoder.path();
if (path.startsWith(WEBHDFS_PREFIX)) {
return path.substring(WEBHDFS_PREFIX_LENGTH);
} else {
throw new FileNotFoundException("Path: " + path + " should " +
"start with " + WEBHDFS_PREFIX);
}
}
}
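// A minimal sketch (not part of the Hadoop source) of a client hitting the read-only
// WebHDFS endpoint served by the handler above. It assumes a WebImageViewer is
// already listening on the default localhost:5978; only GET requests are accepted,
// anything else is answered with 405.
class FSImageHandlerClientExample {
  public static void main(String[] args) throws IOException {
    java.net.URL url = new java.net.URL(
        "http://localhost:5978/webhdfs/v1/?op=LISTSTATUS"); // list the root directory
    java.net.HttpURLConnection conn =
        (java.net.HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    System.out.println("HTTP " + conn.getResponseCode());   // 200 with a JSON body on success
    conn.disconnect();
  }
}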
| 7,149 | 38.285714 | 102 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsImageVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.IOException;
import java.util.Formatter;
import java.util.LinkedList;
/**
 * LsImageVisitor displays the contents of the namespace in a format very similar
* to the output of ls/lsr. Entries are marked as directories or not,
* permissions listed, replication, username and groupname, along with size,
* modification date and full path.
*
* Note: A significant difference between the output of the lsr command
* and this image visitor is that this class cannot sort the file entries;
* they are listed in the order they are stored within the fsimage file.
* Therefore, the output of this class cannot be directly compared to the
* output of the lsr command.
*/
class LsImageVisitor extends TextWriterImageVisitor {
final private LinkedList<ImageElement> elemQ = new LinkedList<ImageElement>();
private int numBlocks;
private String perms;
private int replication;
private String username;
private String group;
private long filesize;
private String modTime;
private String path;
private String linkTarget;
private boolean inInode = false;
final private StringBuilder sb = new StringBuilder();
final private Formatter formatter = new Formatter(sb);
public LsImageVisitor(String filename) throws IOException {
super(filename);
}
public LsImageVisitor(String filename, boolean printToScreen) throws IOException {
super(filename, printToScreen);
}
/**
* Start a new line of output, reset values.
*/
private void newLine() {
numBlocks = 0;
perms = username = group = path = linkTarget = "";
    filesize = 0L;
replication = 0;
inInode = true;
}
/**
* All the values have been gathered. Print them to the console in an
* ls-style format.
*/
private final static int widthRepl = 2;
private final static int widthUser = 8;
private final static int widthGroup = 10;
private final static int widthSize = 10;
private final static int widthMod = 10;
private final static String lsStr = " %" + widthRepl + "s %" + widthUser +
"s %" + widthGroup + "s %" + widthSize +
"d %" + widthMod + "s %s";
private void printLine() throws IOException {
sb.append(numBlocks < 0 ? "d" : "-");
sb.append(perms);
if (0 != linkTarget.length()) {
path = path + " -> " + linkTarget;
}
formatter.format(lsStr, replication > 0 ? replication : "-",
username, group, filesize, modTime, path);
sb.append("\n");
write(sb.toString());
sb.setLength(0); // clear string builder
inInode = false;
}
@Override
void start() throws IOException {}
@Override
void finish() throws IOException {
super.finish();
}
@Override
void finishAbnormally() throws IOException {
System.out.println("Input ended unexpectedly.");
super.finishAbnormally();
}
@Override
void leaveEnclosingElement() throws IOException {
ImageElement elem = elemQ.pop();
if(elem == ImageElement.INODE)
printLine();
}
// Maintain state of location within the image tree and record
// values needed to display the inode in ls-style format.
@Override
void visit(ImageElement element, String value) throws IOException {
if(inInode) {
switch(element) {
case INODE_PATH:
if(value.equals("")) path = "/";
else path = value;
break;
case PERMISSION_STRING:
perms = value;
break;
case REPLICATION:
replication = Integer.parseInt(value);
break;
case USER_NAME:
username = value;
break;
case GROUP_NAME:
group = value;
break;
case NUM_BYTES:
filesize += Long.parseLong(value);
break;
case MODIFICATION_TIME:
modTime = value;
break;
case SYMLINK:
linkTarget = value;
break;
default:
// This is OK. We're not looking for all the values.
break;
}
}
}
@Override
void visitEnclosingElement(ImageElement element) throws IOException {
elemQ.push(element);
if(element == ImageElement.INODE)
newLine();
}
@Override
void visitEnclosingElement(ImageElement element,
ImageElement key, String value) throws IOException {
elemQ.push(element);
if(element == ImageElement.INODE)
newLine();
else if (element == ImageElement.BLOCKS)
numBlocks = Integer.parseInt(value);
}
}
| 5,361 | 28.955307 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.DataInputStream;
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutFlags;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.security.token.delegation.DelegationKey;
/**
* ImageLoaderCurrent processes Hadoop FSImage files and walks over
* them using a provided ImageVisitor, calling the visitor at each element
* enumerated below.
*
* The only difference between v18 and v19 was the utilization of the
 * stickybit.  Therefore, the same viewer can read either format.
*
 * Version -19 fsimage layout (with changes from -16 up):
* Image version (int)
 * Namespace ID (int)
* NumFiles (long)
* Generation stamp (long)
* INodes (count = NumFiles)
* INode
* Path (String)
* Replication (short)
* Modification Time (long as date)
* Access Time (long) // added in -16
* Block size (long)
* Num blocks (int)
* Blocks (count = Num blocks)
* Block
* Block ID (long)
* Num bytes (long)
* Generation stamp (long)
* Namespace Quota (long)
* Diskspace Quota (long) // added in -18
* Permissions
* Username (String)
* Groupname (String)
* OctalPerms (short -> String) // Modified in -19
* Symlink (String) // added in -23
* NumINodesUnderConstruction (int)
* INodesUnderConstruction (count = NumINodesUnderConstruction)
* INodeUnderConstruction
* Path (bytes as string)
* Replication (short)
* Modification time (long as date)
* Preferred block size (long)
* Num blocks (int)
* Blocks
* Block
* Block ID (long)
* Num bytes (long)
* Generation stamp (long)
* Permissions
* Username (String)
* Groupname (String)
* OctalPerms (short -> String)
* Client Name (String)
* Client Machine (String)
* NumLocations (int)
* DatanodeDescriptors (count = numLocations) // not loaded into memory
* short // but still in file
* long
* string
* long
* int
* string
* string
* enum
* CurrentDelegationKeyId (int)
* NumDelegationKeys (int)
* DelegationKeys (count = NumDelegationKeys)
* DelegationKeyLength (vint)
* DelegationKey (bytes)
* DelegationTokenSequenceNumber (int)
* NumDelegationTokens (int)
* DelegationTokens (count = NumDelegationTokens)
* DelegationTokenIdentifier
* owner (String)
* renewer (String)
* realUser (String)
* issueDate (vlong)
* maxDate (vlong)
* sequenceNumber (vint)
* masterKeyId (vint)
* expiryTime (long)
*
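 * A minimal usage sketch (the file names are hypothetical; in practice the
 * OfflineImageViewer front-end drives this class):
 * <pre>{@code
 * DataInputStream in = new DataInputStream(
 *     new BufferedInputStream(new FileInputStream("fsimage")));
 * ImageLoaderCurrent loader = new ImageLoaderCurrent();
 * if (loader.canLoadVersion(-51)) {
 *   loader.loadImage(in, new IndentedImageVisitor("fsimage.txt"), true);
 * }
 * }</pre>
 *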
*/
class ImageLoaderCurrent implements ImageLoader {
protected final DateFormat dateFormat =
new SimpleDateFormat("yyyy-MM-dd HH:mm");
private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
-24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38, -39,
-40, -41, -42, -43, -44, -45, -46, -47, -48, -49, -50, -51 };
private int imageVersion = 0;
private final Map<Long, Boolean> subtreeMap = new HashMap<Long, Boolean>();
private final Map<Long, String> dirNodeMap = new HashMap<Long, String>();
/* (non-Javadoc)
* @see ImageLoader#canProcessVersion(int)
*/
@Override
public boolean canLoadVersion(int version) {
for(int v : versions)
if(v == version) return true;
return false;
}
/* (non-Javadoc)
* @see ImageLoader#processImage(java.io.DataInputStream, ImageVisitor, boolean)
*/
@Override
public void loadImage(DataInputStream in, ImageVisitor v,
boolean skipBlocks) throws IOException {
boolean done = false;
try {
v.start();
v.visitEnclosingElement(ImageElement.FS_IMAGE);
imageVersion = in.readInt();
if( !canLoadVersion(imageVersion))
throw new IOException("Cannot process fslayout version " + imageVersion);
if (NameNodeLayoutVersion.supports(Feature.ADD_LAYOUT_FLAGS, imageVersion)) {
LayoutFlags.read(in);
}
v.visit(ImageElement.IMAGE_VERSION, imageVersion);
v.visit(ImageElement.NAMESPACE_ID, in.readInt());
long numInodes = in.readLong();
v.visit(ImageElement.GENERATION_STAMP, in.readLong());
if (NameNodeLayoutVersion.supports(Feature.SEQUENTIAL_BLOCK_ID, imageVersion)) {
v.visit(ImageElement.GENERATION_STAMP_V2, in.readLong());
v.visit(ImageElement.GENERATION_STAMP_V1_LIMIT, in.readLong());
v.visit(ImageElement.LAST_ALLOCATED_BLOCK_ID, in.readLong());
}
if (NameNodeLayoutVersion.supports(Feature.STORED_TXIDS, imageVersion)) {
v.visit(ImageElement.TRANSACTION_ID, in.readLong());
}
if (NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
v.visit(ImageElement.LAST_INODE_ID, in.readLong());
}
boolean supportSnapshot = NameNodeLayoutVersion.supports(Feature.SNAPSHOT,
imageVersion);
if (supportSnapshot) {
v.visit(ImageElement.SNAPSHOT_COUNTER, in.readInt());
int numSnapshots = in.readInt();
v.visit(ImageElement.NUM_SNAPSHOTS_TOTAL, numSnapshots);
for (int i = 0; i < numSnapshots; i++) {
processSnapshot(in, v);
}
}
if (NameNodeLayoutVersion.supports(Feature.FSIMAGE_COMPRESSION, imageVersion)) {
boolean isCompressed = in.readBoolean();
v.visit(ImageElement.IS_COMPRESSED, String.valueOf(isCompressed));
if (isCompressed) {
String codecClassName = Text.readString(in);
v.visit(ImageElement.COMPRESS_CODEC, codecClassName);
CompressionCodecFactory codecFac = new CompressionCodecFactory(
new Configuration());
CompressionCodec codec = codecFac.getCodecByClassName(codecClassName);
if (codec == null) {
throw new IOException("Image compression codec not supported: "
+ codecClassName);
}
in = new DataInputStream(codec.createInputStream(in));
}
}
processINodes(in, v, numInodes, skipBlocks, supportSnapshot);
subtreeMap.clear();
dirNodeMap.clear();
processINodesUC(in, v, skipBlocks);
if (NameNodeLayoutVersion.supports(Feature.DELEGATION_TOKEN, imageVersion)) {
processDelegationTokens(in, v);
}
if (NameNodeLayoutVersion.supports(Feature.CACHING, imageVersion)) {
processCacheManagerState(in, v);
}
v.leaveEnclosingElement(); // FSImage
done = true;
} finally {
if (done) {
v.finish();
} else {
v.finishAbnormally();
}
}
}
/**
* Process CacheManager state from the fsimage.
*/
private void processCacheManagerState(DataInputStream in, ImageVisitor v)
throws IOException {
v.visit(ImageElement.CACHE_NEXT_ENTRY_ID, in.readLong());
final int numPools = in.readInt();
for (int i=0; i<numPools; i++) {
v.visit(ImageElement.CACHE_POOL_NAME, Text.readString(in));
processCachePoolPermission(in, v);
v.visit(ImageElement.CACHE_POOL_WEIGHT, in.readInt());
}
final int numEntries = in.readInt();
for (int i=0; i<numEntries; i++) {
v.visit(ImageElement.CACHE_ENTRY_PATH, Text.readString(in));
v.visit(ImageElement.CACHE_ENTRY_REPLICATION, in.readShort());
v.visit(ImageElement.CACHE_ENTRY_POOL_NAME, Text.readString(in));
}
}
/**
* Process the Delegation Token related section in fsimage.
*
* @param in DataInputStream to process
* @param v Visitor to walk over records
*/
private void processDelegationTokens(DataInputStream in, ImageVisitor v)
throws IOException {
v.visit(ImageElement.CURRENT_DELEGATION_KEY_ID, in.readInt());
int numDKeys = in.readInt();
v.visitEnclosingElement(ImageElement.DELEGATION_KEYS,
ImageElement.NUM_DELEGATION_KEYS, numDKeys);
for(int i =0; i < numDKeys; i++) {
DelegationKey key = new DelegationKey();
key.readFields(in);
v.visit(ImageElement.DELEGATION_KEY, key.toString());
}
v.leaveEnclosingElement();
v.visit(ImageElement.DELEGATION_TOKEN_SEQUENCE_NUMBER, in.readInt());
int numDTokens = in.readInt();
v.visitEnclosingElement(ImageElement.DELEGATION_TOKENS,
ImageElement.NUM_DELEGATION_TOKENS, numDTokens);
for(int i=0; i<numDTokens; i++){
DelegationTokenIdentifier id = new DelegationTokenIdentifier();
id.readFields(in);
long expiryTime = in.readLong();
v.visitEnclosingElement(ImageElement.DELEGATION_TOKEN_IDENTIFIER);
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_KIND,
id.getKind().toString());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_SEQNO,
id.getSequenceNumber());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_OWNER,
id.getOwner().toString());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_RENEWER,
id.getRenewer().toString());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_REALUSER,
id.getRealUser().toString());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE,
id.getIssueDate());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_MAX_DATE,
id.getMaxDate());
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME,
expiryTime);
v.visit(ImageElement.DELEGATION_TOKEN_IDENTIFIER_MASTER_KEY_ID,
id.getMasterKeyId());
v.leaveEnclosingElement(); // DELEGATION_TOKEN_IDENTIFIER
}
v.leaveEnclosingElement(); // DELEGATION_TOKENS
}
/**
* Process the INodes under construction section of the fsimage.
*
* @param in DataInputStream to process
* @param v Visitor to walk over inodes
* @param skipBlocks Walk over each block?
*/
private void processINodesUC(DataInputStream in, ImageVisitor v,
boolean skipBlocks) throws IOException {
int numINUC = in.readInt();
v.visitEnclosingElement(ImageElement.INODES_UNDER_CONSTRUCTION,
ImageElement.NUM_INODES_UNDER_CONSTRUCTION, numINUC);
for(int i = 0; i < numINUC; i++) {
v.visitEnclosingElement(ImageElement.INODE_UNDER_CONSTRUCTION);
byte [] name = FSImageSerialization.readBytes(in);
String n = new String(name, "UTF8");
v.visit(ImageElement.INODE_PATH, n);
if (NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
long inodeId = in.readLong();
v.visit(ImageElement.INODE_ID, inodeId);
}
v.visit(ImageElement.REPLICATION, in.readShort());
v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
v.visit(ImageElement.PREFERRED_BLOCK_SIZE, in.readLong());
int numBlocks = in.readInt();
processBlocks(in, v, numBlocks, skipBlocks);
processPermission(in, v);
v.visit(ImageElement.CLIENT_NAME, FSImageSerialization.readString(in));
v.visit(ImageElement.CLIENT_MACHINE, FSImageSerialization.readString(in));
      // Skip over the datanode descriptors, which are still stored in the
      // file but are no longer used or loaded into memory
int numLocs = in.readInt();
for(int j = 0; j < numLocs; j++) {
in.readShort();
in.readLong();
in.readLong();
in.readLong();
in.readInt();
FSImageSerialization.readString(in);
FSImageSerialization.readString(in);
WritableUtils.readEnum(in, AdminStates.class);
}
v.leaveEnclosingElement(); // INodeUnderConstruction
}
v.leaveEnclosingElement(); // INodesUnderConstruction
}
/**
* Process the blocks section of the fsimage.
*
* @param in Datastream to process
* @param v Visitor to walk over inodes
* @param skipBlocks Walk over each block?
*/
private void processBlocks(DataInputStream in, ImageVisitor v,
int numBlocks, boolean skipBlocks) throws IOException {
v.visitEnclosingElement(ImageElement.BLOCKS,
ImageElement.NUM_BLOCKS, numBlocks);
// directory or symlink or reference node, no blocks to process
if(numBlocks < 0) {
v.leaveEnclosingElement(); // Blocks
return;
}
if(skipBlocks) {
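      // Each block record is three longs (block id, num bytes, generation
      // stamp), i.e. 3 * 8 = 24 bytes, so the whole section can be skipped
      // without parsing individual blocks.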
int bytesToSkip = ((Long.SIZE * 3 /* fields */) / 8 /*bits*/) * numBlocks;
if(in.skipBytes(bytesToSkip) != bytesToSkip)
throw new IOException("Error skipping over blocks");
} else {
for(int j = 0; j < numBlocks; j++) {
v.visitEnclosingElement(ImageElement.BLOCK);
v.visit(ImageElement.BLOCK_ID, in.readLong());
v.visit(ImageElement.NUM_BYTES, in.readLong());
v.visit(ImageElement.GENERATION_STAMP, in.readLong());
v.leaveEnclosingElement(); // Block
}
}
v.leaveEnclosingElement(); // Blocks
}
/**
* Extract the INode permissions stored in the fsimage file.
*
* @param in Datastream to process
* @param v Visitor to walk over inodes
*/
private void processPermission(DataInputStream in, ImageVisitor v)
throws IOException {
v.visitEnclosingElement(ImageElement.PERMISSIONS);
v.visit(ImageElement.USER_NAME, Text.readString(in));
v.visit(ImageElement.GROUP_NAME, Text.readString(in));
FsPermission fsp = new FsPermission(in.readShort());
v.visit(ImageElement.PERMISSION_STRING, fsp.toString());
v.leaveEnclosingElement(); // Permissions
}
/**
* Extract CachePool permissions stored in the fsimage file.
*
* @param in Datastream to process
* @param v Visitor to walk over inodes
*/
private void processCachePoolPermission(DataInputStream in, ImageVisitor v)
throws IOException {
v.visitEnclosingElement(ImageElement.PERMISSIONS);
v.visit(ImageElement.CACHE_POOL_OWNER_NAME, Text.readString(in));
v.visit(ImageElement.CACHE_POOL_GROUP_NAME, Text.readString(in));
FsPermission fsp = new FsPermission(in.readShort());
v.visit(ImageElement.CACHE_POOL_PERMISSION_STRING, fsp.toString());
v.leaveEnclosingElement(); // Permissions
}
/**
* Process the INode records stored in the fsimage.
*
* @param in Datastream to process
* @param v Visitor to walk over INodes
* @param numInodes Number of INodes stored in file
* @param skipBlocks Process all the blocks within the INode?
* @param supportSnapshot Whether or not the imageVersion supports snapshot
* @throws VisitException
* @throws IOException
*/
private void processINodes(DataInputStream in, ImageVisitor v,
long numInodes, boolean skipBlocks, boolean supportSnapshot)
throws IOException {
v.visitEnclosingElement(ImageElement.INODES,
ImageElement.NUM_INODES, numInodes);
if (NameNodeLayoutVersion.supports(Feature.FSIMAGE_NAME_OPTIMIZATION, imageVersion)) {
if (!supportSnapshot) {
processLocalNameINodes(in, v, numInodes, skipBlocks);
} else {
processLocalNameINodesWithSnapshot(in, v, skipBlocks);
}
} else { // full path name
processFullNameINodes(in, v, numInodes, skipBlocks);
}
v.leaveEnclosingElement(); // INodes
}
/**
   * Process an image in which inodes are stored with local (not full) path names.
   *
   * @param in image stream
   * @param v visitor
   * @param numInodes number of inodes to read
   * @param skipBlocks skip blocks or not
   * @throws IOException if any error occurs
*/
private void processLocalNameINodes(DataInputStream in, ImageVisitor v,
long numInodes, boolean skipBlocks) throws IOException {
// process root
processINode(in, v, skipBlocks, "", false);
numInodes--;
while (numInodes > 0) {
numInodes -= processDirectory(in, v, skipBlocks);
}
}
private int processDirectory(DataInputStream in, ImageVisitor v,
boolean skipBlocks) throws IOException {
String parentName = FSImageSerialization.readString(in);
return processChildren(in, v, skipBlocks, parentName);
}
/**
* Process image with local path name and snapshot support
*
* @param in image stream
* @param v visitor
* @param skipBlocks skip blocks or not
*/
private void processLocalNameINodesWithSnapshot(DataInputStream in,
ImageVisitor v, boolean skipBlocks) throws IOException {
// process root
processINode(in, v, skipBlocks, "", false);
processDirectoryWithSnapshot(in, v, skipBlocks);
}
/**
* Process directories when snapshot is supported.
*/
private void processDirectoryWithSnapshot(DataInputStream in, ImageVisitor v,
boolean skipBlocks) throws IOException {
// 1. load dir node id
long inodeId = in.readLong();
String dirName = dirNodeMap.remove(inodeId);
Boolean visitedRef = subtreeMap.get(inodeId);
if (visitedRef != null) {
if (visitedRef.booleanValue()) { // the subtree has been visited
return;
} else { // first time to visit
subtreeMap.put(inodeId, true);
}
} // else the dir is not linked by a RefNode, thus cannot be revisited
// 2. load possible snapshots
processSnapshots(in, v, dirName);
// 3. load children nodes
processChildren(in, v, skipBlocks, dirName);
// 4. load possible directory diff list
processDirectoryDiffList(in, v, dirName);
// recursively process sub-directories
final int numSubTree = in.readInt();
for (int i = 0; i < numSubTree; i++) {
processDirectoryWithSnapshot(in, v, skipBlocks);
}
}
/**
* Process snapshots of a snapshottable directory
*/
private void processSnapshots(DataInputStream in, ImageVisitor v,
String rootName) throws IOException {
final int numSnapshots = in.readInt();
if (numSnapshots >= 0) {
v.visitEnclosingElement(ImageElement.SNAPSHOTS,
ImageElement.NUM_SNAPSHOTS, numSnapshots);
for (int i = 0; i < numSnapshots; i++) {
// process snapshot
v.visitEnclosingElement(ImageElement.SNAPSHOT);
v.visit(ImageElement.SNAPSHOT_ID, in.readInt());
v.leaveEnclosingElement();
}
v.visit(ImageElement.SNAPSHOT_QUOTA, in.readInt());
v.leaveEnclosingElement();
}
}
private void processSnapshot(DataInputStream in, ImageVisitor v)
throws IOException {
v.visitEnclosingElement(ImageElement.SNAPSHOT);
v.visit(ImageElement.SNAPSHOT_ID, in.readInt());
// process root of snapshot
v.visitEnclosingElement(ImageElement.SNAPSHOT_ROOT);
processINode(in, v, true, "", false);
v.leaveEnclosingElement();
v.leaveEnclosingElement();
}
private void processDirectoryDiffList(DataInputStream in, ImageVisitor v,
String currentINodeName) throws IOException {
final int numDirDiff = in.readInt();
if (numDirDiff >= 0) {
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFFS,
ImageElement.NUM_SNAPSHOT_DIR_DIFF, numDirDiff);
for (int i = 0; i < numDirDiff; i++) {
        // process directory diffs in reverse chronological order
processDirectoryDiff(in, v, currentINodeName);
}
v.leaveEnclosingElement();
}
}
private void processDirectoryDiff(DataInputStream in, ImageVisitor v,
String currentINodeName) throws IOException {
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF);
int snapshotId = in.readInt();
v.visit(ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId);
v.visit(ImageElement.SNAPSHOT_DIR_DIFF_CHILDREN_SIZE, in.readInt());
// process snapshotINode
boolean useRoot = in.readBoolean();
if (!useRoot) {
if (in.readBoolean()) {
v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_DIRECTORY_ATTRIBUTES);
if (NameNodeLayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
processINodeDirectoryAttributes(in, v, currentINodeName);
} else {
processINode(in, v, true, currentINodeName, true);
}
v.leaveEnclosingElement();
}
}
// process createdList
int createdSize = in.readInt();
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF_CREATEDLIST,
ImageElement.SNAPSHOT_DIR_DIFF_CREATEDLIST_SIZE, createdSize);
for (int i = 0; i < createdSize; i++) {
String createdNode = FSImageSerialization.readString(in);
v.visit(ImageElement.SNAPSHOT_DIR_DIFF_CREATED_INODE, createdNode);
}
v.leaveEnclosingElement();
// process deletedList
int deletedSize = in.readInt();
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF_DELETEDLIST,
ImageElement.SNAPSHOT_DIR_DIFF_DELETEDLIST_SIZE, deletedSize);
for (int i = 0; i < deletedSize; i++) {
v.visitEnclosingElement(ImageElement.SNAPSHOT_DIR_DIFF_DELETED_INODE);
processINode(in, v, false, currentINodeName, true);
v.leaveEnclosingElement();
}
v.leaveEnclosingElement();
v.leaveEnclosingElement();
}
private void processINodeDirectoryAttributes(DataInputStream in, ImageVisitor v,
String parentName) throws IOException {
final String pathName = readINodePath(in, parentName);
v.visit(ImageElement.INODE_PATH, pathName);
processPermission(in, v);
v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
v.visit(ImageElement.NS_QUOTA, in.readLong());
v.visit(ImageElement.DS_QUOTA, in.readLong());
}
/** Process children under a directory */
private int processChildren(DataInputStream in, ImageVisitor v,
boolean skipBlocks, String parentName) throws IOException {
int numChildren = in.readInt();
for (int i = 0; i < numChildren; i++) {
processINode(in, v, skipBlocks, parentName, false);
}
return numChildren;
}
/**
* Process image with full path name
*
* @param in image stream
* @param v visitor
   * @param numInodes number of inodes to read
   * @param skipBlocks skip blocks or not
   * @throws IOException if any error occurs
*/
private void processFullNameINodes(DataInputStream in, ImageVisitor v,
long numInodes, boolean skipBlocks) throws IOException {
for(long i = 0; i < numInodes; i++) {
processINode(in, v, skipBlocks, null, false);
}
}
private String readINodePath(DataInputStream in, String parentName)
throws IOException {
String pathName = FSImageSerialization.readString(in);
if (parentName != null) { // local name
pathName = "/" + pathName;
if (!"/".equals(parentName)) { // children of non-root directory
pathName = parentName + pathName;
}
}
return pathName;
}
/**
* Process an INode
*
* @param in image stream
* @param v visitor
* @param skipBlocks skip blocks or not
* @param parentName the name of its parent node
* @param isSnapshotCopy whether or not the inode is a snapshot copy
* @throws IOException
*/
private void processINode(DataInputStream in, ImageVisitor v,
boolean skipBlocks, String parentName, boolean isSnapshotCopy)
throws IOException {
boolean supportSnapshot =
NameNodeLayoutVersion.supports(Feature.SNAPSHOT, imageVersion);
boolean supportInodeId =
NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion);
v.visitEnclosingElement(ImageElement.INODE);
final String pathName = readINodePath(in, parentName);
v.visit(ImageElement.INODE_PATH, pathName);
long inodeId = HdfsConstants.GRANDFATHER_INODE_ID;
if (supportInodeId) {
inodeId = in.readLong();
v.visit(ImageElement.INODE_ID, inodeId);
}
v.visit(ImageElement.REPLICATION, in.readShort());
v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
if(NameNodeLayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion))
v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong()));
v.visit(ImageElement.BLOCK_SIZE, in.readLong());
int numBlocks = in.readInt();
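    // numBlocks doubles as a type tag: >= 0 for a file, -1 for a directory,
    // -2 for a symlink and -3 for a reference node (see the branches below).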
processBlocks(in, v, numBlocks, skipBlocks);
if (numBlocks >= 0) { // File
if (supportSnapshot) {
// make sure subtreeMap only contains entry for directory
subtreeMap.remove(inodeId);
// process file diffs
processFileDiffList(in, v, parentName);
if (isSnapshotCopy) {
boolean underConstruction = in.readBoolean();
if (underConstruction) {
v.visit(ImageElement.CLIENT_NAME,
FSImageSerialization.readString(in));
v.visit(ImageElement.CLIENT_MACHINE,
FSImageSerialization.readString(in));
}
}
}
processPermission(in, v);
} else if (numBlocks == -1) { // Directory
if (supportSnapshot && supportInodeId) {
dirNodeMap.put(inodeId, pathName);
}
v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
if (NameNodeLayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
if (supportSnapshot) {
boolean snapshottable = in.readBoolean();
if (!snapshottable) {
boolean withSnapshot = in.readBoolean();
v.visit(ImageElement.IS_WITHSNAPSHOT_DIR, Boolean.toString(withSnapshot));
} else {
v.visit(ImageElement.IS_SNAPSHOTTABLE_DIR, Boolean.toString(snapshottable));
}
}
processPermission(in, v);
} else if (numBlocks == -2) {
v.visit(ImageElement.SYMLINK, Text.readString(in));
processPermission(in, v);
} else if (numBlocks == -3) { // reference node
final boolean isWithName = in.readBoolean();
int snapshotId = in.readInt();
if (isWithName) {
v.visit(ImageElement.SNAPSHOT_LAST_SNAPSHOT_ID, snapshotId);
} else {
v.visit(ImageElement.SNAPSHOT_DST_SNAPSHOT_ID, snapshotId);
}
final boolean firstReferred = in.readBoolean();
if (firstReferred) {
// if a subtree is linked by multiple "parents", the corresponding dir
// must be referred by a reference node. we put the reference node into
// the subtreeMap here and let its value be false. when we later visit
// the subtree for the first time, we change the value to true.
subtreeMap.put(inodeId, false);
v.visitEnclosingElement(ImageElement.SNAPSHOT_REF_INODE);
processINode(in, v, skipBlocks, parentName, isSnapshotCopy);
v.leaveEnclosingElement(); // referred inode
} else {
v.visit(ImageElement.SNAPSHOT_REF_INODE_ID, in.readLong());
}
}
v.leaveEnclosingElement(); // INode
}
private void processINodeFileAttributes(DataInputStream in, ImageVisitor v,
String parentName) throws IOException {
final String pathName = readINodePath(in, parentName);
v.visit(ImageElement.INODE_PATH, pathName);
processPermission(in, v);
v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
if(NameNodeLayoutVersion.supports(Feature.FILE_ACCESS_TIME, imageVersion)) {
v.visit(ImageElement.ACCESS_TIME, formatDate(in.readLong()));
}
v.visit(ImageElement.REPLICATION, in.readShort());
v.visit(ImageElement.BLOCK_SIZE, in.readLong());
}
private void processFileDiffList(DataInputStream in, ImageVisitor v,
String currentINodeName) throws IOException {
final int size = in.readInt();
if (size >= 0) {
v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFFS,
ImageElement.NUM_SNAPSHOT_FILE_DIFF, size);
for (int i = 0; i < size; i++) {
processFileDiff(in, v, currentINodeName);
}
v.leaveEnclosingElement();
}
}
private void processFileDiff(DataInputStream in, ImageVisitor v,
String currentINodeName) throws IOException {
int snapshotId = in.readInt();
v.visitEnclosingElement(ImageElement.SNAPSHOT_FILE_DIFF,
ImageElement.SNAPSHOT_DIFF_SNAPSHOTID, snapshotId);
v.visit(ImageElement.SNAPSHOT_FILE_SIZE, in.readLong());
if (in.readBoolean()) {
v.visitEnclosingElement(ImageElement.SNAPSHOT_INODE_FILE_ATTRIBUTES);
if (NameNodeLayoutVersion.supports(Feature.OPTIMIZE_SNAPSHOT_INODES, imageVersion)) {
processINodeFileAttributes(in, v, currentINodeName);
} else {
processINode(in, v, true, currentINodeName, true);
}
v.leaveEnclosingElement();
}
v.leaveEnclosingElement();
}
/**
* Helper method to format dates during processing.
* @param date Date as read from image file
* @return String version of date format
*/
private String formatDate(long date) {
return dateFormat.format(new Date(date));
}
}
| 30,442 | 36.03528 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SectionName;
import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.LimitInputStream;
import org.apache.hadoop.util.Time;
import org.fusesource.leveldbjni.JniDBFactory;
import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedInputStream;
import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
* This class reads the protobuf-based fsimage and generates text output
 * for each inode to {@link PBImageTextWriter#out}. A subclass implements
 * {@link #getEntry(String, INode)} to generate a formatted string for each
 * inode.
*
 * Since the protobuf-based fsimage does not guarantee the order of inodes and
 * directories, PBImageTextWriter runs a two-phase scan:
*
* <ol>
 * <li>In the first phase, PBImageTextWriter scans the INode section to read the
 * filename of each directory. It also scans the INode_Dir section to load the
 * relationships between each directory and its children. It uses this metadata
 * to build the FS namespace, which is stored in a {@link MetadataMap}.</li>
 * <li>In the second phase, PBImageTextWriter re-scans the INode section. For
 * each inode, it looks up the path of its parent directory in the
 * {@link MetadataMap} and generates the output.</li>
* </ol>
*
 * Two variants of {@link MetadataMap} are provided. {@link InMemoryMetadataDB}
 * stores all metadata in memory (O(n) memory) while
 * {@link LevelDBMetadataMap} stores metadata in LevelDB on disk (O(1) memory).
 * Users can choose between them based on the time/space tradeoff.
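 *
 * A minimal subclass sketch (the class name and output format below are
 * illustrative only, not the format produced by any shipped writer):
 * <pre>{@code
 * class PathOnlyWriter extends PBImageTextWriter {
 *   PathOnlyWriter(PrintStream out, String tempPath) throws IOException {
 *     super(out, tempPath);
 *   }
 *   @Override
 *   protected String getEntry(String parent, INode inode) {
 *     return parent + "/" + inode.getName().toStringUtf8();
 *   }
 * }
 * }</pre>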
*/
abstract class PBImageTextWriter implements Closeable {
private static final Logger LOG =
LoggerFactory.getLogger(PBImageTextWriter.class);
/**
* This metadata map is used to construct the namespace before generating
* text outputs.
*
* It contains two mapping relationships:
* <p>
* <li>It maps each inode (inode Id) to its parent directory (inode Id).</li>
   * <li>It maps each directory inode Id to the directory's name.</li>
* </p>
*/
private static interface MetadataMap extends Closeable {
/**
* Associate an inode with its parent directory.
*/
public void putDirChild(long parentId, long childId) throws IOException;
/**
* Associate a directory with its inode Id.
*/
public void putDir(INode dir) throws IOException;
/** Get the full path of the parent directory for the given inode. */
public String getParentPath(long inode) throws IOException;
/** Synchronize metadata to persistent storage, if possible */
public void sync() throws IOException;
}
/**
* Maintain all the metadata in memory.
*/
private static class InMemoryMetadataDB implements MetadataMap {
/**
* Represent a directory in memory.
*/
private static class Dir {
private final long inode;
private Dir parent = null;
private String name;
private String path = null; // cached full path of the directory.
Dir(long inode, String name) {
this.inode = inode;
this.name = name;
}
private void setParent(Dir parent) {
Preconditions.checkState(this.parent == null);
this.parent = parent;
}
/**
* Returns the full path of this directory.
*/
private String getPath() {
if (this.parent == null) {
return "/";
}
if (this.path == null) {
this.path = new Path(parent.getPath(), name.isEmpty() ? "/" : name).
toString();
this.name = null;
}
return this.path;
}
@Override
public boolean equals(Object o) {
return o instanceof Dir && inode == ((Dir) o).inode;
}
@Override
public int hashCode() {
return Long.valueOf(inode).hashCode();
}
}
/** INode Id to Dir object mapping */
private Map<Long, Dir> dirMap = new HashMap<>();
/** Children to parent directory INode ID mapping. */
private Map<Long, Dir> dirChildMap = new HashMap<>();
InMemoryMetadataDB() {
}
@Override
public void close() throws IOException {
}
@Override
public void putDirChild(long parentId, long childId) {
Dir parent = dirMap.get(parentId);
Dir child = dirMap.get(childId);
if (child != null) {
child.setParent(parent);
}
Preconditions.checkState(!dirChildMap.containsKey(childId));
dirChildMap.put(childId, parent);
}
@Override
public void putDir(INode p) {
Preconditions.checkState(!dirMap.containsKey(p.getId()));
Dir dir = new Dir(p.getId(), p.getName().toStringUtf8());
dirMap.put(p.getId(), dir);
}
public String getParentPath(long inode) throws IOException {
if (inode == INodeId.ROOT_INODE_ID) {
return "";
}
Dir parent = dirChildMap.get(inode);
Preconditions.checkState(parent != null,
"Can not find parent directory for INode: %s", inode);
return parent.getPath();
}
@Override
public void sync() {
}
}
/**
* A MetadataMap that stores metadata in LevelDB.
*/
private static class LevelDBMetadataMap implements MetadataMap {
/**
* Store metadata in LevelDB.
*/
private static class LevelDBStore implements Closeable {
private DB db = null;
private WriteBatch batch = null;
private int writeCount = 0;
private static final int BATCH_SIZE = 1024;
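      // Puts are buffered in a WriteBatch and flushed to the DB once
      // BATCH_SIZE writes have accumulated (see put() and sync() below).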
LevelDBStore(final File dbPath) throws IOException {
Options options = new Options();
options.createIfMissing(true);
options.errorIfExists(true);
db = JniDBFactory.factory.open(dbPath, options);
batch = db.createWriteBatch();
}
@Override
public void close() throws IOException {
if (batch != null) {
IOUtils.cleanup(null, batch);
batch = null;
}
IOUtils.cleanup(null, db);
db = null;
}
public void put(byte[] key, byte[] value) throws IOException {
batch.put(key, value);
writeCount++;
if (writeCount >= BATCH_SIZE) {
sync();
}
}
public byte[] get(byte[] key) throws IOException {
return db.get(key);
}
public void sync() throws IOException {
try {
db.write(batch);
} finally {
batch.close();
batch = null;
}
batch = db.createWriteBatch();
writeCount = 0;
}
}
/**
     * An LRU cache for directory path strings.
*
* The key of this LRU cache is the inode of a directory.
*/
private static class DirPathCache extends LinkedHashMap<Long, String> {
private final static int CAPACITY = 16 * 1024;
DirPathCache() {
super(CAPACITY);
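        // Note: the single-argument LinkedHashMap constructor keeps insertion
        // order, so removeEldestEntry evicts the oldest-inserted entry rather
        // than the least-recently-accessed one.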
}
@Override
protected boolean removeEldestEntry(Map.Entry<Long, String> entry) {
return super.size() > CAPACITY;
}
}
/** Map the child inode to the parent directory inode. */
private LevelDBStore dirChildMap = null;
/** Directory entry map */
private LevelDBStore dirMap = null;
private DirPathCache dirPathCache = new DirPathCache();
LevelDBMetadataMap(String baseDir) throws IOException {
File dbDir = new File(baseDir);
if (dbDir.exists()) {
FileUtils.deleteDirectory(dbDir);
}
if (!dbDir.mkdirs()) {
throw new IOException("Failed to mkdir on " + dbDir);
}
try {
dirChildMap = new LevelDBStore(new File(dbDir, "dirChildMap"));
dirMap = new LevelDBStore(new File(dbDir, "dirMap"));
} catch (IOException e) {
LOG.error("Failed to open LevelDBs", e);
IOUtils.cleanup(null, this);
}
}
@Override
public void close() throws IOException {
IOUtils.cleanup(null, dirChildMap, dirMap);
dirChildMap = null;
dirMap = null;
}
private static byte[] toBytes(long value) {
return ByteBuffer.allocate(8).putLong(value).array();
}
private static byte[] toBytes(String value)
throws UnsupportedEncodingException {
return value.getBytes("UTF-8");
}
private static long toLong(byte[] bytes) {
Preconditions.checkArgument(bytes.length == 8);
return ByteBuffer.wrap(bytes).getLong();
}
private static String toString(byte[] bytes) throws IOException {
try {
return new String(bytes, "UTF-8");
} catch (UnsupportedEncodingException e) {
throw new IOException(e);
}
}
@Override
public void putDirChild(long parentId, long childId) throws IOException {
dirChildMap.put(toBytes(childId), toBytes(parentId));
}
@Override
public void putDir(INode dir) throws IOException {
Preconditions.checkArgument(dir.hasDirectory(),
"INode %s (%s) is not a directory.", dir.getId(), dir.getName());
dirMap.put(toBytes(dir.getId()), toBytes(dir.getName().toStringUtf8()));
}
@Override
public String getParentPath(long inode) throws IOException {
if (inode == INodeId.ROOT_INODE_ID) {
return "/";
}
byte[] bytes = dirChildMap.get(toBytes(inode));
Preconditions.checkState(bytes != null && bytes.length == 8,
"Can not find parent directory for inode %s, "
+ "fsimage might be corrupted", inode);
long parent = toLong(bytes);
if (!dirPathCache.containsKey(parent)) {
bytes = dirMap.get(toBytes(parent));
if (parent != INodeId.ROOT_INODE_ID) {
          Preconditions.checkState(bytes != null,
              "Can not find parent directory for inode %s, "
                  + "the fsimage might be corrupted.", parent);
}
String parentName = toString(bytes);
String parentPath =
new Path(getParentPath(parent),
parentName.isEmpty()? "/" : parentName).toString();
dirPathCache.put(parent, parentPath);
}
return dirPathCache.get(parent);
}
@Override
public void sync() throws IOException {
dirChildMap.sync();
dirMap.sync();
}
}
private String[] stringTable;
private PrintStream out;
private MetadataMap metadataMap = null;
/**
* Construct a PB FsImage writer to generate text file.
* @param out the writer to output text information of fsimage.
* @param tempPath the path to store metadata. If it is empty, store metadata
* in memory instead.
*/
PBImageTextWriter(PrintStream out, String tempPath) throws IOException {
this.out = out;
if (tempPath.isEmpty()) {
metadataMap = new InMemoryMetadataDB();
} else {
metadataMap = new LevelDBMetadataMap(tempPath);
}
}
@Override
public void close() throws IOException {
IOUtils.cleanup(null, metadataMap);
}
/**
* Get text output for the given inode.
* @param parent the path of parent directory
* @param inode the INode object to output.
*/
abstract protected String getEntry(String parent, INode inode);
public void visit(RandomAccessFile file) throws IOException {
Configuration conf = new Configuration();
if (!FSImageUtil.checkFileFormat(file)) {
throw new IOException("Unrecognized FSImage");
}
FileSummary summary = FSImageUtil.loadSummary(file);
try (FileInputStream fin = new FileInputStream(file.getFD())) {
InputStream is;
ArrayList<FileSummary.Section> sections =
Lists.newArrayList(summary.getSectionsList());
Collections.sort(sections,
new Comparator<FileSummary.Section>() {
@Override
public int compare(FsImageProto.FileSummary.Section s1,
FsImageProto.FileSummary.Section s2) {
FSImageFormatProtobuf.SectionName n1 =
FSImageFormatProtobuf.SectionName.fromString(s1.getName());
FSImageFormatProtobuf.SectionName n2 =
FSImageFormatProtobuf.SectionName.fromString(s2.getName());
if (n1 == null) {
return n2 == null ? 0 : -1;
} else if (n2 == null) {
                return 1; // keep null section names consistently first
} else {
return n1.ordinal() - n2.ordinal();
}
}
});
for (FileSummary.Section section : sections) {
fin.getChannel().position(section.getOffset());
is = FSImageUtil.wrapInputStreamForCompression(conf,
summary.getCodec(), new BufferedInputStream(new LimitInputStream(
fin, section.getLength())));
switch (SectionName.fromString(section.getName())) {
case STRING_TABLE:
stringTable = FSImageLoader.loadStringTable(is);
break;
default:
break;
}
}
loadDirectories(fin, sections, summary, conf);
loadINodeDirSection(fin, sections, summary, conf);
metadataMap.sync();
output(conf, summary, fin, sections);
}
}
private void output(Configuration conf, FileSummary summary,
FileInputStream fin, ArrayList<FileSummary.Section> sections)
throws IOException {
InputStream is;
long startTime = Time.monotonicNow();
for (FileSummary.Section section : sections) {
if (SectionName.fromString(section.getName()) == SectionName.INODE) {
fin.getChannel().position(section.getOffset());
is = FSImageUtil.wrapInputStreamForCompression(conf,
summary.getCodec(), new BufferedInputStream(new LimitInputStream(
fin, section.getLength())));
outputINodes(is);
}
}
long timeTaken = Time.monotonicNow() - startTime;
LOG.debug("Time to output inodes: {}ms", timeTaken);
}
protected PermissionStatus getPermission(long perm) {
return FSImageFormatPBINode.Loader.loadPermission(perm, stringTable);
}
/** Load the directories in the INode section. */
private void loadDirectories(
FileInputStream fin, List<FileSummary.Section> sections,
FileSummary summary, Configuration conf)
throws IOException {
LOG.info("Loading directories");
long startTime = Time.monotonicNow();
for (FileSummary.Section section : sections) {
if (SectionName.fromString(section.getName())
== SectionName.INODE) {
fin.getChannel().position(section.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
summary.getCodec(), new BufferedInputStream(new LimitInputStream(
fin, section.getLength())));
loadDirectoriesInINodeSection(is);
}
}
long timeTaken = Time.monotonicNow() - startTime;
LOG.info("Finished loading directories in {}ms", timeTaken);
}
private void loadINodeDirSection(
FileInputStream fin, List<FileSummary.Section> sections,
FileSummary summary, Configuration conf)
throws IOException {
LOG.info("Loading INode directory section.");
long startTime = Time.monotonicNow();
for (FileSummary.Section section : sections) {
if (SectionName.fromString(section.getName())
== SectionName.INODE_DIR) {
fin.getChannel().position(section.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
summary.getCodec(), new BufferedInputStream(
new LimitInputStream(fin, section.getLength())));
buildNamespace(is);
}
}
long timeTaken = Time.monotonicNow() - startTime;
LOG.info("Finished loading INode directory section in {}ms", timeTaken);
}
/**
* Load the filenames of the directories from the INode section.
*/
private void loadDirectoriesInINodeSection(InputStream in) throws IOException {
INodeSection s = INodeSection.parseDelimitedFrom(in);
LOG.info("Loading directories in INode section.");
int numDirs = 0;
for (int i = 0; i < s.getNumInodes(); ++i) {
INode p = INode.parseDelimitedFrom(in);
if (LOG.isDebugEnabled() && i % 10000 == 0) {
LOG.debug("Scanned {} inodes.", i);
}
if (p.hasDirectory()) {
metadataMap.putDir(p);
numDirs++;
}
}
LOG.info("Found {} directories in INode section.", numDirs);
}
/**
* Scan the INodeDirectory section to construct the namespace.
*/
private void buildNamespace(InputStream in) throws IOException {
int count = 0;
while (true) {
FsImageProto.INodeDirectorySection.DirEntry e =
FsImageProto.INodeDirectorySection.DirEntry.parseDelimitedFrom(in);
if (e == null) {
break;
}
count++;
if (LOG.isDebugEnabled() && count % 10000 == 0) {
LOG.debug("Scanned {} directories.", count);
}
long parentId = e.getParent();
      // Referred INodes are not supported for now.
for (int i = 0; i < e.getChildrenCount(); i++) {
long childId = e.getChildren(i);
metadataMap.putDirChild(parentId, childId);
}
Preconditions.checkState(e.getRefChildrenCount() == 0);
}
LOG.info("Scanned {} INode directories to build namespace.", count);
}
private void outputINodes(InputStream in) throws IOException {
INodeSection s = INodeSection.parseDelimitedFrom(in);
LOG.info("Found {} INodes in the INode section", s.getNumInodes());
for (int i = 0; i < s.getNumInodes(); ++i) {
INode p = INode.parseDelimitedFrom(in);
String parentPath = metadataMap.getParentPath(p.getId());
out.println(getEntry(parentPath, p));
if (LOG.isDebugEnabled() && i % 100000 == 0) {
LOG.debug("Outputted {} INodes.", i);
}
}
LOG.info("Outputted {} INodes.", s.getNumInodes());
}
}
| 19,869 | 32.677966 | 84 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.IOException;
import java.util.Date;
/**
* IndentedImageVisitor walks over an FSImage and displays its structure
* using indenting to organize sections within the image file.
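 *
 * An illustrative fragment of output (the values are hypothetical):
 * <pre>
 * FS_IMAGE
 *   IMAGE_VERSION = -51
 *   NAMESPACE_ID = 1234
 *   INODES [NUM_INODES = 1]
 *     INODE
 *       INODE_PATH = /
 * </pre>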
*/
class IndentedImageVisitor extends TextWriterImageVisitor {
public IndentedImageVisitor(String filename) throws IOException {
super(filename);
}
public IndentedImageVisitor(String filename, boolean printToScreen) throws IOException {
super(filename, printToScreen);
}
final private DepthCounter dc = new DepthCounter();// to track leading spacing
@Override
void start() throws IOException {}
@Override
void finish() throws IOException { super.finish(); }
@Override
void finishAbnormally() throws IOException {
System.out.println("*** Image processing finished abnormally. Ending ***");
super.finishAbnormally();
}
@Override
void leaveEnclosingElement() throws IOException {
dc.decLevel();
}
@Override
void visit(ImageElement element, String value) throws IOException {
printIndents();
write(element + " = " + value + "\n");
}
@Override
void visit(ImageElement element, long value) throws IOException {
if ((element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME) ||
(element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE) ||
(element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_MAX_DATE)) {
visit(element, new Date(value).toString());
} else {
visit(element, Long.toString(value));
}
}
@Override
void visitEnclosingElement(ImageElement element) throws IOException {
printIndents();
write(element + "\n");
dc.incLevel();
}
// Print element, along with associated key/value pair, in brackets
@Override
void visitEnclosingElement(ImageElement element,
ImageElement key, String value)
throws IOException {
printIndents();
write(element + " [" + key + " = " + value + "]\n");
dc.incLevel();
}
/**
* Print an appropriate number of spaces for the current level.
* FsImages can potentially be millions of lines long, so caching can
* significantly speed up output.
*/
final private static String [] indents = { "",
" ",
" ",
" ",
" ",
" ",
" "};
private void printIndents() throws IOException {
try {
write(indents[dc.getLevel()]);
} catch (IndexOutOfBoundsException e) {
      // There's no reason an fsimage would need a deeper indent,
      // but fall back to writing the spaces one level at a time just in case.
for(int i = 0; i < dc.getLevel(); i++)
write(" ");
}
}
}
| 3,703 | 32.071429 | 90 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/DelimitedImageVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.IOException;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
/**
* A DelimitedImageVisitor generates a text representation of the fsimage,
* with each element separated by a delimiter string. All of the elements
* common to both inodes and inodes-under-construction are included. When
* processing an fsimage with a layout version that did not include an
* element, such as AccessTime, the output file will include a column
* for the value, but no value will be included.
*
* Individual block information for each file is not currently included.
*
 * The default delimiter is tab, as this is an unlikely value to be included
 * in an inode path or other text metadata. The delimiter value can be set via
 * the constructor.
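 *
 * Columns appear in the order the elements are registered below: path,
 * replication, modification time, access time, block size, num blocks,
 * num bytes, NS quota, DS quota, permission string, user name, group name.
 *
 * A minimal construction sketch (the output file name is hypothetical):
 * <pre>{@code
 * // write comma-separated values to a file and echo them to the screen
 * DelimitedImageVisitor v =
 *     new DelimitedImageVisitor("fsimage.csv", true, ",");
 * }</pre>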
*/
class DelimitedImageVisitor extends TextWriterImageVisitor {
private static final String defaultDelimiter = "\t";
final private LinkedList<ImageElement> elemQ = new LinkedList<ImageElement>();
  private long fileSize = 0L;
// Elements of fsimage we're interested in tracking
private final Collection<ImageElement> elementsToTrack;
// Values for each of the elements in elementsToTrack
private final AbstractMap<ImageElement, String> elements =
new HashMap<ImageElement, String>();
private final String delimiter;
{
elementsToTrack = new ArrayList<ImageElement>();
// This collection determines what elements are tracked and the order
// in which they are output
Collections.addAll(elementsToTrack, ImageElement.INODE_PATH,
ImageElement.REPLICATION,
ImageElement.MODIFICATION_TIME,
ImageElement.ACCESS_TIME,
ImageElement.BLOCK_SIZE,
ImageElement.NUM_BLOCKS,
ImageElement.NUM_BYTES,
ImageElement.NS_QUOTA,
ImageElement.DS_QUOTA,
ImageElement.PERMISSION_STRING,
ImageElement.USER_NAME,
ImageElement.GROUP_NAME);
}
public DelimitedImageVisitor(String filename) throws IOException {
this(filename, false);
}
public DelimitedImageVisitor(String outputFile, boolean printToScreen)
throws IOException {
this(outputFile, printToScreen, defaultDelimiter);
}
public DelimitedImageVisitor(String outputFile, boolean printToScreen,
String delimiter) throws IOException {
super(outputFile, printToScreen);
this.delimiter = delimiter;
reset();
}
/**
* Reset the values of the elements we're tracking in order to handle
* the next file
*/
private void reset() {
elements.clear();
for(ImageElement e : elementsToTrack)
elements.put(e, null);
    fileSize = 0L;
}
@Override
void leaveEnclosingElement() throws IOException {
ImageElement elem = elemQ.pop();
// If we're done with an inode, write out our results and start over
if(elem == ImageElement.INODE ||
elem == ImageElement.INODE_UNDER_CONSTRUCTION) {
writeLine();
write("\n");
reset();
}
}
/**
* Iterate through all the elements we're tracking and, if a value was
* recorded for it, write it out.
*/
private void writeLine() throws IOException {
Iterator<ImageElement> it = elementsToTrack.iterator();
while(it.hasNext()) {
ImageElement e = it.next();
String v = null;
if(e == ImageElement.NUM_BYTES)
v = String.valueOf(fileSize);
else
v = elements.get(e);
if(v != null)
write(v);
if(it.hasNext())
write(delimiter);
}
}
@Override
void visit(ImageElement element, String value) throws IOException {
// Explicitly label the root path
if(element == ImageElement.INODE_PATH && value.equals(""))
value = "/";
// Special case of file size, which is sum of the num bytes in each block
if(element == ImageElement.NUM_BYTES)
fileSize += Long.parseLong(value);
if(elements.containsKey(element) && element != ImageElement.NUM_BYTES)
elements.put(element, value);
}
@Override
void visitEnclosingElement(ImageElement element) throws IOException {
elemQ.push(element);
}
@Override
void visitEnclosingElement(ImageElement element, ImageElement key,
String value) throws IOException {
// Special case as numBlocks is an attribute of the blocks element
if(key == ImageElement.NUM_BLOCKS
&& elements.containsKey(ImageElement.NUM_BLOCKS))
elements.put(key, value);
elemQ.push(element);
}
@Override
void start() throws IOException { /* Nothing to do */ }
}
| 6,049 | 33.971098 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/XmlImageVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.IOException;
import java.util.LinkedList;
import org.apache.hadoop.hdfs.util.XMLUtils;
/**
* An XmlImageVisitor walks over an fsimage structure and writes out
* an equivalent XML document that contains the fsimage's components.
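 *
 * An illustrative fragment of output (element names come from
 * {@code ImageElement}; the values are hypothetical):
 * <pre>{@code
 * <?xml version="1.0" ?>
 * <FS_IMAGE>
 * <IMAGE_VERSION>-51</IMAGE_VERSION>
 * <NAMESPACE_ID>1234</NAMESPACE_ID>
 * </FS_IMAGE>
 * }</pre>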
*/
public class XmlImageVisitor extends TextWriterImageVisitor {
final private LinkedList<ImageElement> tagQ =
new LinkedList<ImageElement>();
public XmlImageVisitor(String filename) throws IOException {
super(filename, false);
}
public XmlImageVisitor(String filename, boolean printToScreen)
throws IOException {
super(filename, printToScreen);
}
@Override
void finish() throws IOException {
super.finish();
}
@Override
void finishAbnormally() throws IOException {
write("\n<!-- Error processing image file. Exiting -->\n");
super.finishAbnormally();
}
@Override
void leaveEnclosingElement() throws IOException {
if(tagQ.size() == 0)
throw new IOException("Tried to exit non-existent enclosing element " +
"in FSImage file");
ImageElement element = tagQ.pop();
write("</" + element.toString() + ">\n");
}
@Override
void start() throws IOException {
write("<?xml version=\"1.0\" ?>\n");
}
@Override
void visit(ImageElement element, String value) throws IOException {
writeTag(element.toString(), value);
}
@Override
void visitEnclosingElement(ImageElement element) throws IOException {
write("<" + element.toString() + ">\n");
tagQ.push(element);
}
@Override
void visitEnclosingElement(ImageElement element,
ImageElement key, String value)
throws IOException {
write("<" + element.toString() + " " + key + "=\"" + value +"\">\n");
tagQ.push(element);
}
private void writeTag(String tag, String value) throws IOException {
write("<" + tag + ">" +
XMLUtils.mangleXmlString(value, true) + "</" + tag + ">\n");
}
}
| 2,843 | 30.252747 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/WebImageViewer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import com.google.common.annotations.VisibleForTesting;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.group.ChannelGroup;
import io.netty.channel.group.DefaultChannelGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;
import io.netty.handler.codec.string.StringEncoder;
import io.netty.util.concurrent.GlobalEventExecutor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
/**
 * WebImageViewer loads an fsimage and exposes a read-only WebHDFS API for its
 * namespace.
*/
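// Usage sketch (added for illustration, not part of the original source;
// the port and fsimage path below are hypothetical examples):
//
//   try (WebImageViewer viewer =
//       new WebImageViewer(new InetSocketAddress("localhost", 5978))) {
//     viewer.start("/tmp/fsimage_0000000000000000000");
//   }
//
// start() blocks until the server channel closes or the thread is
// interrupted; close() shuts down the Netty event loop groups.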
public class WebImageViewer implements Closeable {
public static final Log LOG = LogFactory.getLog(WebImageViewer.class);
private Channel channel;
private InetSocketAddress address;
private final ServerBootstrap bootstrap;
private final EventLoopGroup bossGroup;
private final EventLoopGroup workerGroup;
private final ChannelGroup allChannels;
public WebImageViewer(InetSocketAddress address) {
this.address = address;
this.bossGroup = new NioEventLoopGroup();
this.workerGroup = new NioEventLoopGroup();
this.allChannels = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);
this.bootstrap = new ServerBootstrap()
.group(bossGroup, workerGroup)
.channel(NioServerSocketChannel.class);
}
/**
* Start WebImageViewer and wait until the thread is interrupted.
* @param fsimage the fsimage to load.
   * @throws IOException if the fsimage cannot be loaded.
*/
public void start(String fsimage) throws IOException {
try {
initServer(fsimage);
channel.closeFuture().await();
} catch (InterruptedException e) {
LOG.info("Interrupted. Stopping the WebImageViewer.");
close();
}
}
/**
* Start WebImageViewer.
* @param fsimage the fsimage to load.
   * @throws IOException if the fsimage cannot be loaded.
*/
@VisibleForTesting
public void initServer(String fsimage)
throws IOException, InterruptedException {
final FSImageLoader loader = FSImageLoader.load(fsimage);
bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
p.addLast(new HttpRequestDecoder(),
new StringEncoder(),
new HttpResponseEncoder(),
new FSImageHandler(loader, allChannels));
}
});
channel = bootstrap.bind(address).sync().channel();
allChannels.add(channel);
address = (InetSocketAddress) channel.localAddress();
LOG.info("WebImageViewer started. Listening on " + address.toString() + ". Press Ctrl+C to stop the viewer.");
}
/**
* Get the listening port.
* @return the port WebImageViewer is listening on
*/
@VisibleForTesting
public int getPort() {
return address.getPort();
}
@Override
public void close() {
allChannels.close().awaitUninterruptibly();
bossGroup.shutdownGracefully();
workerGroup.shutdownGracefully();
}
}
| 4,334 | 33.404762 | 114 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.CodedInputStream;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.LimitInputStream;
import org.codehaus.jackson.map.ObjectMapper;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/**
 * FSImageLoader loads an fsimage and provides methods that return the
 * JSON-formatted file status of paths in the fsimage's namespace.
*/
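// Usage sketch (added for illustration, not part of the original source;
// the paths below are hypothetical examples):
//
//   FSImageLoader loader = FSImageLoader.load("/tmp/fsimage_0000000000000000000");
//   String json = loader.getFileStatus("/user/hadoop");
//
// load() reads the string table, inode and directory sections into memory;
// the query methods then answer WebHDFS-style requests from those in-memory
// structures without re-reading the fsimage.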
class FSImageLoader {
  public static final Log LOG = LogFactory.getLog(FSImageLoader.class);
private final String[] stringTable;
// byte representation of inodes, sorted by id
private final byte[][] inodes;
private final Map<Long, long[]> dirmap;
  private static final Comparator<byte[]> INODE_BYTES_COMPARATOR =
      new Comparator<byte[]>() {
@Override
public int compare(byte[] o1, byte[] o2) {
try {
final FsImageProto.INodeSection.INode l = FsImageProto.INodeSection
.INode.parseFrom(o1);
final FsImageProto.INodeSection.INode r = FsImageProto.INodeSection
.INode.parseFrom(o2);
if (l.getId() < r.getId()) {
return -1;
} else if (l.getId() > r.getId()) {
return 1;
} else {
return 0;
}
} catch (InvalidProtocolBufferException e) {
throw new RuntimeException(e);
}
}
};
private FSImageLoader(String[] stringTable, byte[][] inodes,
Map<Long, long[]> dirmap) {
this.stringTable = stringTable;
this.inodes = inodes;
this.dirmap = dirmap;
}
/**
   * Load the fsimage into memory.
   * @param inputFile the path of the fsimage to load.
   * @return an FSImageLoader instance populated from the fsimage.
   * @throws IOException if the fsimage cannot be loaded.
*/
static FSImageLoader load(String inputFile) throws IOException {
Configuration conf = new Configuration();
RandomAccessFile file = new RandomAccessFile(inputFile, "r");
if (!FSImageUtil.checkFileFormat(file)) {
throw new IOException("Unrecognized FSImage");
}
FsImageProto.FileSummary summary = FSImageUtil.loadSummary(file);
try (FileInputStream fin = new FileInputStream(file.getFD())) {
      // List mapping each INodeReference index to the id of the referred inode
ImmutableList<Long> refIdList = null;
String[] stringTable = null;
byte[][] inodes = null;
Map<Long, long[]> dirmap = null;
ArrayList<FsImageProto.FileSummary.Section> sections =
Lists.newArrayList(summary.getSectionsList());
Collections.sort(sections,
new Comparator<FsImageProto.FileSummary.Section>() {
@Override
public int compare(FsImageProto.FileSummary.Section s1,
FsImageProto.FileSummary.Section s2) {
FSImageFormatProtobuf.SectionName n1 =
FSImageFormatProtobuf.SectionName.fromString(s1.getName());
FSImageFormatProtobuf.SectionName n2 =
FSImageFormatProtobuf.SectionName.fromString(s2.getName());
if (n1 == null) {
return n2 == null ? 0 : -1;
              } else if (n2 == null) {
                return 1;
} else {
return n1.ordinal() - n2.ordinal();
}
}
});
for (FsImageProto.FileSummary.Section s : sections) {
fin.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
summary.getCodec(), new BufferedInputStream(new LimitInputStream(
fin, s.getLength())));
if (LOG.isDebugEnabled()) {
          LOG.debug("Loading section " + s.getName() + " length: "
              + s.getLength());
}
switch (FSImageFormatProtobuf.SectionName.fromString(s.getName())) {
case STRING_TABLE:
stringTable = loadStringTable(is);
break;
case INODE:
inodes = loadINodeSection(is);
break;
case INODE_REFERENCE:
refIdList = loadINodeReferenceSection(is);
break;
case INODE_DIR:
dirmap = loadINodeDirectorySection(is, refIdList);
break;
default:
break;
}
}
return new FSImageLoader(stringTable, inodes, dirmap);
}
}
  private static Map<Long, long[]> loadINodeDirectorySection(
      InputStream in, List<Long> refIdList) throws IOException {
LOG.info("Loading inode directory section");
Map<Long, long[]> dirs = Maps.newHashMap();
long counter = 0;
while (true) {
FsImageProto.INodeDirectorySection.DirEntry e =
FsImageProto.INodeDirectorySection.DirEntry.parseDelimitedFrom(in);
      // note that in is a LimitInputStream; null marks the end of the section
if (e == null) {
break;
}
++counter;
long[] l = new long[e.getChildrenCount() + e.getRefChildrenCount()];
for (int i = 0; i < e.getChildrenCount(); ++i) {
l[i] = e.getChildren(i);
}
for (int i = e.getChildrenCount(); i < l.length; i++) {
int refId = e.getRefChildren(i - e.getChildrenCount());
l[i] = refIdList.get(refId);
}
dirs.put(e.getParent(), l);
}
LOG.info("Loaded " + counter + " directories");
return dirs;
}
private static ImmutableList<Long> loadINodeReferenceSection(InputStream in)
throws IOException {
LOG.info("Loading inode references");
ImmutableList.Builder<Long> builder = ImmutableList.builder();
long counter = 0;
while (true) {
FsImageProto.INodeReferenceSection.INodeReference e =
FsImageProto.INodeReferenceSection.INodeReference
.parseDelimitedFrom(in);
if (e == null) {
break;
}
++counter;
builder.add(e.getReferredId());
}
LOG.info("Loaded " + counter + " inode references");
return builder.build();
}
private static byte[][] loadINodeSection(InputStream in)
throws IOException {
FsImageProto.INodeSection s = FsImageProto.INodeSection
.parseDelimitedFrom(in);
LOG.info("Loading " + s.getNumInodes() + " inodes.");
final byte[][] inodes = new byte[(int) s.getNumInodes()][];
for (int i = 0; i < s.getNumInodes(); ++i) {
int size = CodedInputStream.readRawVarint32(in.read(), in);
byte[] bytes = new byte[size];
IOUtils.readFully(in, bytes, 0, size);
inodes[i] = bytes;
}
LOG.debug("Sorting inodes");
Arrays.sort(inodes, INODE_BYTES_COMPARATOR);
LOG.debug("Finished sorting inodes");
return inodes;
}
  static String[] loadStringTable(InputStream in) throws IOException {
FsImageProto.StringTableSection s = FsImageProto.StringTableSection
.parseDelimitedFrom(in);
LOG.info("Loading " + s.getNumEntry() + " strings");
String[] stringTable = new String[s.getNumEntry() + 1];
for (int i = 0; i < s.getNumEntry(); ++i) {
FsImageProto.StringTableSection.Entry e = FsImageProto
.StringTableSection.Entry.parseDelimitedFrom(in);
stringTable[e.getId()] = e.getStr();
}
return stringTable;
}
/**
* Return the JSON formatted FileStatus of the specified file.
   * @param path the path of the file
   * @return JSON formatted FileStatus
   * @throws IOException if the FileStatus cannot be serialized to JSON.
*/
String getFileStatus(String path) throws IOException {
ObjectMapper mapper = new ObjectMapper();
FsImageProto.INodeSection.INode inode = fromINodeId(lookup(path));
return "{\"FileStatus\":\n"
+ mapper.writeValueAsString(getFileStatus(inode, false)) + "\n}\n";
}
/**
* Return the JSON formatted list of the files in the specified directory.
   * @param path the path of the directory to list
   * @return JSON formatted list of the files in the directory
   * @throws IOException if the file list cannot be serialized to JSON.
*/
String listStatus(String path) throws IOException {
StringBuilder sb = new StringBuilder();
ObjectMapper mapper = new ObjectMapper();
List<Map<String, Object>> fileStatusList = getFileStatusList(path);
sb.append("{\"FileStatuses\":{\"FileStatus\":[\n");
int i = 0;
for (Map<String, Object> fileStatusMap : fileStatusList) {
if (i++ != 0) {
sb.append(',');
}
sb.append(mapper.writeValueAsString(fileStatusMap));
}
sb.append("\n]}}\n");
return sb.toString();
}
private List<Map<String, Object>> getFileStatusList(String path)
throws IOException {
List<Map<String, Object>> list = new ArrayList<Map<String, Object>>();
long id = lookup(path);
FsImageProto.INodeSection.INode inode = fromINodeId(id);
if (inode.getType() == FsImageProto.INodeSection.INode.Type.DIRECTORY) {
if (!dirmap.containsKey(id)) {
// if the directory is empty, return empty list
return list;
}
long[] children = dirmap.get(id);
for (long cid : children) {
list.add(getFileStatus(fromINodeId(cid), true));
}
} else {
list.add(getFileStatus(inode, false));
}
return list;
}
/**
* Return the JSON formatted XAttrNames of the specified file.
*
* @param path
   *          the path of the file
   * @return JSON formatted XAttrNames
   * @throws IOException
   *           if the XAttr names cannot be serialized to JSON.
*/
String listXAttrs(String path) throws IOException {
return JsonUtil.toJsonString(getXAttrList(path));
}
/**
* Return the JSON formatted XAttrs of the specified file.
*
* @param path
   *          the path of the file
   * @return JSON formatted XAttrs
   * @throws IOException
   *           if the XAttrs cannot be serialized to JSON.
*/
String getXAttrs(String path, List<String> names, String encoder)
throws IOException {
List<XAttr> xAttrs = getXAttrList(path);
List<XAttr> filtered;
if (names == null || names.size() == 0) {
filtered = xAttrs;
} else {
filtered = Lists.newArrayListWithCapacity(names.size());
for (String name : names) {
XAttr search = XAttrHelper.buildXAttr(name);
boolean found = false;
for (XAttr aXAttr : xAttrs) {
if (aXAttr.getNameSpace() == search.getNameSpace()
&& aXAttr.getName().equals(search.getName())) {
filtered.add(aXAttr);
found = true;
break;
}
}
if (!found) {
throw new IOException(
"At least one of the attributes provided was not found.");
}
}
}
return JsonUtil.toJsonString(filtered,
new XAttrEncodingParam(encoder).getEncoding());
}
private List<XAttr> getXAttrList(String path) throws IOException {
long id = lookup(path);
FsImageProto.INodeSection.INode inode = fromINodeId(id);
switch (inode.getType()) {
case FILE:
return FSImageFormatPBINode.Loader.loadXAttrs(
inode.getFile().getXAttrs(), stringTable);
case DIRECTORY:
return FSImageFormatPBINode.Loader.loadXAttrs(inode.getDirectory()
.getXAttrs(), stringTable);
default:
return null;
}
}
/**
* Return the JSON formatted ACL status of the specified file.
   * @param path the path of the file
   * @return JSON formatted AclStatus
   * @throws IOException if the AclStatus cannot be serialized to JSON.
*/
String getAclStatus(String path) throws IOException {
PermissionStatus p = getPermissionStatus(path);
List<AclEntry> aclEntryList = getAclEntryList(path);
FsPermission permission = p.getPermission();
AclStatus.Builder builder = new AclStatus.Builder();
builder.owner(p.getUserName()).group(p.getGroupName())
.addEntries(aclEntryList).setPermission(permission)
.stickyBit(permission.getStickyBit());
AclStatus aclStatus = builder.build();
return JsonUtil.toJsonString(aclStatus);
}
private List<AclEntry> getAclEntryList(String path) throws IOException {
long id = lookup(path);
FsImageProto.INodeSection.INode inode = fromINodeId(id);
switch (inode.getType()) {
case FILE: {
FsImageProto.INodeSection.INodeFile f = inode.getFile();
return FSImageFormatPBINode.Loader.loadAclEntries(
f.getAcl(), stringTable);
}
case DIRECTORY: {
FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
return FSImageFormatPBINode.Loader.loadAclEntries(
d.getAcl(), stringTable);
}
default: {
return new ArrayList<AclEntry>();
}
}
}
private PermissionStatus getPermissionStatus(String path) throws IOException {
long id = lookup(path);
FsImageProto.INodeSection.INode inode = fromINodeId(id);
switch (inode.getType()) {
case FILE: {
FsImageProto.INodeSection.INodeFile f = inode.getFile();
return FSImageFormatPBINode.Loader.loadPermission(
f.getPermission(), stringTable);
}
case DIRECTORY: {
FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
return FSImageFormatPBINode.Loader.loadPermission(
d.getPermission(), stringTable);
}
case SYMLINK: {
FsImageProto.INodeSection.INodeSymlink s = inode.getSymlink();
return FSImageFormatPBINode.Loader.loadPermission(
s.getPermission(), stringTable);
}
default: {
return null;
}
}
}
/**
* Return the INodeId of the specified path.
*/
private long lookup(String path) throws IOException {
Preconditions.checkArgument(path.startsWith("/"));
long id = INodeId.ROOT_INODE_ID;
for (int offset = 0, next; offset < path.length(); offset = next) {
next = path.indexOf('/', offset + 1);
if (next == -1) {
next = path.length();
}
if (offset + 1 > next) {
break;
}
final String component = path.substring(offset + 1, next);
if (component.isEmpty()) {
continue;
}
final long[] children = dirmap.get(id);
if (children == null) {
throw new FileNotFoundException(path);
}
boolean found = false;
for (long cid : children) {
FsImageProto.INodeSection.INode child = fromINodeId(cid);
if (component.equals(child.getName().toStringUtf8())) {
found = true;
id = child.getId();
break;
}
}
if (!found) {
throw new FileNotFoundException(path);
}
}
return id;
}
  private Map<String, Object> getFileStatus(
      FsImageProto.INodeSection.INode inode, boolean printSuffix) {
Map<String, Object> map = Maps.newHashMap();
switch (inode.getType()) {
case FILE: {
FsImageProto.INodeSection.INodeFile f = inode.getFile();
PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
f.getPermission(), stringTable);
map.put("accessTime", f.getAccessTime());
map.put("blockSize", f.getPreferredBlockSize());
map.put("group", p.getGroupName());
map.put("length", getFileSize(f));
map.put("modificationTime", f.getModificationTime());
map.put("owner", p.getUserName());
map.put("pathSuffix",
printSuffix ? inode.getName().toStringUtf8() : "");
map.put("permission", toString(p.getPermission()));
map.put("replication", f.getReplication());
map.put("type", inode.getType());
map.put("fileId", inode.getId());
map.put("childrenNum", 0);
return map;
}
case DIRECTORY: {
FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
d.getPermission(), stringTable);
map.put("accessTime", 0);
map.put("blockSize", 0);
map.put("group", p.getGroupName());
map.put("length", 0);
map.put("modificationTime", d.getModificationTime());
map.put("owner", p.getUserName());
map.put("pathSuffix",
printSuffix ? inode.getName().toStringUtf8() : "");
map.put("permission", toString(p.getPermission()));
map.put("replication", 0);
map.put("type", inode.getType());
map.put("fileId", inode.getId());
map.put("childrenNum", dirmap.containsKey(inode.getId()) ?
dirmap.get(inode.getId()).length : 0);
return map;
}
case SYMLINK: {
FsImageProto.INodeSection.INodeSymlink d = inode.getSymlink();
PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
d.getPermission(), stringTable);
map.put("accessTime", d.getAccessTime());
map.put("blockSize", 0);
map.put("group", p.getGroupName());
map.put("length", 0);
map.put("modificationTime", d.getModificationTime());
map.put("owner", p.getUserName());
map.put("pathSuffix",
printSuffix ? inode.getName().toStringUtf8() : "");
map.put("permission", toString(p.getPermission()));
map.put("replication", 0);
map.put("type", inode.getType());
map.put("symlink", d.getTarget().toStringUtf8());
map.put("fileId", inode.getId());
map.put("childrenNum", 0);
return map;
}
default:
return null;
}
}
static long getFileSize(FsImageProto.INodeSection.INodeFile f) {
long size = 0;
for (HdfsProtos.BlockProto p : f.getBlocksList()) {
size += p.getNumBytes();
}
return size;
}
private String toString(FsPermission permission) {
return String.format("%o", permission.toShort());
}
private FsImageProto.INodeSection.INode fromINodeId(final long id)
throws IOException {
int l = 0, r = inodes.length;
while (l < r) {
int mid = l + (r - l) / 2;
FsImageProto.INodeSection.INode n = FsImageProto.INodeSection.INode
.parseFrom(inodes[mid]);
long nid = n.getId();
if (id > nid) {
l = mid + 1;
} else if (id < nid) {
r = mid;
} else {
return n;
}
}
return null;
}
}
| 20,431 | 33.748299 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
/**
* BinaryEditsVisitor implements a binary EditsVisitor
 */
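// Usage sketch (added for illustration, not part of the original source;
// the file names below are hypothetical examples):
//
//   OfflineEditsVisitor visitor = new BinaryEditsVisitor("edits.bin");
//   OfflineEditsLoader.OfflineEditsLoaderFactory
//       .createLoader(visitor, "edits.xml", true, new OfflineEditsViewer.Flags())
//       .loadEdits();
//
// This re-serializes an XML edits dump back into the native binary format.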
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class BinaryEditsVisitor implements OfflineEditsVisitor {
final private EditLogFileOutputStream elfos;
/**
* Create a processor that writes to a given file
* @param outputName Name of file to write output to
*/
public BinaryEditsVisitor(String outputName) throws IOException {
this.elfos = new EditLogFileOutputStream(new Configuration(),
new File(outputName), 0);
elfos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
}
/**
* Start the visitor (initialization)
*/
@Override
public void start(int version) throws IOException {
}
/**
* Finish the visitor
*/
@Override
public void close(Throwable error) throws IOException {
elfos.setReadyToFlush();
elfos.flushAndSync(true);
elfos.close();
}
@Override
public void visitOp(FSEditLogOp op) throws IOException {
elfos.write(op);
}
}
| 2,275 | 31.514286 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TeeOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import java.io.IOException;
import java.io.OutputStream;
/**
* A TeeOutputStream writes its output to multiple output streams.
*/
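// Usage sketch (added for illustration, not part of the original source;
// the file name below is a hypothetical example):
//
//   OutputStream tee = new TeeOutputStream(new OutputStream[] {
//       new FileOutputStream("edits.xml"), System.out });
//   tee.write(new byte[] { 1, 2, 3 });
//
// Every write(), flush() and close() call is forwarded to each wrapped
// stream in turn; this is how the -v (verbose) option mirrors processor
// output to the screen.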
public class TeeOutputStream extends OutputStream {
private final OutputStream[] outs;
public TeeOutputStream(OutputStream outs[]) {
this.outs = outs;
}
@Override
public void write(int c) throws IOException {
for (OutputStream o : outs) {
o.write(c);
}
}
@Override
public void write(byte[] b) throws IOException {
for (OutputStream o : outs) {
o.write(b);
}
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
for (OutputStream o : outs) {
o.write(b, off, len);
}
}
@Override
public void close() throws IOException {
for (OutputStream o : outs) {
o.close();
}
}
@Override
public void flush() throws IOException {
for (OutputStream o : outs) {
o.flush();
}
}
}
| 1,796 | 25.426471 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.io.IOUtils;
/**
* OfflineEditsBinaryLoader loads edits from a binary edits file
*/
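// Usage sketch (added for illustration, not part of the original source;
// the file name and visitor below are hypothetical examples). The loader is
// normally obtained through OfflineEditsLoader.OfflineEditsLoaderFactory
// rather than constructed directly:
//
//   OfflineEditsViewer.Flags flags = new OfflineEditsViewer.Flags();
//   flags.setRecoveryMode();
//   OfflineEditsLoader.OfflineEditsLoaderFactory
//       .createLoader(new StatisticsEditsVisitor(System.out), "edits", false, flags)
//       .loadEdits();
//
// With recovery mode set, read errors trigger a resync instead of aborting.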
@InterfaceAudience.Private
@InterfaceStability.Unstable
class OfflineEditsBinaryLoader implements OfflineEditsLoader {
private final OfflineEditsVisitor visitor;
private final EditLogInputStream inputStream;
private final boolean fixTxIds;
private final boolean recoveryMode;
private long nextTxId;
public static final Log LOG =
LogFactory.getLog(OfflineEditsBinaryLoader.class.getName());
/**
* Constructor
*/
public OfflineEditsBinaryLoader(OfflineEditsVisitor visitor,
EditLogInputStream inputStream, OfflineEditsViewer.Flags flags) {
this.visitor = visitor;
this.inputStream = inputStream;
this.fixTxIds = flags.getFixTxIds();
this.recoveryMode = flags.getRecoveryMode();
this.nextTxId = -1;
}
/**
   * Loads the edits file and uses the visitor to process all of its operations
*/
@Override
public void loadEdits() throws IOException {
try {
visitor.start(inputStream.getVersion(true));
while (true) {
try {
FSEditLogOp op = inputStream.readOp();
if (op == null)
break;
if (fixTxIds) {
if (nextTxId <= 0) {
nextTxId = op.getTransactionId();
if (nextTxId <= 0) {
nextTxId = 1;
}
}
op.setTransactionId(nextTxId);
nextTxId++;
}
visitor.visitOp(op);
} catch (IOException e) {
if (!recoveryMode) {
// Tell the visitor to clean up, then re-throw the exception
LOG.error("Got IOException at position " +
inputStream.getPosition());
visitor.close(e);
throw e;
}
LOG.error("Got IOException while reading stream! Resyncing.", e);
inputStream.resync();
} catch (RuntimeException e) {
if (!recoveryMode) {
// Tell the visitor to clean up, then re-throw the exception
LOG.error("Got RuntimeException at position " +
inputStream.getPosition());
visitor.close(e);
throw e;
}
LOG.error("Got RuntimeException while reading stream! Resyncing.", e);
inputStream.resync();
}
}
visitor.close(null);
} finally {
IOUtils.cleanup(LOG, inputStream);
}
}
}
| 3,746 | 33.376147 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsLoader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
/**
* OfflineEditsLoader walks an EditsVisitor over an EditLogInputStream
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
interface OfflineEditsLoader {
abstract public void loadEdits() throws IOException;
static class OfflineEditsLoaderFactory {
static OfflineEditsLoader createLoader(OfflineEditsVisitor visitor,
String inputFileName, boolean xmlInput,
OfflineEditsViewer.Flags flags) throws IOException {
if (xmlInput) {
return new OfflineEditsXmlLoader(visitor, new File(inputFileName), flags);
} else {
File file = null;
EditLogInputStream elis = null;
OfflineEditsLoader loader = null;
try {
file = new File(inputFileName);
elis = new EditLogFileInputStream(file, HdfsServerConstants.INVALID_TXID,
HdfsServerConstants.INVALID_TXID, false);
loader = new OfflineEditsBinaryLoader(visitor, elis, flags);
} finally {
if ((loader == null) && (elis != null)) {
elis.close();
}
}
return loader;
}
}
}
}
| 2,346 | 35.671875 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
/**
 * An implementation of OfflineEditsVisitor can traverse the structure of a
 * Hadoop edits log and respond to each of the structures within the file.
*/
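// Implementation sketch (added for illustration, not part of the original
// source): a trivial visitor that only counts operations could look like
//
//   class CountingVisitor implements OfflineEditsVisitor {
//     long count = 0;
//     public void start(int version) { }
//     public void close(Throwable error) { }
//     public void visitOp(FSEditLogOp op) { count++; }
//   }
//
// The loader calls start() once, visitOp() for every operation, and close()
// when the stream ends or an unrecoverable error occurs.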
@InterfaceAudience.Private
@InterfaceStability.Unstable
abstract public interface OfflineEditsVisitor {
/**
* Begin visiting the edits log structure. Opportunity to perform
* any initialization necessary for the implementing visitor.
*
* @param version Edit log version
*/
abstract void start(int version) throws IOException;
/**
* Finish visiting the edits log structure. Opportunity to perform any
* clean up necessary for the implementing visitor.
*
* @param error If the visitor was closed because of an
* unrecoverable error in the input stream, this
* is the exception.
*/
abstract void close(Throwable error) throws IOException;
/**
   * Visit a single operation from the edits log.
   *
   * @param op The edit log operation being visited
*/
abstract void visitOp(FSEditLogOp op)
throws IOException;
}
| 2,227 | 36.133333 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsViewer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsLoader.OfflineEditsLoaderFactory;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
/**
 * This class implements an offline edits viewer, a tool that
 * can be used to view edit logs.
*/
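// Typical invocations (added for illustration, not part of the original
// source; the file names are hypothetical examples):
//
//   hdfs oev -i edits -o edits.xml
//   hdfs oev -p stats -i edits -o edits.stats
//
// The first converts a binary edits file to XML (the default processor);
// the second writes per-opcode statistics.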
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class OfflineEditsViewer extends Configured implements Tool {
private final static String defaultProcessor = "xml";
/**
* Print help.
*/
private void printHelp() {
String summary =
"Usage: bin/hdfs oev [OPTIONS] -i INPUT_FILE -o OUTPUT_FILE\n" +
"Offline edits viewer\n" +
"Parse a Hadoop edits log file INPUT_FILE and save results\n" +
"in OUTPUT_FILE.\n" +
"Required command line arguments:\n" +
"-i,--inputFile <arg> edits file to process, xml (case\n" +
" insensitive) extension means XML format,\n" +
" any other filename means binary format\n" +
"-o,--outputFile <arg> Name of output file. If the specified\n" +
" file exists, it will be overwritten,\n" +
" format of the file is determined\n" +
" by -p option\n" +
"\n" +
"Optional command line arguments:\n" +
"-p,--processor <arg> Select which type of processor to apply\n" +
" against image file, currently supported\n" +
" processors are: binary (native binary format\n" +
" that Hadoop uses), xml (default, XML\n" +
" format), stats (prints statistics about\n" +
" edits file)\n" +
"-h,--help Display usage information and exit\n" +
"-f,--fix-txids Renumber the transaction IDs in the input,\n" +
" so that there are no gaps or invalid " +
" transaction IDs.\n" +
"-r,--recover When reading binary edit logs, use recovery \n" +
" mode. This will give you the chance to skip \n" +
" corrupt parts of the edit log.\n" +
"-v,--verbose More verbose output, prints the input and\n" +
" output filenames, for processors that write\n" +
" to a file, also output to screen. On large\n" +
" image files this will dramatically increase\n" +
" processing time (default is false).\n";
System.out.println(summary);
System.out.println();
ToolRunner.printGenericCommandUsage(System.out);
}
/**
* Build command-line options and descriptions
*
* @return command line options
*/
public static Options buildOptions() {
Options options = new Options();
// Build in/output file arguments, which are required, but there is no
// addOption method that can specify this
OptionBuilder.isRequired();
OptionBuilder.hasArgs();
OptionBuilder.withLongOpt("outputFilename");
options.addOption(OptionBuilder.create("o"));
OptionBuilder.isRequired();
OptionBuilder.hasArgs();
OptionBuilder.withLongOpt("inputFilename");
options.addOption(OptionBuilder.create("i"));
options.addOption("p", "processor", true, "");
options.addOption("v", "verbose", false, "");
options.addOption("f", "fix-txids", false, "");
options.addOption("r", "recover", false, "");
options.addOption("h", "help", false, "");
return options;
}
/** Process an edit log using the chosen processor or visitor.
*
   * @param inputFileName The file to process
   * @param outputFileName The output file name
   * @param processor If visitor is null, the processor to use
   * @param flags Flags controlling verbose, fix-txids and recovery behavior
   * @param visitor If non-null, the visitor to use.
*
* @return 0 on success; error code otherwise
*/
public int go(String inputFileName, String outputFileName, String processor,
Flags flags, OfflineEditsVisitor visitor)
{
if (flags.getPrintToScreen()) {
System.out.println("input [" + inputFileName + "]");
System.out.println("output [" + outputFileName + "]");
}
try {
if (visitor == null) {
visitor = OfflineEditsVisitorFactory.getEditsVisitor(
outputFileName, processor, flags.getPrintToScreen());
}
boolean xmlInput = inputFileName.endsWith(".xml");
OfflineEditsLoader loader = OfflineEditsLoaderFactory.
createLoader(visitor, inputFileName, xmlInput, flags);
loader.loadEdits();
} catch(Exception e) {
System.err.println("Encountered exception. Exiting: " + e.getMessage());
e.printStackTrace(System.err);
return -1;
}
return 0;
}
public static class Flags {
private boolean printToScreen = false;
private boolean fixTxIds = false;
private boolean recoveryMode = false;
public Flags() {
}
public boolean getPrintToScreen() {
return printToScreen;
}
public void setPrintToScreen() {
printToScreen = true;
}
public boolean getFixTxIds() {
return fixTxIds;
}
public void setFixTxIds() {
fixTxIds = true;
}
public boolean getRecoveryMode() {
return recoveryMode;
}
public void setRecoveryMode() {
recoveryMode = true;
}
}
/**
* Main entry point for ToolRunner (see ToolRunner docs)
*
* @param argv The parameters passed to this program.
* @return 0 on success, non zero on error.
*/
@Override
public int run(String[] argv) throws Exception {
Options options = buildOptions();
if(argv.length == 0) {
printHelp();
return -1;
}
CommandLineParser parser = new PosixParser();
CommandLine cmd;
try {
cmd = parser.parse(options, argv);
} catch (ParseException e) {
System.out.println(
"Error parsing command-line options: " + e.getMessage());
printHelp();
return -1;
}
if(cmd.hasOption("h")) { // print help and exit
printHelp();
return -1;
}
String inputFileName = cmd.getOptionValue("i");
String outputFileName = cmd.getOptionValue("o");
String processor = cmd.getOptionValue("p");
if(processor == null) {
processor = defaultProcessor;
}
Flags flags = new Flags();
if (cmd.hasOption("r")) {
flags.setRecoveryMode();
}
if (cmd.hasOption("f")) {
flags.setFixTxIds();
}
if (cmd.hasOption("v")) {
flags.setPrintToScreen();
}
return go(inputFileName, outputFileName, processor, flags, null);
}
/**
* main() runs the offline edits viewer using ToolRunner
*
* @param argv Command line parameters.
*/
public static void main(String[] argv) throws Exception {
int res = ToolRunner.run(new OfflineEditsViewer(), argv);
System.exit(res);
}
}
| 8,355 | 33.672199 | 100 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Stack;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
import org.xml.sax.XMLReader;
import org.xml.sax.helpers.DefaultHandler;
import org.xml.sax.helpers.XMLReaderFactory;
import com.google.common.base.Charsets;
/**
* OfflineEditsXmlLoader walks an EditsVisitor over an OEV XML file
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
class OfflineEditsXmlLoader
extends DefaultHandler implements OfflineEditsLoader {
private final boolean fixTxIds;
private final OfflineEditsVisitor visitor;
private final InputStreamReader fileReader;
private ParseState state;
private Stanza stanza;
private Stack<Stanza> stanzaStack;
private FSEditLogOpCodes opCode;
private StringBuffer cbuf;
private long nextTxId;
private final OpInstanceCache opCache = new OpInstanceCache();
static enum ParseState {
EXPECT_EDITS_TAG,
EXPECT_VERSION,
EXPECT_RECORD,
EXPECT_OPCODE,
EXPECT_DATA,
HANDLE_DATA,
EXPECT_END,
}
public OfflineEditsXmlLoader(OfflineEditsVisitor visitor,
File inputFile, OfflineEditsViewer.Flags flags) throws FileNotFoundException {
this.visitor = visitor;
this.fileReader =
new InputStreamReader(new FileInputStream(inputFile), Charsets.UTF_8);
this.fixTxIds = flags.getFixTxIds();
}
/**
* Loads edits file, uses visitor to process all elements
*/
@Override
public void loadEdits() throws IOException {
try {
XMLReader xr = XMLReaderFactory.createXMLReader();
xr.setContentHandler(this);
xr.setErrorHandler(this);
xr.setDTDHandler(null);
xr.parse(new InputSource(fileReader));
visitor.close(null);
} catch (SAXParseException e) {
System.out.println("XML parsing error: " + "\n" +
"Line: " + e.getLineNumber() + "\n" +
"URI: " + e.getSystemId() + "\n" +
"Message: " + e.getMessage());
visitor.close(e);
throw new IOException(e.toString());
} catch (SAXException e) {
visitor.close(e);
throw new IOException(e.toString());
} catch (RuntimeException e) {
visitor.close(e);
throw e;
} finally {
fileReader.close();
}
}
@Override
public void startDocument() {
state = ParseState.EXPECT_EDITS_TAG;
stanza = null;
stanzaStack = new Stack<Stanza>();
opCode = null;
cbuf = new StringBuffer();
nextTxId = -1;
}
@Override
public void endDocument() {
if (state != ParseState.EXPECT_END) {
throw new InvalidXmlException("expecting </EDITS>");
}
}
@Override
public void startElement (String uri, String name,
String qName, Attributes atts) {
switch (state) {
case EXPECT_EDITS_TAG:
if (!name.equals("EDITS")) {
throw new InvalidXmlException("you must put " +
"<EDITS> at the top of the XML file! " +
"Got tag " + name + " instead");
}
state = ParseState.EXPECT_VERSION;
break;
case EXPECT_VERSION:
if (!name.equals("EDITS_VERSION")) {
throw new InvalidXmlException("you must put " +
"<EDITS_VERSION> at the top of the XML file! " +
"Got tag " + name + " instead");
}
break;
case EXPECT_RECORD:
if (!name.equals("RECORD")) {
throw new InvalidXmlException("expected a <RECORD> tag");
}
state = ParseState.EXPECT_OPCODE;
break;
case EXPECT_OPCODE:
if (!name.equals("OPCODE")) {
throw new InvalidXmlException("expected an <OPCODE> tag");
}
break;
case EXPECT_DATA:
if (!name.equals("DATA")) {
throw new InvalidXmlException("expected a <DATA> tag");
}
stanza = new Stanza();
state = ParseState.HANDLE_DATA;
break;
case HANDLE_DATA:
Stanza parent = stanza;
Stanza child = new Stanza();
stanzaStack.push(parent);
stanza = child;
parent.addChild(name, child);
break;
case EXPECT_END:
throw new InvalidXmlException("not expecting anything after </EDITS>");
}
}
@Override
public void endElement (String uri, String name, String qName) {
String str = XMLUtils.unmangleXmlString(cbuf.toString(), false).trim();
cbuf = new StringBuffer();
switch (state) {
case EXPECT_EDITS_TAG:
throw new InvalidXmlException("expected <EDITS/>");
case EXPECT_VERSION:
if (!name.equals("EDITS_VERSION")) {
throw new InvalidXmlException("expected </EDITS_VERSION>");
}
try {
int version = Integer.parseInt(str);
visitor.start(version);
} catch (IOException e) {
// Can't throw IOException from a SAX method, sigh.
throw new RuntimeException(e);
}
state = ParseState.EXPECT_RECORD;
break;
case EXPECT_RECORD:
if (name.equals("EDITS")) {
state = ParseState.EXPECT_END;
} else if (!name.equals("RECORD")) {
throw new InvalidXmlException("expected </EDITS> or </RECORD>");
}
break;
case EXPECT_OPCODE:
if (!name.equals("OPCODE")) {
throw new InvalidXmlException("expected </OPCODE>");
}
opCode = FSEditLogOpCodes.valueOf(str);
state = ParseState.EXPECT_DATA;
break;
case EXPECT_DATA:
throw new InvalidXmlException("expected <DATA/>");
case HANDLE_DATA:
stanza.setValue(str);
if (stanzaStack.empty()) {
if (!name.equals("DATA")) {
throw new InvalidXmlException("expected </DATA>");
}
state = ParseState.EXPECT_RECORD;
FSEditLogOp op = opCache.get(opCode);
opCode = null;
try {
op.decodeXml(stanza);
stanza = null;
} finally {
if (stanza != null) {
System.err.println("fromXml error decoding opcode " + opCode +
"\n" + stanza.toString());
stanza = null;
}
}
if (fixTxIds) {
if (nextTxId <= 0) {
nextTxId = op.getTransactionId();
if (nextTxId <= 0) {
nextTxId = 1;
}
}
op.setTransactionId(nextTxId);
nextTxId++;
}
try {
visitor.visitOp(op);
} catch (IOException e) {
// Can't throw IOException from a SAX method, sigh.
throw new RuntimeException(e);
}
state = ParseState.EXPECT_RECORD;
} else {
stanza = stanzaStack.pop();
}
break;
case EXPECT_END:
throw new InvalidXmlException("not expecting anything after </EDITS>");
}
}
@Override
public void characters (char ch[], int start, int length) {
cbuf.append(ch, start, length);
}
}
| 8,386 | 30.768939 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.hdfs.util.XMLUtils;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.xml.sax.ContentHandler;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.AttributesImpl;
import org.apache.xml.serialize.OutputFormat;
import org.apache.xml.serialize.XMLSerializer;
/**
* An XmlEditsVisitor walks over an EditLog structure and writes out
* an equivalent XML document that contains the EditLog's components.
*/
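// Usage sketch (added for illustration, not part of the original source;
// the file name below is a hypothetical example):
//
//   OfflineEditsVisitor visitor =
//       new XmlEditsVisitor(new FileOutputStream("edits.xml"));
//
// The constructor writes the XML prolog and the opening <EDITS> element;
// close() writes </EDITS> and closes the stream.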
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class XmlEditsVisitor implements OfflineEditsVisitor {
private final OutputStream out;
private ContentHandler contentHandler;
/**
   * Create a visitor that writes an XML representation of the edits log
   * to the given output stream.
   *
   * @param out Stream to write the XML output to
*/
public XmlEditsVisitor(OutputStream out)
throws IOException {
this.out = out;
OutputFormat outFormat = new OutputFormat("XML", "UTF-8", true);
outFormat.setIndenting(true);
outFormat.setIndent(2);
outFormat.setDoctype(null, null);
XMLSerializer serializer = new XMLSerializer(out, outFormat);
contentHandler = serializer.asContentHandler();
try {
contentHandler.startDocument();
contentHandler.startElement("", "", "EDITS", new AttributesImpl());
} catch (SAXException e) {
throw new IOException("SAX error: " + e.getMessage());
}
}
/**
* Start visitor (initialization)
*/
@Override
public void start(int version) throws IOException {
try {
contentHandler.startElement("", "", "EDITS_VERSION", new AttributesImpl());
StringBuilder bld = new StringBuilder();
bld.append(version);
addString(bld.toString());
contentHandler.endElement("", "", "EDITS_VERSION");
}
catch (SAXException e) {
throw new IOException("SAX error: " + e.getMessage());
}
}
public void addString(String str) throws SAXException {
int slen = str.length();
char arr[] = new char[slen];
str.getChars(0, slen, arr, 0);
contentHandler.characters(arr, 0, slen);
}
/**
* Finish visitor
*/
@Override
public void close(Throwable error) throws IOException {
try {
contentHandler.endElement("", "", "EDITS");
if (error != null) {
String msg = error.getMessage();
XMLUtils.addSaxString(contentHandler, "ERROR",
(msg == null) ? "null" : msg);
}
contentHandler.endDocument();
}
catch (SAXException e) {
throw new IOException("SAX error: " + e.getMessage());
}
out.close();
}
@Override
public void visitOp(FSEditLogOp op) throws IOException {
try {
op.outputToXml(contentHandler);
}
catch (SAXException e) {
throw new IOException("SAX error: " + e.getMessage());
}
}
}
| 3,944 | 31.073171 | 81 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.util.Map;
import java.util.HashMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
import com.google.common.base.Charsets;
/**
 * StatisticsEditsVisitor implements a text version of OfflineEditsVisitor
 * that aggregates counts of the op codes processed.
*
*/
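// Usage sketch (added for illustration, not part of the original source;
// the file name below is a hypothetical example):
//
//   StatisticsEditsVisitor visitor = new StatisticsEditsVisitor(System.out);
//   OfflineEditsLoader.OfflineEditsLoaderFactory
//       .createLoader(visitor, "edits", false, new OfflineEditsViewer.Flags())
//       .loadEdits();
//   Map<FSEditLogOpCodes, Long> counts = visitor.getStatistics();
//
// The loader invokes close(null) at the end of the stream, which prints the
// formatted summary to the wrapped stream.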
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class StatisticsEditsVisitor implements OfflineEditsVisitor {
final private PrintWriter out;
private int version = -1;
private final Map<FSEditLogOpCodes, Long> opCodeCount =
new HashMap<FSEditLogOpCodes, Long>();
/**
   * Create a visitor that writes op code statistics to the given
   * output stream.
   *
   * @param out Stream to write the statistics to
*/
public StatisticsEditsVisitor(OutputStream out) throws IOException {
this.out = new PrintWriter(new OutputStreamWriter(out, Charsets.UTF_8));
}
/** Start the visitor */
@Override
public void start(int version) throws IOException {
this.version = version;
}
/** Close the visitor */
@Override
public void close(Throwable error) throws IOException {
out.print(getStatisticsString());
if (error != null) {
out.print("EXITING ON ERROR: " + error.toString() + "\n");
}
out.close();
}
@Override
public void visitOp(FSEditLogOp op) throws IOException {
incrementOpCodeCount(op.opCode);
}
/**
* Increment the op code counter
*
* @param opCode opCode for which to increment count
*/
private void incrementOpCodeCount(FSEditLogOpCodes opCode) {
if(!opCodeCount.containsKey(opCode)) {
opCodeCount.put(opCode, 0L);
}
Long newValue = opCodeCount.get(opCode) + 1;
opCodeCount.put(opCode, newValue);
}
/**
* Get statistics
*
* @return statistics, map of counts per opCode
*/
public Map<FSEditLogOpCodes, Long> getStatistics() {
return opCodeCount;
}
/**
* Get the statistics in string format, suitable for printing
*
   * @return statistics in string format, suitable for printing
*/
public String getStatisticsString() {
StringBuffer sb = new StringBuffer();
sb.append(String.format(
" %-30.30s : %d%n",
"VERSION", version));
for(FSEditLogOpCodes opCode : FSEditLogOpCodes.values()) {
sb.append(String.format(
" %-30.30s (%3d): %d%n",
opCode.toString(),
opCode.getOpCode(),
opCodeCount.get(opCode)));
}
return sb.toString();
}
}
| 3,758 | 29.560976 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsVisitorFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.StringUtils;
/**
* EditsVisitorFactory for different implementations of EditsVisitor
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class OfflineEditsVisitorFactory {
/**
* Factory function that creates an EditsVisitor object
*
* @param filename output filename
* @param processor type of visitor to create
* @param printToScreen parameter passed to visitor constructor
*
* @return EditsVisitor for appropriate output format (binary, xml, etc.)
*/
static public OfflineEditsVisitor getEditsVisitor(String filename,
String processor, boolean printToScreen) throws IOException {
if(StringUtils.equalsIgnoreCase("binary", processor)) {
return new BinaryEditsVisitor(filename);
}
OfflineEditsVisitor vis;
OutputStream fout = new FileOutputStream(filename);
OutputStream out = null;
try {
if (!printToScreen) {
out = fout;
}
else {
OutputStream outs[] = new OutputStream[2];
outs[0] = fout;
outs[1] = System.out;
out = new TeeOutputStream(outs);
}
if(StringUtils.equalsIgnoreCase("xml", processor)) {
vis = new XmlEditsVisitor(out);
} else if(StringUtils.equalsIgnoreCase("stats", processor)) {
vis = new StatisticsEditsVisitor(out);
} else {
throw new IOException("Unknown proccesor " + processor +
" (valid processors: xml, binary, stats)");
}
out = fout = null;
return vis;
} finally {
IOUtils.closeStream(fout);
IOUtils.closeStream(out);
}
}
}
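/*
 * A small usage sketch (not part of the original file). The output file name
 * "edits-stats.txt" is illustrative; getEditsVisitor only needs a writable
 * path and one of the processor names validated above ("binary", "xml",
 * "stats").
 */
class OfflineEditsVisitorFactoryUsageSketch {
  public static void main(String[] args) throws IOException {
    // Build a statistics visitor that writes to a local file without
    // mirroring the output to stdout.
    OfflineEditsVisitor visitor =
        OfflineEditsVisitorFactory.getEditsVisitor("edits-stats.txt", "stats", false);
    // A caller such as the offline edits viewer loader would now drive the
    // visitor with start(...), visitOp(...) per operation, and finally close(...).
    visitor.close(null);
  }
}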
| 2,762 | 33.974684 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/SnapshotDiff.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.snapshot;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* A tool used to get the difference report between two snapshots, or between
* a snapshot and the current status of a directory.
* <pre>
* Usage: SnapshotDiff snapshotDir from to
 * For from/to, users can use "." to represent the current status, and use
 * ".snapshot/snapshot_name" to represent a snapshot, where ".snapshot/" can be
 * omitted.
* </pre>
*/
@InterfaceAudience.Private
public class SnapshotDiff extends Configured implements Tool {
private static String getSnapshotName(String name) {
if (Path.CUR_DIR.equals(name)) { // current directory
return "";
}
final int i;
if (name.startsWith(HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR)) {
i = 0;
} else if (name.startsWith(
HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR + Path.SEPARATOR)) {
i = 1;
} else {
return name;
}
// get the snapshot name
return name.substring(i + HdfsConstants.DOT_SNAPSHOT_DIR.length() + 1);
}
@Override
public int run(String[] argv) throws Exception {
String description = "hdfs snapshotDiff <snapshotDir> <from> <to>:\n" +
"\tGet the difference between two snapshots, \n" +
"\tor between a snapshot and the current tree of a directory.\n" +
"\tFor <from>/<to>, users can use \".\" to present the current status,\n" +
"\tand use \".snapshot/snapshot_name\" to present a snapshot,\n" +
"\twhere \".snapshot/\" can be omitted\n";
if(argv.length != 3) {
System.err.println("Usage: \n" + description);
return 1;
}
FileSystem fs = FileSystem.get(getConf());
if (! (fs instanceof DistributedFileSystem)) {
System.err.println(
"SnapshotDiff can only be used in DistributedFileSystem");
return 1;
}
DistributedFileSystem dfs = (DistributedFileSystem) fs;
Path snapshotRoot = new Path(argv[0]);
String fromSnapshot = getSnapshotName(argv[1]);
String toSnapshot = getSnapshotName(argv[2]);
try {
SnapshotDiffReport diffReport = dfs.getSnapshotDiffReport(snapshotRoot,
fromSnapshot, toSnapshot);
System.out.println(diffReport.toString());
} catch (IOException e) {
String[] content = e.getLocalizedMessage().split("\n");
System.err.println("snapshotDiff: " + content[0]);
return 1;
}
return 0;
}
public static void main(String[] argv) throws Exception {
int rc = ToolRunner.run(new SnapshotDiff(), argv);
System.exit(rc);
}
}
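/*
 * A hedged programmatic sketch of what this tool does (not part of the
 * original file). It assumes the default FileSystem is a reachable
 * DistributedFileSystem and that /dir is snapshottable with an existing
 * snapshot named s1; the path and snapshot names are illustrative only.
 */
class SnapshotDiffUsageSketch {
  public static void main(String[] args) throws Exception {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // Diff between snapshot s1 and the current state ("") of /dir.
    SnapshotDiffReport report =
        dfs.getSnapshotDiffReport(new Path("/dir"), "s1", "");
    System.out.println(report.toString());
  }
}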
| 3,777 | 34.980952 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/snapshot/LsSnapshottableDir.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.tools.snapshot;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
* A tool used to list all snapshottable directories that are owned by the
* current user. The tool returns all the snapshottable directories if the user
* is a super user.
*/
@InterfaceAudience.Private
public class LsSnapshottableDir extends Configured implements Tool {
@Override
public int run(String[] argv) throws Exception {
String description = "hdfs lsSnapshottableDir: \n" +
"\tGet the list of snapshottable directories that are owned by the current user.\n" +
"\tReturn all the snapshottable directories if the current user is a super user.\n";
if(argv.length != 0) {
System.err.println("Usage: \n" + description);
return 1;
}
FileSystem fs = FileSystem.get(getConf());
if (! (fs instanceof DistributedFileSystem)) {
System.err.println(
"LsSnapshottableDir can only be used in DistributedFileSystem");
return 1;
}
DistributedFileSystem dfs = (DistributedFileSystem) fs;
try {
SnapshottableDirectoryStatus[] stats = dfs.getSnapshottableDirListing();
SnapshottableDirectoryStatus.print(stats, System.out);
} catch (IOException e) {
String[] content = e.getLocalizedMessage().split("\n");
System.err.println("lsSnapshottableDir: " + content[0]);
return 1;
}
return 0;
}
public static void main(String[] argv) throws Exception {
int rc = ToolRunner.run(new LsSnapshottableDir(), argv);
System.exit(rc);
}
}
| 2,745 | 37.138889 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security.token.block;
import java.util.Collection;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenSelector;
/**
* A block token selector for HDFS
*/
@InterfaceAudience.Private
public class BlockTokenSelector implements TokenSelector<BlockTokenIdentifier> {
@Override
@SuppressWarnings("unchecked")
public Token<BlockTokenIdentifier> selectToken(Text service,
Collection<Token<? extends TokenIdentifier>> tokens) {
if (service == null) {
return null;
}
for (Token<? extends TokenIdentifier> token : tokens) {
if (BlockTokenIdentifier.KIND_NAME.equals(token.getKind())) {
return (Token<BlockTokenIdentifier>) token;
}
}
return null;
}
}
| 1,734 | 34.408163 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockKey.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security.token.block;
import javax.crypto.SecretKey;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.token.delegation.DelegationKey;
/**
* Key used for generating and verifying block tokens
*/
@InterfaceAudience.Private
public class BlockKey extends DelegationKey {
public BlockKey() {
super();
}
public BlockKey(int keyId, long expiryDate, SecretKey key) {
super(keyId, expiryDate, key);
}
public BlockKey(int keyId, long expiryDate, byte[] encodedKey) {
super(keyId, expiryDate, encodedKey);
}
}
| 1,416 | 31.204545 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockPoolTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security.token.block;
import java.io.IOException;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import com.google.common.annotations.VisibleForTesting;
/**
 * Manages a {@link BlockTokenSecretManager} per block pool, routing requests
 * for a given block pool Id to the corresponding {@link BlockTokenSecretManager}.
*/
public class BlockPoolTokenSecretManager extends
SecretManager<BlockTokenIdentifier> {
private final Map<String, BlockTokenSecretManager> map =
new HashMap<String, BlockTokenSecretManager>();
/**
* Add a block pool Id and corresponding {@link BlockTokenSecretManager} to map
* @param bpid block pool Id
* @param secretMgr {@link BlockTokenSecretManager}
*/
public synchronized void addBlockPool(String bpid,
BlockTokenSecretManager secretMgr) {
map.put(bpid, secretMgr);
}
synchronized BlockTokenSecretManager get(String bpid) {
BlockTokenSecretManager secretMgr = map.get(bpid);
if (secretMgr == null) {
throw new IllegalArgumentException("Block pool " + bpid
+ " is not found");
}
return secretMgr;
}
public synchronized boolean isBlockPoolRegistered(String bpid) {
return map.containsKey(bpid);
}
  /** Return an empty BlockTokenIdentifier */
@Override
public BlockTokenIdentifier createIdentifier() {
return new BlockTokenIdentifier();
}
@Override
public byte[] createPassword(BlockTokenIdentifier identifier) {
return get(identifier.getBlockPoolId()).createPassword(identifier);
}
@Override
public byte[] retrievePassword(BlockTokenIdentifier identifier)
throws InvalidToken {
return get(identifier.getBlockPoolId()).retrievePassword(identifier);
}
/**
* See {@link BlockTokenSecretManager#checkAccess(BlockTokenIdentifier,
* String, ExtendedBlock, BlockTokenIdentifier.AccessMode)}
*/
public void checkAccess(BlockTokenIdentifier id, String userId,
ExtendedBlock block, AccessMode mode) throws InvalidToken {
get(block.getBlockPoolId()).checkAccess(id, userId, block, mode);
}
/**
* See {@link BlockTokenSecretManager#checkAccess(Token, String,
* ExtendedBlock, BlockTokenIdentifier.AccessMode)}
*/
public void checkAccess(Token<BlockTokenIdentifier> token,
String userId, ExtendedBlock block, AccessMode mode) throws InvalidToken {
get(block.getBlockPoolId()).checkAccess(token, userId, block, mode);
}
/**
* See {@link BlockTokenSecretManager#addKeys(ExportedBlockKeys)}
*/
public void addKeys(String bpid, ExportedBlockKeys exportedKeys)
throws IOException {
get(bpid).addKeys(exportedKeys);
}
/**
* See {@link BlockTokenSecretManager#generateToken(ExtendedBlock, EnumSet)}
*/
public Token<BlockTokenIdentifier> generateToken(ExtendedBlock b,
EnumSet<AccessMode> of) throws IOException {
return get(b.getBlockPoolId()).generateToken(b, of);
}
@VisibleForTesting
public void clearAllKeysForTesting() {
for (BlockTokenSecretManager btsm : map.values()) {
btsm.clearAllKeysForTesting();
}
}
public DataEncryptionKey generateDataEncryptionKey(String blockPoolId) {
return get(blockPoolId).generateDataEncryptionKey();
}
public byte[] retrieveDataEncryptionKey(int keyId, String blockPoolId,
byte[] nonce) throws IOException {
return get(blockPoolId).retrieveDataEncryptionKey(keyId, nonce);
}
}
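/*
 * A minimal usage sketch (not part of the original file). The block pool id
 * "BP-example" and the 10-minute/10-hour intervals are illustrative; in a real
 * DataNode one slave-mode BlockTokenSecretManager is registered per block pool
 * as the pools are initialized.
 */
class BlockPoolTokenSecretManagerUsageSketch {
  public static void main(String[] args) {
    BlockPoolTokenSecretManager bpMgr = new BlockPoolTokenSecretManager();
    // Slave-mode manager: it only uses keys imported later via addKeys().
    BlockTokenSecretManager slave = new BlockTokenSecretManager(
        10 * 60 * 1000L,       // keyUpdateInterval: 10 minutes
        10 * 60 * 60 * 1000L,  // tokenLifetime: 10 hours
        "BP-example", null);   // block pool id, no encryption algorithm
    bpMgr.addBlockPool("BP-example", slave);
    System.out.println(bpMgr.isBlockPoolRegistered("BP-example")); // true
  }
}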
| 4,533 | 33.348485 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/InvalidBlockTokenException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security.token.block;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Access token verification failed.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class InvalidBlockTokenException extends IOException {
private static final long serialVersionUID = 168L;
public InvalidBlockTokenException() {
super();
}
public InvalidBlockTokenException(String msg) {
super(msg);
}
}
| 1,358 | 31.357143 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security.token.block;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.security.SecureRandom;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* BlockTokenSecretManager can be instantiated in 2 modes, master mode and slave
* mode. Master can generate new block keys and export block keys to slaves,
* while slaves can only import and use block keys received from master. Both
* master and slave can generate and verify block tokens. Typically, master mode
* is used by NN and slave mode is used by DN.
*/
@InterfaceAudience.Private
public class BlockTokenSecretManager extends
SecretManager<BlockTokenIdentifier> {
public static final Log LOG = LogFactory
.getLog(BlockTokenSecretManager.class);
// We use these in an HA setup to ensure that the pair of NNs produce block
// token serial numbers that are in different ranges.
private static final int LOW_MASK = ~(1 << 31);
public static final Token<BlockTokenIdentifier> DUMMY_TOKEN = new Token<BlockTokenIdentifier>();
private final boolean isMaster;
private int nnIndex;
/**
   * keyUpdateInterval is the interval at which the NN updates its block keys.
   * It should be set long enough that all live DNs and the Balancer have
   * synced their block keys with the NN at least once during each interval.
*/
private long keyUpdateInterval;
private volatile long tokenLifetime;
private int serialNo;
private BlockKey currentKey;
private BlockKey nextKey;
private final Map<Integer, BlockKey> allKeys;
private String blockPoolId;
private final String encryptionAlgorithm;
private final SecureRandom nonceGenerator = new SecureRandom();
/**
* Constructor for slaves.
*
* @param keyUpdateInterval how often a new key will be generated
* @param tokenLifetime how long an individual token is valid
*/
public BlockTokenSecretManager(long keyUpdateInterval,
long tokenLifetime, String blockPoolId, String encryptionAlgorithm) {
this(false, keyUpdateInterval, tokenLifetime, blockPoolId,
encryptionAlgorithm);
}
/**
* Constructor for masters.
*
* @param keyUpdateInterval how often a new key will be generated
* @param tokenLifetime how long an individual token is valid
* @param nnIndex namenode index
* @param blockPoolId block pool ID
* @param encryptionAlgorithm encryption algorithm to use
*/
public BlockTokenSecretManager(long keyUpdateInterval,
long tokenLifetime, int nnIndex, String blockPoolId,
String encryptionAlgorithm) {
this(true, keyUpdateInterval, tokenLifetime, blockPoolId,
encryptionAlgorithm);
Preconditions.checkArgument(nnIndex == 0 || nnIndex == 1);
this.nnIndex = nnIndex;
setSerialNo(new SecureRandom().nextInt());
generateKeys();
}
private BlockTokenSecretManager(boolean isMaster, long keyUpdateInterval,
long tokenLifetime, String blockPoolId, String encryptionAlgorithm) {
this.isMaster = isMaster;
this.keyUpdateInterval = keyUpdateInterval;
this.tokenLifetime = tokenLifetime;
this.allKeys = new HashMap<Integer, BlockKey>();
this.blockPoolId = blockPoolId;
this.encryptionAlgorithm = encryptionAlgorithm;
generateKeys();
}
@VisibleForTesting
public synchronized void setSerialNo(int serialNo) {
this.serialNo = (serialNo & LOW_MASK) | (nnIndex << 31);
}
public void setBlockPoolId(String blockPoolId) {
this.blockPoolId = blockPoolId;
}
/** Initialize block keys */
private synchronized void generateKeys() {
if (!isMaster)
return;
/*
* Need to set estimated expiry dates for currentKey and nextKey so that if
* NN crashes, DN can still expire those keys. NN will stop using the newly
* generated currentKey after the first keyUpdateInterval, however it may
* still be used by DN and Balancer to generate new tokens before they get a
     * chance to sync their keys with NN. Since we require keyUpdateInterval to be
* long enough so that all live DN's and Balancer will sync their keys with
* NN at least once during the period, the estimated expiry date for
* currentKey is set to now() + 2 * keyUpdateInterval + tokenLifetime.
* Similarly, the estimated expiry date for nextKey is one keyUpdateInterval
* more.
*/
setSerialNo(serialNo + 1);
currentKey = new BlockKey(serialNo, Time.now() + 2
* keyUpdateInterval + tokenLifetime, generateSecret());
setSerialNo(serialNo + 1);
nextKey = new BlockKey(serialNo, Time.now() + 3
* keyUpdateInterval + tokenLifetime, generateSecret());
allKeys.put(currentKey.getKeyId(), currentKey);
allKeys.put(nextKey.getKeyId(), nextKey);
}
/** Export block keys, only to be used in master mode */
public synchronized ExportedBlockKeys exportKeys() {
if (!isMaster)
return null;
if (LOG.isDebugEnabled())
LOG.debug("Exporting access keys");
return new ExportedBlockKeys(true, keyUpdateInterval, tokenLifetime,
currentKey, allKeys.values().toArray(new BlockKey[0]));
}
private synchronized void removeExpiredKeys() {
long now = Time.now();
for (Iterator<Map.Entry<Integer, BlockKey>> it = allKeys.entrySet()
.iterator(); it.hasNext();) {
Map.Entry<Integer, BlockKey> e = it.next();
if (e.getValue().getExpiryDate() < now) {
it.remove();
}
}
}
/**
* Set block keys, only to be used in slave mode
*/
public synchronized void addKeys(ExportedBlockKeys exportedKeys)
throws IOException {
if (isMaster || exportedKeys == null)
return;
LOG.info("Setting block keys");
removeExpiredKeys();
this.currentKey = exportedKeys.getCurrentKey();
BlockKey[] receivedKeys = exportedKeys.getAllKeys();
for (int i = 0; i < receivedKeys.length; i++) {
if (receivedKeys[i] == null)
continue;
this.allKeys.put(receivedKeys[i].getKeyId(), receivedKeys[i]);
}
}
/**
* Update block keys if update time > update interval.
* @return true if the keys are updated.
*/
public synchronized boolean updateKeys(final long updateTime) throws IOException {
if (updateTime > keyUpdateInterval) {
return updateKeys();
}
return false;
}
/**
* Update block keys, only to be used in master mode
*/
synchronized boolean updateKeys() throws IOException {
if (!isMaster)
return false;
LOG.info("Updating block keys");
removeExpiredKeys();
// set final expiry date of retiring currentKey
allKeys.put(currentKey.getKeyId(), new BlockKey(currentKey.getKeyId(),
Time.now() + keyUpdateInterval + tokenLifetime,
currentKey.getKey()));
// update the estimated expiry date of new currentKey
currentKey = new BlockKey(nextKey.getKeyId(), Time.now()
+ 2 * keyUpdateInterval + tokenLifetime, nextKey.getKey());
allKeys.put(currentKey.getKeyId(), currentKey);
// generate a new nextKey
setSerialNo(serialNo + 1);
nextKey = new BlockKey(serialNo, Time.now() + 3
* keyUpdateInterval + tokenLifetime, generateSecret());
allKeys.put(nextKey.getKeyId(), nextKey);
return true;
}
  /** Generate a block token for the current user */
public Token<BlockTokenIdentifier> generateToken(ExtendedBlock block,
EnumSet<BlockTokenIdentifier.AccessMode> modes) throws IOException {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String userID = (ugi == null ? null : ugi.getShortUserName());
return generateToken(userID, block, modes);
}
/** Generate a block token for a specified user */
public Token<BlockTokenIdentifier> generateToken(String userId,
ExtendedBlock block, EnumSet<BlockTokenIdentifier.AccessMode> modes) throws IOException {
BlockTokenIdentifier id = new BlockTokenIdentifier(userId, block
.getBlockPoolId(), block.getBlockId(), modes);
return new Token<BlockTokenIdentifier>(id, this);
}
/**
* Check if access should be allowed. userID is not checked if null. This
* method doesn't check if token password is correct. It should be used only
* when token password has already been verified (e.g., in the RPC layer).
*/
public void checkAccess(BlockTokenIdentifier id, String userId,
ExtendedBlock block, BlockTokenIdentifier.AccessMode mode) throws InvalidToken {
if (LOG.isDebugEnabled()) {
LOG.debug("Checking access for user=" + userId + ", block=" + block
+ ", access mode=" + mode + " using " + id.toString());
}
if (userId != null && !userId.equals(id.getUserId())) {
throw new InvalidToken("Block token with " + id.toString()
+ " doesn't belong to user " + userId);
}
if (!id.getBlockPoolId().equals(block.getBlockPoolId())) {
throw new InvalidToken("Block token with " + id.toString()
+ " doesn't apply to block " + block);
}
if (id.getBlockId() != block.getBlockId()) {
throw new InvalidToken("Block token with " + id.toString()
+ " doesn't apply to block " + block);
}
if (isExpired(id.getExpiryDate())) {
throw new InvalidToken("Block token with " + id.toString()
+ " is expired.");
}
if (!id.getAccessModes().contains(mode)) {
throw new InvalidToken("Block token with " + id.toString()
+ " doesn't have " + mode + " permission");
}
}
/** Check if access should be allowed. userID is not checked if null */
public void checkAccess(Token<BlockTokenIdentifier> token, String userId,
ExtendedBlock block, BlockTokenIdentifier.AccessMode mode) throws InvalidToken {
BlockTokenIdentifier id = new BlockTokenIdentifier();
try {
id.readFields(new DataInputStream(new ByteArrayInputStream(token
.getIdentifier())));
} catch (IOException e) {
throw new InvalidToken(
"Unable to de-serialize block token identifier for user=" + userId
+ ", block=" + block + ", access mode=" + mode);
}
checkAccess(id, userId, block, mode);
if (!Arrays.equals(retrievePassword(id), token.getPassword())) {
throw new InvalidToken("Block token with " + id.toString()
+ " doesn't have the correct token password");
}
}
private static boolean isExpired(long expiryDate) {
return Time.now() > expiryDate;
}
/**
   * Check if a token is expired. For unit tests only. Returns true when the
   * token is expired, false otherwise.
*/
static boolean isTokenExpired(Token<BlockTokenIdentifier> token)
throws IOException {
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream in = new DataInputStream(buf);
long expiryDate = WritableUtils.readVLong(in);
return isExpired(expiryDate);
}
/** set token lifetime. */
public void setTokenLifetime(long tokenLifetime) {
this.tokenLifetime = tokenLifetime;
}
/**
* Create an empty block token identifier
*
* @return a newly created empty block token identifier
*/
@Override
public BlockTokenIdentifier createIdentifier() {
return new BlockTokenIdentifier();
}
/**
* Create a new password/secret for the given block token identifier.
*
* @param identifier
* the block token identifier
* @return token password/secret
*/
@Override
protected byte[] createPassword(BlockTokenIdentifier identifier) {
BlockKey key = null;
synchronized (this) {
key = currentKey;
}
if (key == null)
throw new IllegalStateException("currentKey hasn't been initialized.");
identifier.setExpiryDate(Time.now() + tokenLifetime);
identifier.setKeyId(key.getKeyId());
if (LOG.isDebugEnabled()) {
LOG.debug("Generating block token for " + identifier.toString());
}
return createPassword(identifier.getBytes(), key.getKey());
}
/**
* Look up the token password/secret for the given block token identifier.
*
* @param identifier
* the block token identifier to look up
* @return token password/secret as byte[]
* @throws InvalidToken
*/
@Override
public byte[] retrievePassword(BlockTokenIdentifier identifier)
throws InvalidToken {
if (isExpired(identifier.getExpiryDate())) {
throw new InvalidToken("Block token with " + identifier.toString()
+ " is expired.");
}
BlockKey key = null;
synchronized (this) {
key = allKeys.get(identifier.getKeyId());
}
if (key == null) {
throw new InvalidToken("Can't re-compute password for "
+ identifier.toString() + ", since the required block key (keyID="
+ identifier.getKeyId() + ") doesn't exist.");
}
return createPassword(identifier.getBytes(), key.getKey());
}
/**
* Generate a data encryption key for this block pool, using the current
* BlockKey.
*
* @return a data encryption key which may be used to encrypt traffic
* over the DataTransferProtocol
*/
public DataEncryptionKey generateDataEncryptionKey() {
byte[] nonce = new byte[8];
nonceGenerator.nextBytes(nonce);
BlockKey key = null;
synchronized (this) {
key = currentKey;
}
byte[] encryptionKey = createPassword(nonce, key.getKey());
return new DataEncryptionKey(key.getKeyId(), blockPoolId, nonce,
encryptionKey, Time.now() + tokenLifetime,
encryptionAlgorithm);
}
/**
* Recreate an encryption key based on the given key id and nonce.
*
* @param keyId identifier of the secret key used to generate the encryption key.
* @param nonce random value used to create the encryption key
* @return the encryption key which corresponds to this (keyId, blockPoolId, nonce)
* @throws InvalidEncryptionKeyException
*/
public byte[] retrieveDataEncryptionKey(int keyId, byte[] nonce)
throws InvalidEncryptionKeyException {
BlockKey key = null;
synchronized (this) {
key = allKeys.get(keyId);
if (key == null) {
throw new InvalidEncryptionKeyException("Can't re-compute encryption key"
+ " for nonce, since the required block key (keyID=" + keyId
+ ") doesn't exist. Current key: " + currentKey.getKeyId());
}
}
return createPassword(nonce, key.getKey());
}
@VisibleForTesting
public synchronized void setKeyUpdateIntervalForTesting(long millis) {
this.keyUpdateInterval = millis;
}
@VisibleForTesting
public void clearAllKeysForTesting() {
allKeys.clear();
}
@VisibleForTesting
public synchronized int getSerialNoForTesting() {
return serialNo;
}
}
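/*
 * A hedged sketch of master-mode use (not part of the original file). The
 * intervals, the block pool id "BP-example", the user name and the block id
 * are illustrative; in HDFS the NameNode owns the master instance and
 * DataNodes verify the resulting tokens against imported keys.
 */
class BlockTokenSecretManagerUsageSketch {
  public static void main(String[] args) throws Exception {
    BlockTokenSecretManager master = new BlockTokenSecretManager(
        10 * 60 * 1000L,       // keyUpdateInterval: 10 minutes
        10 * 60 * 60 * 1000L,  // tokenLifetime: 10 hours
        0,                     // nnIndex (0 or 1 in an HA pair)
        "BP-example", null);   // block pool id, no encryption algorithm
    ExtendedBlock block = new ExtendedBlock("BP-example", 1L);
    Token<BlockTokenIdentifier> token = master.generateToken(
        "exampleUser", block, EnumSet.of(BlockTokenIdentifier.AccessMode.READ));
    // Verifies the user, block pool, block id, expiry, access mode and token
    // password; throws InvalidToken if any check fails.
    master.checkAccess(token, "exampleUser", block,
        BlockTokenIdentifier.AccessMode.READ);
    System.out.println("verified token of kind " + token.getKind());
  }
}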
| 16,241 | 35.498876 | 98 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security.token.block;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.io.WritableFactory;
/**
* Object for passing block keys
*/
@InterfaceAudience.Private
public class ExportedBlockKeys implements Writable {
public static final ExportedBlockKeys DUMMY_KEYS = new ExportedBlockKeys();
private boolean isBlockTokenEnabled;
private long keyUpdateInterval;
private long tokenLifetime;
private final BlockKey currentKey;
private BlockKey[] allKeys;
public ExportedBlockKeys() {
this(false, 0, 0, new BlockKey(), new BlockKey[0]);
}
public ExportedBlockKeys(boolean isBlockTokenEnabled, long keyUpdateInterval,
long tokenLifetime, BlockKey currentKey, BlockKey[] allKeys) {
this.isBlockTokenEnabled = isBlockTokenEnabled;
this.keyUpdateInterval = keyUpdateInterval;
this.tokenLifetime = tokenLifetime;
this.currentKey = currentKey == null ? new BlockKey() : currentKey;
this.allKeys = allKeys == null ? new BlockKey[0] : allKeys;
}
public boolean isBlockTokenEnabled() {
return isBlockTokenEnabled;
}
public long getKeyUpdateInterval() {
return keyUpdateInterval;
}
public long getTokenLifetime() {
return tokenLifetime;
}
public BlockKey getCurrentKey() {
return currentKey;
}
public BlockKey[] getAllKeys() {
return allKeys;
}
// ///////////////////////////////////////////////
// Writable
// ///////////////////////////////////////////////
static { // register a ctor
WritableFactories.setFactory(ExportedBlockKeys.class,
new WritableFactory() {
@Override
public Writable newInstance() {
return new ExportedBlockKeys();
}
});
}
/**
*/
@Override
public void write(DataOutput out) throws IOException {
out.writeBoolean(isBlockTokenEnabled);
out.writeLong(keyUpdateInterval);
out.writeLong(tokenLifetime);
currentKey.write(out);
out.writeInt(allKeys.length);
for (int i = 0; i < allKeys.length; i++) {
allKeys[i].write(out);
}
}
/**
*/
@Override
public void readFields(DataInput in) throws IOException {
isBlockTokenEnabled = in.readBoolean();
keyUpdateInterval = in.readLong();
tokenLifetime = in.readLong();
currentKey.readFields(in);
this.allKeys = new BlockKey[in.readInt()];
for (int i = 0; i < allKeys.length; i++) {
allKeys[i] = new BlockKey();
allKeys[i].readFields(in);
}
}
}
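/*
 * A small Writable round-trip sketch (not part of the original file). The key
 * id, expiry offset and 8-byte key material are illustrative values only.
 */
class ExportedBlockKeysUsageSketch {
  public static void main(String[] args) throws IOException {
    BlockKey current =
        new BlockKey(1, System.currentTimeMillis() + 600000L, new byte[8]);
    ExportedBlockKeys keys = new ExportedBlockKeys(
        true, 600000L, 3600000L, current, new BlockKey[] { current });
    // Serialize ...
    java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
    keys.write(new java.io.DataOutputStream(bytes));
    // ... and deserialize into a fresh instance.
    ExportedBlockKeys copy = new ExportedBlockKeys();
    copy.readFields(new java.io.DataInputStream(
        new java.io.ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(copy.getAllKeys().length); // 1
  }
}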
| 3,497 | 28.644068 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.security.token.delegation;
import java.io.DataInput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.protobuf.ByteString;
/**
 * An HDFS-specific delegation token secret manager.
* The secret manager is responsible for generating and accepting the password
* for each token.
*/
@InterfaceAudience.Private
public class DelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
private static final Log LOG = LogFactory
.getLog(DelegationTokenSecretManager.class);
private final FSNamesystem namesystem;
private final SerializerCompat serializerCompat = new SerializerCompat();
public DelegationTokenSecretManager(long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime, long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval, FSNamesystem namesystem) {
this(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval, false,
namesystem);
}
/**
* Create a secret manager
* @param delegationKeyUpdateInterval the number of milliseconds for rolling
* new secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
* tokens in milliseconds
* @param delegationTokenRenewInterval how often the tokens must be renewed
* in milliseconds
* @param delegationTokenRemoverScanInterval how often the tokens are scanned
* for expired tokens in milliseconds
* @param storeTokenTrackingId whether to store the token's tracking id
*/
public DelegationTokenSecretManager(long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime, long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval, boolean storeTokenTrackingId,
FSNamesystem namesystem) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.namesystem = namesystem;
this.storeTokenTrackingId = storeTokenTrackingId;
}
@Override //SecretManager
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier();
}
@Override
public byte[] retrievePassword(
DelegationTokenIdentifier identifier) throws InvalidToken {
try {
// this check introduces inconsistency in the authentication to a
// HA standby NN. non-token auths are allowed into the namespace which
// decides whether to throw a StandbyException. tokens are a bit
// different in that a standby may be behind and thus not yet know
// of all tokens issued by the active NN. the following check does
// not allow ANY token auth, however it should allow known tokens in
namesystem.checkOperation(OperationCategory.READ);
} catch (StandbyException se) {
// FIXME: this is a hack to get around changing method signatures by
// tunneling a non-InvalidToken exception as the cause which the
// RPC server will unwrap before returning to the client
InvalidToken wrappedStandby = new InvalidToken("StandbyException");
wrappedStandby.initCause(se);
throw wrappedStandby;
}
return super.retrievePassword(identifier);
}
@Override
public byte[] retriableRetrievePassword(DelegationTokenIdentifier identifier)
throws InvalidToken, StandbyException, RetriableException, IOException {
namesystem.checkOperation(OperationCategory.READ);
try {
return super.retrievePassword(identifier);
} catch (InvalidToken it) {
if (namesystem.inTransitionToActive()) {
// if the namesystem is currently in the middle of transition to
// active state, let client retry since the corresponding editlog may
// have not been applied yet
throw new RetriableException(it);
} else {
throw it;
}
}
}
/**
* Returns expiry time of a token given its identifier.
*
* @param dtId DelegationTokenIdentifier of a token
* @return Expiry time of the token
* @throws IOException
*/
public synchronized long getTokenExpiryTime(
DelegationTokenIdentifier dtId) throws IOException {
DelegationTokenInformation info = currentTokens.get(dtId);
if (info != null) {
return info.getRenewDate();
} else {
throw new IOException("No delegation token found for this identifier");
}
}
/**
* Load SecretManager state from fsimage.
*
* @param in input stream to read fsimage
* @throws IOException
*/
public synchronized void loadSecretManagerStateCompat(DataInput in)
throws IOException {
if (running) {
// a safety check
throw new IOException(
"Can't load state from image in a running SecretManager.");
}
serializerCompat.load(in);
}
public static class SecretManagerState {
public final SecretManagerSection section;
public final List<SecretManagerSection.DelegationKey> keys;
public final List<SecretManagerSection.PersistToken> tokens;
public SecretManagerState(
SecretManagerSection s,
List<SecretManagerSection.DelegationKey> keys,
List<SecretManagerSection.PersistToken> tokens) {
this.section = s;
this.keys = keys;
this.tokens = tokens;
}
}
public synchronized void loadSecretManagerState(SecretManagerState state)
throws IOException {
Preconditions.checkState(!running,
"Can't load state from image in a running SecretManager.");
currentId = state.section.getCurrentId();
delegationTokenSequenceNumber = state.section.getTokenSequenceNumber();
for (SecretManagerSection.DelegationKey k : state.keys) {
addKey(new DelegationKey(k.getId(), k.getExpiryDate(), k.hasKey() ? k
.getKey().toByteArray() : null));
}
for (SecretManagerSection.PersistToken t : state.tokens) {
DelegationTokenIdentifier id = new DelegationTokenIdentifier(new Text(
t.getOwner()), new Text(t.getRenewer()), new Text(t.getRealUser()));
id.setIssueDate(t.getIssueDate());
id.setMaxDate(t.getMaxDate());
id.setSequenceNumber(t.getSequenceNumber());
id.setMasterKeyId(t.getMasterKeyId());
addPersistedDelegationToken(id, t.getExpiryDate());
}
}
/**
* Store the current state of the SecretManager for persistence
*
* @param out Output stream for writing into fsimage.
* @param sdPath String storage directory path
* @throws IOException
*/
public synchronized void saveSecretManagerStateCompat(DataOutputStream out,
String sdPath) throws IOException {
serializerCompat.save(out, sdPath);
}
public synchronized SecretManagerState saveSecretManagerState() {
SecretManagerSection s = SecretManagerSection.newBuilder()
.setCurrentId(currentId)
.setTokenSequenceNumber(delegationTokenSequenceNumber)
.setNumKeys(allKeys.size()).setNumTokens(currentTokens.size()).build();
ArrayList<SecretManagerSection.DelegationKey> keys = Lists
.newArrayListWithCapacity(allKeys.size());
ArrayList<SecretManagerSection.PersistToken> tokens = Lists
.newArrayListWithCapacity(currentTokens.size());
for (DelegationKey v : allKeys.values()) {
SecretManagerSection.DelegationKey.Builder b = SecretManagerSection.DelegationKey
.newBuilder().setId(v.getKeyId()).setExpiryDate(v.getExpiryDate());
if (v.getEncodedKey() != null) {
b.setKey(ByteString.copyFrom(v.getEncodedKey()));
}
keys.add(b.build());
}
for (Entry<DelegationTokenIdentifier, DelegationTokenInformation> e : currentTokens
.entrySet()) {
DelegationTokenIdentifier id = e.getKey();
SecretManagerSection.PersistToken.Builder b = SecretManagerSection.PersistToken
.newBuilder().setOwner(id.getOwner().toString())
.setRenewer(id.getRenewer().toString())
.setRealUser(id.getRealUser().toString())
.setIssueDate(id.getIssueDate()).setMaxDate(id.getMaxDate())
.setSequenceNumber(id.getSequenceNumber())
.setMasterKeyId(id.getMasterKeyId())
.setExpiryDate(e.getValue().getRenewDate());
tokens.add(b.build());
}
return new SecretManagerState(s, keys, tokens);
}
/**
* This method is intended to be used only while reading edit logs.
*
* @param identifier DelegationTokenIdentifier read from the edit logs or
* fsimage
*
* @param expiryTime token expiry time
* @throws IOException
*/
public synchronized void addPersistedDelegationToken(
DelegationTokenIdentifier identifier, long expiryTime) throws IOException {
if (running) {
// a safety check
throw new IOException(
"Can't add persisted delegation token to a running SecretManager.");
}
int keyId = identifier.getMasterKeyId();
DelegationKey dKey = allKeys.get(keyId);
if (dKey == null) {
      LOG.warn("No KEY found for persisted identifier "
          + identifier.toString());
return;
}
byte[] password = createPassword(identifier.getBytes(), dKey.getKey());
if (identifier.getSequenceNumber() > this.delegationTokenSequenceNumber) {
this.delegationTokenSequenceNumber = identifier.getSequenceNumber();
}
if (currentTokens.get(identifier) == null) {
currentTokens.put(identifier, new DelegationTokenInformation(expiryTime,
password, getTrackingIdIfEnabled(identifier)));
} else {
throw new IOException(
"Same delegation token being added twice; invalid entry in fsimage or editlogs");
}
}
/**
* Add a MasterKey to the list of keys.
*
* @param key DelegationKey
* @throws IOException
*/
public synchronized void updatePersistedMasterKey(DelegationKey key)
throws IOException {
addKey(key);
}
/**
* Update the token cache with renewal record in edit logs.
*
* @param identifier DelegationTokenIdentifier of the renewed token
   * @param expiryTime expiry time in milliseconds
* @throws IOException
*/
public synchronized void updatePersistedTokenRenewal(
DelegationTokenIdentifier identifier, long expiryTime) throws IOException {
if (running) {
// a safety check
throw new IOException(
"Can't update persisted delegation token renewal to a running SecretManager.");
}
DelegationTokenInformation info = null;
info = currentTokens.get(identifier);
if (info != null) {
int keyId = identifier.getMasterKeyId();
byte[] password = createPassword(identifier.getBytes(), allKeys
.get(keyId).getKey());
currentTokens.put(identifier, new DelegationTokenInformation(expiryTime,
password, getTrackingIdIfEnabled(identifier)));
}
}
/**
* Update the token cache with the cancel record in edit logs
*
* @param identifier DelegationTokenIdentifier of the canceled token
* @throws IOException
*/
public synchronized void updatePersistedTokenCancellation(
DelegationTokenIdentifier identifier) throws IOException {
if (running) {
// a safety check
throw new IOException(
"Can't update persisted delegation token renewal to a running SecretManager.");
}
currentTokens.remove(identifier);
}
/**
* Returns the number of delegation keys currently stored.
* @return number of delegation keys
*/
public synchronized int getNumberOfKeys() {
return allKeys.size();
}
/**
* Call namesystem to update editlogs for new master key.
*/
@Override //AbstractDelegationTokenManager
protected void logUpdateMasterKey(DelegationKey key)
throws IOException {
synchronized (noInterruptsLock) {
// The edit logging code will fail catastrophically if it
// is interrupted during a logSync, since the interrupt
// closes the edit log files. Doing this inside the
// above lock and then checking interruption status
// prevents this bug.
if (Thread.interrupted()) {
throw new InterruptedIOException(
"Interrupted before updating master key");
}
namesystem.logUpdateMasterKey(key);
}
}
@Override //AbstractDelegationTokenManager
protected void logExpireToken(final DelegationTokenIdentifier dtId)
throws IOException {
synchronized (noInterruptsLock) {
// The edit logging code will fail catastrophically if it
// is interrupted during a logSync, since the interrupt
// closes the edit log files. Doing this inside the
// above lock and then checking interruption status
// prevents this bug.
if (Thread.interrupted()) {
throw new InterruptedIOException(
"Interrupted before expiring delegation token");
}
namesystem.logExpireDelegationToken(dtId);
}
}
/** A utility method for creating credentials. */
public static Credentials createCredentials(final NameNode namenode,
final UserGroupInformation ugi, final String renewer) throws IOException {
final Token<DelegationTokenIdentifier> token = namenode.getRpcServer(
).getDelegationToken(new Text(renewer));
if (token == null) {
return null;
}
final InetSocketAddress addr = namenode.getNameNodeAddress();
SecurityUtil.setTokenService(token, addr);
final Credentials c = new Credentials();
c.addToken(new Text(ugi.getShortUserName()), token);
return c;
}
private final class SerializerCompat {
private void load(DataInput in) throws IOException {
currentId = in.readInt();
loadAllKeys(in);
delegationTokenSequenceNumber = in.readInt();
loadCurrentTokens(in);
}
private void save(DataOutputStream out, String sdPath) throws IOException {
out.writeInt(currentId);
saveAllKeys(out, sdPath);
out.writeInt(delegationTokenSequenceNumber);
saveCurrentTokens(out, sdPath);
}
/**
* Private helper methods to save delegation keys and tokens in fsimage
*/
private synchronized void saveCurrentTokens(DataOutputStream out,
String sdPath) throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
prog.beginStep(Phase.SAVING_CHECKPOINT, step);
prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
out.writeInt(currentTokens.size());
Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
.iterator();
while (iter.hasNext()) {
DelegationTokenIdentifier id = iter.next();
id.write(out);
DelegationTokenInformation info = currentTokens.get(id);
out.writeLong(info.getRenewDate());
counter.increment();
}
prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
/*
* Save the current state of allKeys
*/
private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
prog.beginStep(Phase.SAVING_CHECKPOINT, step);
      prog.setTotal(Phase.SAVING_CHECKPOINT, step, allKeys.size());
Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
out.writeInt(allKeys.size());
Iterator<Integer> iter = allKeys.keySet().iterator();
while (iter.hasNext()) {
Integer key = iter.next();
allKeys.get(key).write(out);
counter.increment();
}
prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
/**
* Private helper methods to load Delegation tokens from fsimage
*/
private synchronized void loadCurrentTokens(DataInput in)
throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.DELEGATION_TOKENS);
prog.beginStep(Phase.LOADING_FSIMAGE, step);
int numberOfTokens = in.readInt();
prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfTokens);
Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
for (int i = 0; i < numberOfTokens; i++) {
DelegationTokenIdentifier id = new DelegationTokenIdentifier();
id.readFields(in);
long expiryTime = in.readLong();
addPersistedDelegationToken(id, expiryTime);
counter.increment();
}
prog.endStep(Phase.LOADING_FSIMAGE, step);
}
/**
* Private helper method to load delegation keys from fsimage.
* @throws IOException on error
*/
private synchronized void loadAllKeys(DataInput in) throws IOException {
StartupProgress prog = NameNode.getStartupProgress();
Step step = new Step(StepType.DELEGATION_KEYS);
prog.beginStep(Phase.LOADING_FSIMAGE, step);
int numberOfKeys = in.readInt();
prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
for (int i = 0; i < numberOfKeys; i++) {
DelegationKey value = new DelegationKey();
value.readFields(in);
addKey(value);
counter.increment();
}
prog.endStep(Phase.LOADING_FSIMAGE, step);
}
}
}
| 19,843 | 37.382979 | 91 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/inotify/MissingEventsException.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.inotify;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MissingEventsException extends Exception {
private static final long serialVersionUID = 1L;
private long expectedTxid;
private long actualTxid;
public MissingEventsException() {}
public MissingEventsException(long expectedTxid, long actualTxid) {
this.expectedTxid = expectedTxid;
this.actualTxid = actualTxid;
}
public long getExpectedTxid() {
return expectedTxid;
}
public long getActualTxid() {
return actualTxid;
}
@Override
public String toString() {
return "We expected the next batch of events to start with transaction ID "
+ expectedTxid + ", but it instead started with transaction ID " +
actualTxid + ". Most likely the intervening transactions were cleaned "
+ "up as part of checkpointing.";
}
}
| 1,822 | 32.145455 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.client;
import java.io.InputStream;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import com.google.common.base.Preconditions;
/**
* The Hdfs implementation of {@link FSDataInputStream}.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HdfsDataInputStream extends FSDataInputStream {
public HdfsDataInputStream(DFSInputStream in) throws IOException {
super(in);
}
public HdfsDataInputStream(CryptoInputStream in) throws IOException {
super(in);
Preconditions.checkArgument(in.getWrappedStream() instanceof DFSInputStream,
"CryptoInputStream should wrap a DFSInputStream");
}
private DFSInputStream getDFSInputStream() {
if (in instanceof CryptoInputStream) {
return (DFSInputStream) ((CryptoInputStream) in).getWrappedStream();
}
return (DFSInputStream) in;
}
  /**
   * Get a reference to the wrapped input stream. We always want to return the
   * actual underlying InputStream, even when we're using a CryptoStream, e.g.
   * in the delegated methods below.
   *
   * @return the underlying input stream
   */
public InputStream getWrappedStream() {
return in;
}
/**
* Get the datanode from which the stream is currently reading.
*/
public DatanodeInfo getCurrentDatanode() {
return getDFSInputStream().getCurrentDatanode();
}
/**
* Get the block containing the target position.
*/
public ExtendedBlock getCurrentBlock() {
return getDFSInputStream().getCurrentBlock();
}
/**
   * Get the collection of blocks that have already been located.
*/
public List<LocatedBlock> getAllBlocks() throws IOException {
return getDFSInputStream().getAllBlocks();
}
/**
* Get the visible length of the file. It will include the length of the last
* block even if that is in UnderConstruction state.
*
* @return The visible length of the file.
*/
public long getVisibleLength() throws IOException {
return getDFSInputStream().getFileLength();
}
/**
* Get statistics about the reads which this DFSInputStream has done.
* Note that because HdfsDataInputStream is buffered, these stats may
* be higher than you would expect just by adding up the number of
* bytes read through HdfsDataInputStream.
*/
public DFSInputStream.ReadStatistics getReadStatistics() {
return getDFSInputStream().getReadStatistics();
}
public void clearReadStatistics() {
getDFSInputStream().clearReadStatistics();
}
}
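/*
 * A minimal usage sketch (not part of the original source). It assumes the default
 * file system is HDFS and that "/tmp/sample" already exists, so that open() hands
 * back an HdfsDataInputStream; the path and printed accessors are illustrative only.
 */
class HdfsDataInputStreamExample {
  public static void main(String[] args) throws IOException {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    org.apache.hadoop.fs.FileSystem fs =
        org.apache.hadoop.fs.FileSystem.get(conf);
    FSDataInputStream raw = fs.open(new org.apache.hadoop.fs.Path("/tmp/sample"));
    try {
      HdfsDataInputStream in = (HdfsDataInputStream) raw;
      in.read(); // read a byte so that a block and a datanode have been selected
      System.out.println("block    : " + in.getCurrentBlock());
      System.out.println("datanode : " + in.getCurrentDatanode());
      System.out.println("length   : " + in.getVisibleLength());
      // getTotalBytesRead() is assumed from DFSInputStream.ReadStatistics.
      System.out.println("bytes read: "
          + in.getReadStatistics().getTotalBytesRead());
    } finally {
      raw.close();
    }
  }
}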
| 3,745 | 31.859649 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.client;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
/**
* The public API for performing administrative functions on HDFS. Those writing
* applications against HDFS should prefer this interface to directly accessing
* functionality in DistributedFileSystem or DFSClient.
*
* Note that this is distinct from the similarly-named {@link DFSAdmin}, which
* is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
* commands.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HdfsAdmin {
private DistributedFileSystem dfs;
/**
* Create a new HdfsAdmin client.
*
* @param uri the unique URI of the HDFS file system to administer
* @param conf configuration
* @throws IOException in the event the file system could not be created
*/
public HdfsAdmin(URI uri, Configuration conf) throws IOException {
FileSystem fs = FileSystem.get(uri, conf);
if (!(fs instanceof DistributedFileSystem)) {
throw new IllegalArgumentException("'" + uri + "' is not an HDFS URI.");
} else {
dfs = (DistributedFileSystem)fs;
}
}
/**
* Set the namespace quota (count of files, directories, and sym links) for a
* directory.
*
* @param src the path to set the quota for
* @param quota the value to set for the quota
* @throws IOException in the event of error
*/
public void setQuota(Path src, long quota) throws IOException {
dfs.setQuota(src, quota, HdfsConstants.QUOTA_DONT_SET);
}
/**
* Clear the namespace quota (count of files, directories and sym links) for a
* directory.
*
* @param src the path to clear the quota of
* @throws IOException in the event of error
*/
public void clearQuota(Path src) throws IOException {
dfs.setQuota(src, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
}
/**
* Set the storage space quota (size of files) for a directory. Note that
* directories and sym links do not occupy storage space.
*
* @param src the path to set the space quota of
* @param spaceQuota the value to set for the space quota
* @throws IOException in the event of error
*/
public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota);
}
/**
* Clear the storage space quota (size of files) for a directory. Note that
* directories and sym links do not occupy storage space.
*
* @param src the path to clear the space quota of
* @throws IOException in the event of error
*/
public void clearSpaceQuota(Path src) throws IOException {
dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
}
/**
* Set the quota by storage type for a directory. Note that
* directories and sym links do not occupy storage type quota.
*
* @param src the target directory to set the quota by storage type
* @param type the storage type to set for quota by storage type
* @param quota the value to set for quota by storage type
* @throws IOException in the event of error
*/
public void setQuotaByStorageType(Path src, StorageType type, long quota)
throws IOException {
dfs.setQuotaByStorageType(src, type, quota);
}
/**
* Clear the space quota by storage type for a directory. Note that
* directories and sym links do not occupy storage type quota.
*
* @param src the target directory to clear the quota by storage type
* @param type the storage type to clear for quota by storage type
* @throws IOException in the event of error
*/
public void clearQuotaByStorageType(Path src, StorageType type) throws IOException {
dfs.setQuotaByStorageType(src, type, HdfsConstants.QUOTA_RESET);
}
/**
* Allow snapshot on a directory.
* @param path The path of the directory where snapshots will be taken.
*/
public void allowSnapshot(Path path) throws IOException {
dfs.allowSnapshot(path);
}
/**
* Disallow snapshot on a directory.
* @param path The path of the snapshottable directory.
*/
public void disallowSnapshot(Path path) throws IOException {
dfs.disallowSnapshot(path);
}
/**
* Add a new CacheDirectiveInfo.
*
* @param info Information about a directive to add.
* @param flags {@link CacheFlag}s to use for this operation.
* @return the ID of the directive that was created.
* @throws IOException if the directive could not be added
*/
public long addCacheDirective(CacheDirectiveInfo info,
EnumSet<CacheFlag> flags) throws IOException {
return dfs.addCacheDirective(info, flags);
}
/**
* Modify a CacheDirective.
*
* @param info Information about the directive to modify. You must set the ID
* to indicate which CacheDirective you want to modify.
* @param flags {@link CacheFlag}s to use for this operation.
* @throws IOException if the directive could not be modified
*/
public void modifyCacheDirective(CacheDirectiveInfo info,
EnumSet<CacheFlag> flags) throws IOException {
dfs.modifyCacheDirective(info, flags);
}
/**
* Remove a CacheDirective.
*
* @param id identifier of the CacheDirectiveInfo to remove
* @throws IOException if the directive could not be removed
*/
public void removeCacheDirective(long id)
throws IOException {
dfs.removeCacheDirective(id);
}
/**
* List cache directives. Incrementally fetches results from the server.
*
* @param filter Filter parameters to use when listing the directives, null to
* list all directives visible to us.
* @return A RemoteIterator which returns CacheDirectiveInfo objects.
*/
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
CacheDirectiveInfo filter) throws IOException {
return dfs.listCacheDirectives(filter);
}
/**
* Add a cache pool.
*
* @param info
* The request to add a cache pool.
* @throws IOException
* If the request could not be completed.
*/
public void addCachePool(CachePoolInfo info) throws IOException {
dfs.addCachePool(info);
}
/**
* Modify an existing cache pool.
*
* @param info
* The request to modify a cache pool.
* @throws IOException
* If the request could not be completed.
*/
public void modifyCachePool(CachePoolInfo info) throws IOException {
dfs.modifyCachePool(info);
}
/**
* Remove a cache pool.
*
* @param poolName
* Name of the cache pool to remove.
* @throws IOException
* if the cache pool did not exist, or could not be removed.
*/
public void removeCachePool(String poolName) throws IOException {
dfs.removeCachePool(poolName);
}
/**
* List all cache pools.
*
* @return A remote iterator from which you can get CachePoolEntry objects.
* Requests will be made as needed.
* @throws IOException
* If there was an error listing cache pools.
*/
public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
return dfs.listCachePools();
}
/**
* Create an encryption zone rooted at an empty existing directory, using the
* specified encryption key. An encryption zone has an associated encryption
* key used when reading and writing files within the zone.
*
* @param path The path of the root of the encryption zone. Must refer to
* an empty, existing directory.
* @param keyName Name of key available at the KeyProvider.
* @throws IOException if there was a general IO exception
* @throws AccessControlException if the caller does not have access to path
* @throws FileNotFoundException if the path does not exist
*/
public void createEncryptionZone(Path path, String keyName)
throws IOException, AccessControlException, FileNotFoundException {
dfs.createEncryptionZone(path, keyName);
}
/**
* Get the path of the encryption zone for a given file or directory.
*
* @param path The path to get the ez for.
*
* @return The EncryptionZone of the ez, or null if path is not in an ez.
* @throws IOException if there was a general IO exception
* @throws AccessControlException if the caller does not have access to path
* @throws FileNotFoundException if the path does not exist
*/
public EncryptionZone getEncryptionZoneForPath(Path path)
throws IOException, AccessControlException, FileNotFoundException {
return dfs.getEZForPath(path);
}
/**
* Returns a RemoteIterator which can be used to list the encryption zones
* in HDFS. For large numbers of encryption zones, the iterator will fetch
* the list of zones in a number of small batches.
* <p/>
* Since the list is fetched in batches, it does not represent a
* consistent snapshot of the entire list of encryption zones.
* <p/>
* This method can only be called by HDFS superusers.
*/
public RemoteIterator<EncryptionZone> listEncryptionZones()
throws IOException {
return dfs.listEncryptionZones();
}
/**
* Exposes a stream of namesystem events. Only events occurring after the
* stream is created are available.
* See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
* for information on stream usage.
* See {@link org.apache.hadoop.hdfs.inotify.Event}
* for information on the available events.
* <p/>
* Inotify users may want to tune the following HDFS parameters to
* ensure that enough extra HDFS edits are saved to support inotify clients
* that fall behind the current state of the namespace while reading events.
* The default parameter values should generally be reasonable. If edits are
* deleted before their corresponding events can be read, clients will see a
* {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
* {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
*
* It should generally be sufficient to tune these parameters:
* dfs.namenode.num.extra.edits.retained
* dfs.namenode.max.extra.edits.segments.retained
*
   * Parameters that affect the number of created segments and the number of
   * edits that are considered necessary (i.e. that do not count towards the
   * dfs.namenode.num.extra.edits.retained quota):
* dfs.namenode.checkpoint.period
* dfs.namenode.checkpoint.txns
* dfs.namenode.num.checkpoints.retained
* dfs.ha.log-roll.period
* <p/>
* It is recommended that local journaling be configured
* (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
* so that edit transfers from the shared journal can be avoided.
*
* @throws IOException If there was an error obtaining the stream.
*/
public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
return dfs.getInotifyEventStream();
}
/**
* A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
* users who are aware of HDFS edits up to lastReadTxid (e.g. because they
* have access to an FSImage inclusive of lastReadTxid) and only want to read
* events after this point.
*/
public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
throws IOException {
return dfs.getInotifyEventStream(lastReadTxid);
}
/**
* Set the source path to the specified storage policy.
*
* @param src The source path referring to either a directory or a file.
* @param policyName The name of the storage policy.
*/
public void setStoragePolicy(final Path src, final String policyName)
throws IOException {
dfs.setStoragePolicy(src, policyName);
}
}
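/*
 * A minimal usage sketch (not part of the original source). The namenode address,
 * paths, and quota values are placeholders; each call maps onto a method above.
 */
class HdfsAdminExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
    Path dir = new Path("/projects/reports");
    admin.setQuota(dir, 10000);                          // at most 10,000 names under dir
    admin.setSpaceQuota(dir, 10L * 1024 * 1024 * 1024);  // 10 GB of raw disk space
    admin.allowSnapshot(dir);                            // make dir snapshottable
    // Events occurring after this call can be consumed from the returned stream
    // (e.g. via its poll()/take() methods -- usage assumed, see the javadoc above).
    DFSInotifyEventInputStream events = admin.getInotifyEventStream();
    admin.clearSpaceQuota(dir);                          // later, drop the space quota again
  }
}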
| 13,556 | 35.940054 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/BlockReportOptions.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Options that can be specified when manually triggering a block report.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class BlockReportOptions {
private final boolean incremental;
private BlockReportOptions(boolean incremental) {
this.incremental = incremental;
}
public boolean isIncremental() {
return incremental;
}
public static class Factory {
private boolean incremental = false;
public Factory() {
}
public Factory setIncremental(boolean incremental) {
this.incremental = incremental;
return this;
}
public BlockReportOptions build() {
return new BlockReportOptions(incremental);
}
}
@Override
public String toString() {
return "BlockReportOptions{incremental=" + incremental + "}";
}
}
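/*
 * A minimal illustration (not part of the original source) of the builder-style
 * Factory above.
 */
class BlockReportOptionsExample {
  public static void main(String[] args) {
    BlockReportOptions options =
        new BlockReportOptions.Factory().setIncremental(true).build();
    System.out.println(options); // BlockReportOptions{incremental=true}
  }
}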
| 1,778 | 28.65 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.client;
import java.io.IOException;
import java.io.OutputStream;
import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.crypto.CryptoOutputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSOutputStream;
import com.google.common.base.Preconditions;
/**
* The Hdfs implementation of {@link FSDataOutputStream}.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HdfsDataOutputStream extends FSDataOutputStream {
public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats,
long startPosition) throws IOException {
super(out, stats, startPosition);
}
public HdfsDataOutputStream(DFSOutputStream out, FileSystem.Statistics stats
) throws IOException {
this(out, stats, 0L);
}
public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats,
long startPosition) throws IOException {
super(out, stats, startPosition);
Preconditions.checkArgument(out.getWrappedStream() instanceof DFSOutputStream,
"CryptoOutputStream should wrap a DFSOutputStream");
}
public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats)
throws IOException {
this(out, stats, 0L);
}
/**
* Get the actual number of replicas of the current block.
*
* This can be different from the designated replication factor of the file
* because the namenode does not maintain replication for the blocks which are
* currently being written to. Depending on the configuration, the client may
* continue to write to a block even if a few datanodes in the write pipeline
   * have failed, or the client may add new datanodes once a datanode has
* failed.
*
* @return the number of valid replicas of the current block
*/
public synchronized int getCurrentBlockReplication() throws IOException {
OutputStream wrappedStream = getWrappedStream();
if (wrappedStream instanceof CryptoOutputStream) {
wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
}
return ((DFSOutputStream) wrappedStream).getCurrentBlockReplication();
}
/**
* Sync buffered data to DataNodes (flush to disk devices).
*
* @param syncFlags
* Indicate the detailed semantic and actions of the hsync.
* @throws IOException
* @see FSDataOutputStream#hsync()
*/
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
OutputStream wrappedStream = getWrappedStream();
if (wrappedStream instanceof CryptoOutputStream) {
((CryptoOutputStream) wrappedStream).flush();
wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
}
((DFSOutputStream) wrappedStream).hsync(syncFlags);
}
public static enum SyncFlag {
/**
* When doing sync to DataNodes, also update the metadata (block length) in
* the NameNode.
*/
UPDATE_LENGTH,
/**
* Sync the data to DataNode, close the current block, and allocate a new
* block
*/
END_BLOCK;
}
}
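/*
 * A minimal usage sketch (not part of the original source). It assumes the default
 * file system is HDFS (outside an encryption zone), so create() hands back an
 * HdfsDataOutputStream; the path is a placeholder.
 */
class HdfsDataOutputStreamExample {
  public static void main(String[] args) throws IOException {
    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    FileSystem fs = FileSystem.get(conf);
    FSDataOutputStream raw =
        fs.create(new org.apache.hadoop.fs.Path("/tmp/sample-out"));
    try {
      HdfsDataOutputStream out = (HdfsDataOutputStream) raw;
      out.write("hello, hdfs".getBytes("UTF-8"));
      // Flush to the datanodes and also update the block length on the NameNode.
      out.hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
      System.out.println("replicas of current block: "
          + out.getCurrentBlockReplication());
    } finally {
      raw.close();
    }
  }
}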
| 4,053 | 34.876106 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.client;
import java.io.IOException;
import java.net.URI;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.io.IOUtils;
/**
* The public utility API for HDFS.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HdfsUtils {
private static final Log LOG = LogFactory.getLog(HdfsUtils.class);
/**
* Is the HDFS healthy?
   * HDFS is considered healthy if it is up and not in safemode.
*
* @param uri the HDFS URI. Note that the URI path is ignored.
* @return true if HDFS is healthy; false, otherwise.
*/
public static boolean isHealthy(URI uri) {
//check scheme
final String scheme = uri.getScheme();
if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(scheme)) {
throw new IllegalArgumentException("The scheme is not "
+ HdfsConstants.HDFS_URI_SCHEME + ", uri=" + uri);
}
final Configuration conf = new Configuration();
//disable FileSystem cache
conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
//disable client retry for rpc connection and rpc calls
conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, false);
conf.setInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
DistributedFileSystem fs = null;
try {
fs = (DistributedFileSystem)FileSystem.get(uri, conf);
final boolean safemode = fs.setSafeMode(SafeModeAction.SAFEMODE_GET);
if (LOG.isDebugEnabled()) {
LOG.debug("Is namenode in safemode? " + safemode + "; uri=" + uri);
}
fs.close();
fs = null;
return !safemode;
} catch(IOException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Got an exception for uri=" + uri, e);
}
return false;
} finally {
IOUtils.cleanup(LOG, fs);
}
}
}
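/*
 * A minimal illustration (not part of the original source); the namenode address
 * is a placeholder. Note that the path part of the URI is ignored by isHealthy().
 */
class HdfsUtilsExample {
  public static void main(String[] args) {
    URI uri = URI.create("hdfs://namenode:8020/");
    System.out.println("HDFS healthy: " + HdfsUtils.isHealthy(uri));
  }
}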
| 3,156 | 35.287356 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.client.impl;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.BlockReaderFactory;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.util.ByteArrayManager;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.util.DataChecksum;
import com.google.common.annotations.VisibleForTesting;
/**
* DFSClient configuration
*/
public class DfsClientConf {
private final int hdfsTimeout; // timeout value for a DFS operation.
private final int maxFailoverAttempts;
private final int maxRetryAttempts;
private final int failoverSleepBaseMillis;
private final int failoverSleepMaxMillis;
private final int maxBlockAcquireFailures;
private final int datanodeSocketWriteTimeout;
private final int ioBufferSize;
private final ChecksumOpt defaultChecksumOpt;
private final int writePacketSize;
private final int writeMaxPackets;
private final ByteArrayManager.Conf writeByteArrayManagerConf;
private final int socketTimeout;
private final long excludedNodesCacheExpiry;
/** Wait time window (in msec) if BlockMissingException is caught */
private final int timeWindow;
private final int numCachedConnRetry;
private final int numBlockWriteRetry;
private final int numBlockWriteLocateFollowingRetry;
private final int blockWriteLocateFollowingInitialDelayMs;
private final long defaultBlockSize;
private final long prefetchSize;
private final short defaultReplication;
private final String taskId;
private final FsPermission uMask;
private final boolean connectToDnViaHostname;
private final boolean hdfsBlocksMetadataEnabled;
private final int fileBlockStorageLocationsNumThreads;
private final int fileBlockStorageLocationsTimeoutMs;
private final int retryTimesForGetLastBlockLength;
private final int retryIntervalForGetLastBlockLength;
private final long datanodeRestartTimeout;
private final long slowIoWarningThresholdMs;
private final ShortCircuitConf shortCircuitConf;
private final long hedgedReadThresholdMillis;
private final int hedgedReadThreadpoolSize;
public DfsClientConf(Configuration conf) {
// The hdfsTimeout is currently the same as the ipc timeout
hdfsTimeout = Client.getTimeout(conf);
maxRetryAttempts = conf.getInt(
HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
timeWindow = conf.getInt(
HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY,
HdfsClientConfigKeys.Retry.WINDOW_BASE_DEFAULT);
retryTimesForGetLastBlockLength = conf.getInt(
HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY,
HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT);
retryIntervalForGetLastBlockLength = conf.getInt(
HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_KEY,
HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT);
maxFailoverAttempts = conf.getInt(
HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
failoverSleepBaseMillis = conf.getInt(
HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
failoverSleepMaxMillis = conf.getInt(
HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY,
HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT);
maxBlockAcquireFailures = conf.getInt(
DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
datanodeSocketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
HdfsServerConstants.WRITE_TIMEOUT);
ioBufferSize = conf.getInt(
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
defaultChecksumOpt = getChecksumOptFromConf(conf);
socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
HdfsServerConstants.READ_TIMEOUT);
/** dfs.write.packet.size is an internal config variable */
writePacketSize = conf.getInt(
DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
writeMaxPackets = conf.getInt(
HdfsClientConfigKeys.Write.MAX_PACKETS_IN_FLIGHT_KEY,
HdfsClientConfigKeys.Write.MAX_PACKETS_IN_FLIGHT_DEFAULT);
final boolean byteArrayManagerEnabled = conf.getBoolean(
HdfsClientConfigKeys.Write.ByteArrayManager.ENABLED_KEY,
HdfsClientConfigKeys.Write.ByteArrayManager.ENABLED_DEFAULT);
if (!byteArrayManagerEnabled) {
writeByteArrayManagerConf = null;
} else {
final int countThreshold = conf.getInt(
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_THRESHOLD_KEY,
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_THRESHOLD_DEFAULT);
final int countLimit = conf.getInt(
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_LIMIT_KEY,
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_LIMIT_DEFAULT);
final long countResetTimePeriodMs = conf.getLong(
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_KEY,
HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_DEFAULT);
writeByteArrayManagerConf = new ByteArrayManager.Conf(
countThreshold, countLimit, countResetTimePeriodMs);
}
defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
DFS_BLOCK_SIZE_DEFAULT);
defaultReplication = (short) conf.getInt(
DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
excludedNodesCacheExpiry = conf.getLong(
HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY,
HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
prefetchSize = conf.getLong(HdfsClientConfigKeys.Read.PREFETCH_SIZE_KEY,
10 * defaultBlockSize);
numCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
numBlockWriteRetry = conf.getInt(
HdfsClientConfigKeys.BlockWrite.RETRIES_KEY,
HdfsClientConfigKeys.BlockWrite.RETRIES_DEFAULT);
numBlockWriteLocateFollowingRetry = conf.getInt(
HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
blockWriteLocateFollowingInitialDelayMs = conf.getInt(
HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY,
HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_DEFAULT);
uMask = FsPermission.getUMask(conf);
connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
hdfsBlocksMetadataEnabled = conf.getBoolean(
DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
fileBlockStorageLocationsNumThreads = conf.getInt(
DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS,
DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT);
fileBlockStorageLocationsTimeoutMs = conf.getInt(
DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
datanodeRestartTimeout = conf.getLong(
DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY,
DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT) * 1000;
slowIoWarningThresholdMs = conf.getLong(
DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
shortCircuitConf = new ShortCircuitConf(conf);
hedgedReadThresholdMillis = conf.getLong(
HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY,
HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_DEFAULT);
hedgedReadThreadpoolSize = conf.getInt(
HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_DEFAULT);
}
private DataChecksum.Type getChecksumType(Configuration conf) {
final String checksum = conf.get(
DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
try {
return DataChecksum.Type.valueOf(checksum);
} catch(IllegalArgumentException iae) {
DFSClient.LOG.warn("Bad checksum type: " + checksum + ". Using default "
+ DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
return DataChecksum.Type.valueOf(
DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
}
}
// Construct a checksum option from conf
private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
DataChecksum.Type type = getChecksumType(conf);
int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
DFS_BYTES_PER_CHECKSUM_DEFAULT);
return new ChecksumOpt(type, bytesPerChecksum);
}
/** create a DataChecksum with the given option. */
public DataChecksum createChecksum(ChecksumOpt userOpt) {
// Fill in any missing field with the default.
ChecksumOpt opt = ChecksumOpt.processChecksumOpt(
defaultChecksumOpt, userOpt);
DataChecksum dataChecksum = DataChecksum.newDataChecksum(
opt.getChecksumType(),
opt.getBytesPerChecksum());
if (dataChecksum == null) {
throw new HadoopIllegalArgumentException("Invalid checksum type: userOpt="
+ userOpt + ", default=" + defaultChecksumOpt
+ ", effective=null");
}
return dataChecksum;
}
@VisibleForTesting
public int getBlockWriteLocateFollowingInitialDelayMs() {
return blockWriteLocateFollowingInitialDelayMs;
}
/**
* @return the hdfsTimeout
*/
public int getHdfsTimeout() {
return hdfsTimeout;
}
/**
* @return the maxFailoverAttempts
*/
public int getMaxFailoverAttempts() {
return maxFailoverAttempts;
}
/**
* @return the maxRetryAttempts
*/
public int getMaxRetryAttempts() {
return maxRetryAttempts;
}
/**
* @return the failoverSleepBaseMillis
*/
public int getFailoverSleepBaseMillis() {
return failoverSleepBaseMillis;
}
/**
* @return the failoverSleepMaxMillis
*/
public int getFailoverSleepMaxMillis() {
return failoverSleepMaxMillis;
}
/**
* @return the maxBlockAcquireFailures
*/
public int getMaxBlockAcquireFailures() {
return maxBlockAcquireFailures;
}
/**
* @return the datanodeSocketWriteTimeout
*/
public int getDatanodeSocketWriteTimeout() {
return datanodeSocketWriteTimeout;
}
/**
* @return the ioBufferSize
*/
public int getIoBufferSize() {
return ioBufferSize;
}
/**
* @return the defaultChecksumOpt
*/
public ChecksumOpt getDefaultChecksumOpt() {
return defaultChecksumOpt;
}
/**
* @return the writePacketSize
*/
public int getWritePacketSize() {
return writePacketSize;
}
/**
* @return the writeMaxPackets
*/
public int getWriteMaxPackets() {
return writeMaxPackets;
}
/**
* @return the writeByteArrayManagerConf
*/
public ByteArrayManager.Conf getWriteByteArrayManagerConf() {
return writeByteArrayManagerConf;
}
/**
* @return the socketTimeout
*/
public int getSocketTimeout() {
return socketTimeout;
}
/**
* @return the excludedNodesCacheExpiry
*/
public long getExcludedNodesCacheExpiry() {
return excludedNodesCacheExpiry;
}
/**
* @return the timeWindow
*/
public int getTimeWindow() {
return timeWindow;
}
/**
* @return the numCachedConnRetry
*/
public int getNumCachedConnRetry() {
return numCachedConnRetry;
}
/**
* @return the numBlockWriteRetry
*/
public int getNumBlockWriteRetry() {
return numBlockWriteRetry;
}
/**
* @return the numBlockWriteLocateFollowingRetry
*/
public int getNumBlockWriteLocateFollowingRetry() {
return numBlockWriteLocateFollowingRetry;
}
/**
* @return the defaultBlockSize
*/
public long getDefaultBlockSize() {
return defaultBlockSize;
}
/**
* @return the prefetchSize
*/
public long getPrefetchSize() {
return prefetchSize;
}
/**
* @return the defaultReplication
*/
public short getDefaultReplication() {
return defaultReplication;
}
/**
* @return the taskId
*/
public String getTaskId() {
return taskId;
}
/**
* @return the uMask
*/
public FsPermission getUMask() {
return uMask;
}
/**
* @return the connectToDnViaHostname
*/
public boolean isConnectToDnViaHostname() {
return connectToDnViaHostname;
}
/**
* @return the hdfsBlocksMetadataEnabled
*/
public boolean isHdfsBlocksMetadataEnabled() {
return hdfsBlocksMetadataEnabled;
}
/**
* @return the fileBlockStorageLocationsNumThreads
*/
public int getFileBlockStorageLocationsNumThreads() {
return fileBlockStorageLocationsNumThreads;
}
/**
* @return the getFileBlockStorageLocationsTimeoutMs
*/
public int getFileBlockStorageLocationsTimeoutMs() {
return fileBlockStorageLocationsTimeoutMs;
}
/**
* @return the retryTimesForGetLastBlockLength
*/
public int getRetryTimesForGetLastBlockLength() {
return retryTimesForGetLastBlockLength;
}
/**
* @return the retryIntervalForGetLastBlockLength
*/
public int getRetryIntervalForGetLastBlockLength() {
return retryIntervalForGetLastBlockLength;
}
/**
* @return the datanodeRestartTimeout
*/
public long getDatanodeRestartTimeout() {
return datanodeRestartTimeout;
}
/**
* @return the slowIoWarningThresholdMs
*/
public long getSlowIoWarningThresholdMs() {
return slowIoWarningThresholdMs;
}
/**
* @return the hedgedReadThresholdMillis
*/
public long getHedgedReadThresholdMillis() {
return hedgedReadThresholdMillis;
}
/**
* @return the hedgedReadThreadpoolSize
*/
public int getHedgedReadThreadpoolSize() {
return hedgedReadThreadpoolSize;
}
/**
* @return the shortCircuitConf
*/
public ShortCircuitConf getShortCircuitConf() {
return shortCircuitConf;
}
public static class ShortCircuitConf {
private static final Log LOG = LogFactory.getLog(ShortCircuitConf.class);
private final int socketCacheCapacity;
private final long socketCacheExpiry;
private final boolean useLegacyBlockReader;
private final boolean useLegacyBlockReaderLocal;
private final String domainSocketPath;
private final boolean skipShortCircuitChecksums;
private final int shortCircuitBufferSize;
private final boolean shortCircuitLocalReads;
private final boolean domainSocketDataTraffic;
private final int shortCircuitStreamsCacheSize;
private final long shortCircuitStreamsCacheExpiryMs;
private final int shortCircuitSharedMemoryWatcherInterruptCheckMs;
private final boolean shortCircuitMmapEnabled;
private final int shortCircuitMmapCacheSize;
private final long shortCircuitMmapCacheExpiryMs;
private final long shortCircuitMmapCacheRetryTimeout;
private final long shortCircuitCacheStaleThresholdMs;
private final long keyProviderCacheExpiryMs;
@VisibleForTesting
public BlockReaderFactory.FailureInjector brfFailureInjector =
new BlockReaderFactory.FailureInjector();
public ShortCircuitConf(Configuration conf) {
socketCacheCapacity = conf.getInt(
DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
socketCacheExpiry = conf.getLong(
DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT);
useLegacyBlockReader = conf.getBoolean(
DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER,
DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
useLegacyBlockReaderLocal = conf.getBoolean(
DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
shortCircuitLocalReads = conf.getBoolean(
HdfsClientConfigKeys.Read.ShortCircuit.KEY,
HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT);
domainSocketDataTraffic = conf.getBoolean(
DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
domainSocketPath = conf.getTrimmed(
DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
if (LOG.isDebugEnabled()) {
LOG.debug(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL
+ " = " + useLegacyBlockReaderLocal);
LOG.debug(HdfsClientConfigKeys.Read.ShortCircuit.KEY
+ " = " + shortCircuitLocalReads);
LOG.debug(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC
+ " = " + domainSocketDataTraffic);
LOG.debug(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY
+ " = " + domainSocketPath);
}
skipShortCircuitChecksums = conf.getBoolean(
HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_DEFAULT);
shortCircuitBufferSize = conf.getInt(
HdfsClientConfigKeys.Read.ShortCircuit.BUFFER_SIZE_KEY,
HdfsClientConfigKeys.Read.ShortCircuit.BUFFER_SIZE_DEFAULT);
shortCircuitStreamsCacheSize = conf.getInt(
HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY,
HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_DEFAULT);
shortCircuitStreamsCacheExpiryMs = conf.getLong(
HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_DEFAULT);
shortCircuitMmapEnabled = conf.getBoolean(
HdfsClientConfigKeys.Mmap.ENABLED_KEY,
HdfsClientConfigKeys.Mmap.ENABLED_DEFAULT);
shortCircuitMmapCacheSize = conf.getInt(
HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY,
HdfsClientConfigKeys.Mmap.CACHE_SIZE_DEFAULT);
shortCircuitMmapCacheExpiryMs = conf.getLong(
HdfsClientConfigKeys.Mmap.CACHE_TIMEOUT_MS_KEY,
HdfsClientConfigKeys.Mmap.CACHE_TIMEOUT_MS_DEFAULT);
shortCircuitMmapCacheRetryTimeout = conf.getLong(
HdfsClientConfigKeys.Mmap.RETRY_TIMEOUT_MS_KEY,
HdfsClientConfigKeys.Mmap.RETRY_TIMEOUT_MS_DEFAULT);
shortCircuitCacheStaleThresholdMs = conf.getLong(
HdfsClientConfigKeys.ShortCircuit.REPLICA_STALE_THRESHOLD_MS_KEY,
HdfsClientConfigKeys.ShortCircuit.REPLICA_STALE_THRESHOLD_MS_DEFAULT);
shortCircuitSharedMemoryWatcherInterruptCheckMs = conf.getInt(
DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
keyProviderCacheExpiryMs = conf.getLong(
DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS,
DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT);
}
/**
* @return the socketCacheCapacity
*/
public int getSocketCacheCapacity() {
return socketCacheCapacity;
}
/**
* @return the socketCacheExpiry
*/
public long getSocketCacheExpiry() {
return socketCacheExpiry;
}
public boolean isUseLegacyBlockReaderLocal() {
return useLegacyBlockReaderLocal;
}
public String getDomainSocketPath() {
return domainSocketPath;
}
public boolean isShortCircuitLocalReads() {
return shortCircuitLocalReads;
}
public boolean isDomainSocketDataTraffic() {
return domainSocketDataTraffic;
}
/**
* @return the useLegacyBlockReader
*/
public boolean isUseLegacyBlockReader() {
return useLegacyBlockReader;
}
/**
* @return the skipShortCircuitChecksums
*/
public boolean isSkipShortCircuitChecksums() {
return skipShortCircuitChecksums;
}
/**
* @return the shortCircuitBufferSize
*/
public int getShortCircuitBufferSize() {
return shortCircuitBufferSize;
}
/**
* @return the shortCircuitStreamsCacheSize
*/
public int getShortCircuitStreamsCacheSize() {
return shortCircuitStreamsCacheSize;
}
/**
* @return the shortCircuitStreamsCacheExpiryMs
*/
public long getShortCircuitStreamsCacheExpiryMs() {
return shortCircuitStreamsCacheExpiryMs;
}
/**
* @return the shortCircuitSharedMemoryWatcherInterruptCheckMs
*/
public int getShortCircuitSharedMemoryWatcherInterruptCheckMs() {
return shortCircuitSharedMemoryWatcherInterruptCheckMs;
}
/**
* @return the shortCircuitMmapEnabled
*/
public boolean isShortCircuitMmapEnabled() {
return shortCircuitMmapEnabled;
}
/**
* @return the shortCircuitMmapCacheSize
*/
public int getShortCircuitMmapCacheSize() {
return shortCircuitMmapCacheSize;
}
/**
* @return the shortCircuitMmapCacheExpiryMs
*/
public long getShortCircuitMmapCacheExpiryMs() {
return shortCircuitMmapCacheExpiryMs;
}
/**
* @return the shortCircuitMmapCacheRetryTimeout
*/
public long getShortCircuitMmapCacheRetryTimeout() {
return shortCircuitMmapCacheRetryTimeout;
}
/**
* @return the shortCircuitCacheStaleThresholdMs
*/
public long getShortCircuitCacheStaleThresholdMs() {
return shortCircuitCacheStaleThresholdMs;
}
/**
* @return the keyProviderCacheExpiryMs
*/
public long getKeyProviderCacheExpiryMs() {
return keyProviderCacheExpiryMs;
}
public String confAsString() {
StringBuilder builder = new StringBuilder();
builder.append("shortCircuitStreamsCacheSize = ").
append(shortCircuitStreamsCacheSize).
append(", shortCircuitStreamsCacheExpiryMs = ").
append(shortCircuitStreamsCacheExpiryMs).
append(", shortCircuitMmapCacheSize = ").
append(shortCircuitMmapCacheSize).
append(", shortCircuitMmapCacheExpiryMs = ").
append(shortCircuitMmapCacheExpiryMs).
append(", shortCircuitMmapCacheRetryTimeout = ").
append(shortCircuitMmapCacheRetryTimeout).
append(", shortCircuitCacheStaleThresholdMs = ").
append(shortCircuitCacheStaleThresholdMs).
append(", socketCacheCapacity = ").
append(socketCacheCapacity).
append(", socketCacheExpiry = ").
append(socketCacheExpiry).
append(", shortCircuitLocalReads = ").
append(shortCircuitLocalReads).
append(", useLegacyBlockReaderLocal = ").
append(useLegacyBlockReaderLocal).
append(", domainSocketDataTraffic = ").
append(domainSocketDataTraffic).
append(", shortCircuitSharedMemoryWatcherInterruptCheckMs = ").
append(shortCircuitSharedMemoryWatcherInterruptCheckMs).
append(", keyProviderCacheExpiryMs = ").
append(keyProviderCacheExpiryMs);
return builder.toString();
}
}
}
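/*
 * A minimal illustration (not part of the original source): builds the client-side
 * configuration view from a plain Configuration and prints a few derived values.
 */
class DfsClientConfExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setLong(DFS_BLOCK_SIZE_KEY, 256L * 1024 * 1024); // 256 MB blocks
    DfsClientConf clientConf = new DfsClientConf(conf);
    System.out.println("default block size: " + clientConf.getDefaultBlockSize());
    System.out.println("prefetch size     : " + clientConf.getPrefetchSize()); // 10 x block size by default
    System.out.println("short-circuit conf: "
        + clientConf.getShortCircuitConf().confAsString());
  }
}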
| 26,389 | 34.327979 | 97 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/LeaseRenewer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.client.impl;
import java.io.IOException;
import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
/**
* <p>
* Used by {@link org.apache.hadoop.hdfs.DFSClient} for renewing file-being-written leases
* on the namenode.
* When a file is opened for write (create or append),
* namenode stores a file lease for recording the identity of the writer.
* The writer (i.e. the DFSClient) is required to renew the lease periodically.
* When the lease is not renewed before it expires,
 * the namenode considers the writer to have failed, and then it may either
 * let another writer obtain the lease or close the file.
* </p>
* <p>
* This class also provides the following functionality:
* <ul>
* <li>
* It maintains a map from (namenode, user) pairs to lease renewers.
* The same {@link LeaseRenewer} instance is used for renewing lease
* for all the {@link org.apache.hadoop.hdfs.DFSClient} to the same namenode and the same user.
* </li>
* <li>
* Each renewer maintains a list of {@link org.apache.hadoop.hdfs.DFSClient}.
* Periodically the leases for all the clients are renewed.
* A client is removed from the list when the client is closed.
* </li>
* <li>
* A thread per namenode per user is used by the {@link LeaseRenewer}
* to renew the leases.
* </li>
* </ul>
* </p>
*/
@InterfaceAudience.Private
public class LeaseRenewer {
static final Log LOG = LogFactory.getLog(LeaseRenewer.class);
static final long LEASE_RENEWER_GRACE_DEFAULT = 60*1000L;
static final long LEASE_RENEWER_SLEEP_DEFAULT = 1000L;
/** Get a {@link LeaseRenewer} instance */
public static LeaseRenewer getInstance(final String authority,
final UserGroupInformation ugi, final DFSClient dfsc) throws IOException {
final LeaseRenewer r = Factory.INSTANCE.get(authority, ugi);
r.addClient(dfsc);
return r;
}
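  // Illustrative only (not part of the original source): a DFSClient typically obtains
  // its renewer through the factory method above, e.g.
  //
  //   LeaseRenewer renewer =
  //       LeaseRenewer.getInstance(nameNodeUri.getAuthority(), ugi, dfsClient);
  //
  // so all DFSClient instances of the same user talking to the same namenode share a
  // single renewer (and a single renewal daemon), as described in the class comment.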
/**
* A factory for sharing {@link LeaseRenewer} objects
* among {@link DFSClient} instances
* so that there is only one renewer per authority per user.
*/
private static class Factory {
private static final Factory INSTANCE = new Factory();
private static class Key {
/** Namenode info */
final String authority;
/** User info */
final UserGroupInformation ugi;
private Key(final String authority, final UserGroupInformation ugi) {
if (authority == null) {
throw new HadoopIllegalArgumentException("authority == null");
} else if (ugi == null) {
throw new HadoopIllegalArgumentException("ugi == null");
}
this.authority = authority;
this.ugi = ugi;
}
@Override
public int hashCode() {
return authority.hashCode() ^ ugi.hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj == this) {
return true;
}
if (obj != null && obj instanceof Key) {
final Key that = (Key)obj;
return this.authority.equals(that.authority)
&& this.ugi.equals(that.ugi);
}
return false;
}
@Override
public String toString() {
return ugi.getShortUserName() + "@" + authority;
}
}
/** A map for per user per namenode renewers. */
private final Map<Key, LeaseRenewer> renewers = new HashMap<Key, LeaseRenewer>();
/** Get a renewer. */
private synchronized LeaseRenewer get(final String authority,
final UserGroupInformation ugi) {
final Key k = new Key(authority, ugi);
LeaseRenewer r = renewers.get(k);
if (r == null) {
r = new LeaseRenewer(k);
renewers.put(k, r);
}
return r;
}
/** Remove the given renewer. */
private synchronized void remove(final LeaseRenewer r) {
final LeaseRenewer stored = renewers.get(r.factorykey);
//Since a renewer may expire, the stored renewer can be different.
if (r == stored) {
if (!r.clientsRunning()) {
renewers.remove(r.factorykey);
}
}
}
}
/** The time in milliseconds that the map became empty. */
private long emptyTime = Long.MAX_VALUE;
/** A fixed lease renewal time period in milliseconds */
private long renewal = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD/2;
/** A daemon for renewing lease */
private Daemon daemon = null;
/** Only the daemon with currentId should run. */
private int currentId = 0;
/**
* A period in milliseconds that the lease renewer thread should run
* after the map became empty.
* In other words,
* if the map is empty for a time period longer than the grace period,
* the renewer should terminate.
*/
private long gracePeriod;
/**
* The time period in milliseconds
* that the renewer sleeps for each iteration.
*/
private long sleepPeriod;
private final Factory.Key factorykey;
/** A list of clients corresponding to this renewer. */
private final List<DFSClient> dfsclients = new ArrayList<DFSClient>();
/**
* A stringified stack trace of the call stack when the Lease Renewer
* was instantiated. This is only generated if trace-level logging is
* enabled on this class.
*/
private final String instantiationTrace;
private LeaseRenewer(Factory.Key factorykey) {
this.factorykey = factorykey;
unsyncSetGraceSleepPeriod(LEASE_RENEWER_GRACE_DEFAULT);
if (LOG.isTraceEnabled()) {
instantiationTrace = StringUtils.stringifyException(
new Throwable("TRACE"));
} else {
instantiationTrace = null;
}
}
/** @return the renewal time in milliseconds. */
private synchronized long getRenewalTime() {
return renewal;
}
/** Used for testing only. */
@VisibleForTesting
public synchronized void setRenewalTime(final long renewal) {
this.renewal = renewal;
}
/** Add a client. */
private synchronized void addClient(final DFSClient dfsc) {
for(DFSClient c : dfsclients) {
if (c == dfsc) {
//client already exists, nothing to do.
return;
}
}
//client not found, add it
dfsclients.add(dfsc);
//update renewal time
final int hdfsTimeout = dfsc.getConf().getHdfsTimeout();
if (hdfsTimeout > 0) {
final long half = hdfsTimeout/2;
if (half < renewal) {
this.renewal = half;
}
}
}
private synchronized boolean clientsRunning() {
for(Iterator<DFSClient> i = dfsclients.iterator(); i.hasNext(); ) {
if (!i.next().isClientRunning()) {
i.remove();
}
}
return !dfsclients.isEmpty();
}
private synchronized long getSleepPeriod() {
return sleepPeriod;
}
/** Set the grace period and adjust the sleep period accordingly. */
synchronized void setGraceSleepPeriod(final long gracePeriod) {
unsyncSetGraceSleepPeriod(gracePeriod);
}
private void unsyncSetGraceSleepPeriod(final long gracePeriod) {
if (gracePeriod < 100L) {
throw new HadoopIllegalArgumentException(gracePeriod
+ " = gracePeriod < 100ms is too small.");
}
this.gracePeriod = gracePeriod;
final long half = gracePeriod/2;
this.sleepPeriod = half < LEASE_RENEWER_SLEEP_DEFAULT?
half: LEASE_RENEWER_SLEEP_DEFAULT;
}
/** Is the daemon running? */
synchronized boolean isRunning() {
return daemon != null && daemon.isAlive();
}
/** Does this renewer have nothing to renew? */
public boolean isEmpty() {
return dfsclients.isEmpty();
}
/** Used only by tests */
synchronized String getDaemonName() {
return daemon.getName();
}
/** Is the empty period longer than the grace period? */
private synchronized boolean isRenewerExpired() {
return emptyTime != Long.MAX_VALUE
&& Time.monotonicNow() - emptyTime > gracePeriod;
}
public synchronized void put(final long inodeId, final DFSOutputStream out,
final DFSClient dfsc) {
if (dfsc.isClientRunning()) {
if (!isRunning() || isRenewerExpired()) {
        //start a new daemon with a new id.
final int id = ++currentId;
daemon = new Daemon(new Runnable() {
@Override
public void run() {
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Lease renewer daemon for " + clientsString()
+ " with renew id " + id + " started");
}
LeaseRenewer.this.run(id);
} catch(InterruptedException e) {
if (LOG.isDebugEnabled()) {
LOG.debug(LeaseRenewer.this.getClass().getSimpleName()
+ " is interrupted.", e);
}
} finally {
synchronized(LeaseRenewer.this) {
Factory.INSTANCE.remove(LeaseRenewer.this);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Lease renewer daemon for " + clientsString()
+ " with renew id " + id + " exited");
}
}
}
@Override
public String toString() {
return String.valueOf(LeaseRenewer.this);
}
});
daemon.start();
}
dfsc.putFileBeingWritten(inodeId, out);
emptyTime = Long.MAX_VALUE;
}
}
@VisibleForTesting
synchronized void setEmptyTime(long time) {
emptyTime = time;
}
/** Close a file. */
public void closeFile(final long inodeId, final DFSClient dfsc) {
dfsc.removeFileBeingWritten(inodeId);
synchronized(this) {
if (dfsc.isFilesBeingWrittenEmpty()) {
dfsclients.remove(dfsc);
}
//update emptyTime if necessary
if (emptyTime == Long.MAX_VALUE) {
for(DFSClient c : dfsclients) {
if (!c.isFilesBeingWrittenEmpty()) {
//found a non-empty file-being-written map
return;
}
}
//discover the first time that all file-being-written maps are empty.
emptyTime = Time.monotonicNow();
}
}
}
/** Close the given client. */
public synchronized void closeClient(final DFSClient dfsc) {
dfsclients.remove(dfsc);
if (dfsclients.isEmpty()) {
if (!isRunning() || isRenewerExpired()) {
Factory.INSTANCE.remove(LeaseRenewer.this);
return;
}
if (emptyTime == Long.MAX_VALUE) {
//discover the first time that the client list is empty.
emptyTime = Time.monotonicNow();
}
}
//update renewal time
if (renewal == dfsc.getConf().getHdfsTimeout()/2) {
long min = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD;
for(DFSClient c : dfsclients) {
final int timeout = c.getConf().getHdfsTimeout();
if (timeout > 0 && timeout < min) {
min = timeout;
}
}
renewal = min/2;
}
}
public void interruptAndJoin() throws InterruptedException {
Daemon daemonCopy = null;
synchronized (this) {
if (isRunning()) {
daemon.interrupt();
daemonCopy = daemon;
}
}
if (daemonCopy != null) {
if(LOG.isDebugEnabled()) {
LOG.debug("Wait for lease checker to terminate");
}
daemonCopy.join();
}
}
private void renew() throws IOException {
final List<DFSClient> copies;
synchronized(this) {
copies = new ArrayList<DFSClient>(dfsclients);
}
    //sort the clients by name so that repeated names can be detected.
Collections.sort(copies, new Comparator<DFSClient>() {
@Override
public int compare(final DFSClient left, final DFSClient right) {
return left.getClientName().compareTo(right.getClientName());
}
});
String previousName = "";
for(int i = 0; i < copies.size(); i++) {
final DFSClient c = copies.get(i);
//skip if current client name is the same as the previous name.
if (!c.getClientName().equals(previousName)) {
if (!c.renewLease()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Did not renew lease for client " +
c);
}
continue;
}
previousName = c.getClientName();
if (LOG.isDebugEnabled()) {
LOG.debug("Lease renewed for client " + previousName);
}
}
}
}
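  // Note: because the snapshot of clients is sorted by client name above, several
  // DFSClient instances that happen to share a client name trigger only a single
  // renewLease() call per renewal round.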
/**
* Periodically check in with the namenode and renew all the leases
* when the lease period is half over.
*/
private void run(final int id) throws InterruptedException {
for(long lastRenewed = Time.monotonicNow(); !Thread.interrupted();
Thread.sleep(getSleepPeriod())) {
final long elapsed = Time.monotonicNow() - lastRenewed;
if (elapsed >= getRenewalTime()) {
try {
renew();
if (LOG.isDebugEnabled()) {
LOG.debug("Lease renewer daemon for " + clientsString()
+ " with renew id " + id + " executed");
}
lastRenewed = Time.monotonicNow();
} catch (SocketTimeoutException ie) {
LOG.warn("Failed to renew lease for " + clientsString() + " for "
+ (elapsed/1000) + " seconds. Aborting ...", ie);
synchronized (this) {
while (!dfsclients.isEmpty()) {
DFSClient dfsClient = dfsclients.get(0);
dfsClient.closeAllFilesBeingWritten(true);
closeClient(dfsClient);
}
//Expire the current LeaseRenewer thread.
emptyTime = 0;
}
break;
} catch (IOException ie) {
LOG.warn("Failed to renew lease for " + clientsString() + " for "
+ (elapsed/1000) + " seconds. Will retry shortly ...", ie);
}
}
synchronized(this) {
if (id != currentId || isRenewerExpired()) {
if (LOG.isDebugEnabled()) {
if (id != currentId) {
LOG.debug("Lease renewer daemon for " + clientsString()
+ " with renew id " + id + " is not current");
} else {
LOG.debug("Lease renewer daemon for " + clientsString()
+ " with renew id " + id + " expired");
}
}
//no longer the current daemon or expired
return;
}
      // if no clients are in a running state or there are no more clients
      // registered with this renewer, stop the daemon after the grace
      // period.
if (!clientsRunning() && emptyTime == Long.MAX_VALUE) {
emptyTime = Time.monotonicNow();
}
}
}
}
@Override
public String toString() {
String s = getClass().getSimpleName() + ":" + factorykey;
if (LOG.isTraceEnabled()) {
return s + ", clients=" + clientsString()
+ ", created at " + instantiationTrace;
}
return s;
}
/** Get the names of all clients */
private synchronized String clientsString() {
if (dfsclients.isEmpty()) {
return "[]";
} else {
final StringBuilder b = new StringBuilder("[").append(
dfsclients.get(0).getClientName());
for(int i = 1; i < dfsclients.size(); i++) {
b.append(", ").append(dfsclients.get(i).getClientName());
}
return b.append("]").toString();
}
}
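  // Usage sketch (hypothetical variable names; a minimal outline only): a DFSClient
  // normally obtains a shared renewer through the enclosing Factory rather than
  // constructing one directly, roughly:
  //   LeaseRenewer renewer = LeaseRenewer.getInstance(authority, ugi, dfsClient);
  //   renewer.put(inodeId, out, dfsClient);      // renew while the stream is open
  //   ...
  //   renewer.closeFile(inodeId, dfsClient);     // stop tracking once it is closed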
}
| 16,722 | 30.853333 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/CorruptFileBlockIterator.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.client.impl;
import java.io.IOException;
import java.util.NoSuchElementException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
/**
* Provides an iterator interface for listCorruptFileBlocks.
* This class is used by DistributedFileSystem and Hdfs.
*/
@InterfaceAudience.Private
public class CorruptFileBlockIterator implements RemoteIterator<Path> {
private final DFSClient dfs;
private final String path;
private String[] files = null;
private int fileIdx = 0;
private String cookie = null;
private Path nextPath = null;
private int callsMade = 0;
public CorruptFileBlockIterator(DFSClient dfs, Path path) throws IOException {
this.dfs = dfs;
this.path = path2String(path);
loadNext();
}
/**
* @return the number of calls made to the DFSClient.
* This is for debugging and testing purposes.
*/
public int getCallsMade() {
return callsMade;
}
private String path2String(Path path) {
return path.toUri().getPath();
}
private Path string2Path(String string) {
return new Path(string);
}
private void loadNext() throws IOException {
if (files == null || fileIdx >= files.length) {
CorruptFileBlocks cfb = dfs.listCorruptFileBlocks(path, cookie);
files = cfb.getFiles();
cookie = cfb.getCookie();
fileIdx = 0;
callsMade++;
}
if (fileIdx >= files.length) {
// received an empty response
// there are no more corrupt file blocks
nextPath = null;
} else {
nextPath = string2Path(files[fileIdx]);
fileIdx++;
}
}
@Override
public boolean hasNext() {
return nextPath != null;
}
@Override
public Path next() throws IOException {
if (!hasNext()) {
throw new NoSuchElementException("No more corrupt file blocks");
}
Path result = nextPath;
loadNext();
return result;
}
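  // Usage sketch (hypothetical names, minimal outline): callers drive this class
  // through the RemoteIterator contract, e.g.
  //   RemoteIterator<Path> it = new CorruptFileBlockIterator(dfsClient, new Path("/"));
  //   while (it.hasNext()) {
  //     System.out.println(it.next());
  //   }
  // where dfsClient is an already-initialized DFSClient.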
}
| 2,901 | 26.638095 | 80 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.util.StringUtils;
/**
* Subclass of {@link AuthenticationFilter} that
* obtains Hadoop-Auth configuration for webhdfs.
*/
public class AuthFilter extends AuthenticationFilter {
private static final String CONF_PREFIX = "dfs.web.authentication.";
/**
* Returns the filter configuration properties,
* including the ones prefixed with {@link #CONF_PREFIX}.
* The prefix is removed from the returned property names.
*
   * @param prefix not used.
   * @param config contains the initialization values.
* @return Hadoop-Auth configuration properties.
* @throws ServletException
*/
@Override
protected Properties getConfiguration(String prefix, FilterConfig config)
throws ServletException {
final Properties p = super.getConfiguration(CONF_PREFIX, config);
// set authentication type
p.setProperty(AUTH_TYPE, UserGroupInformation.isSecurityEnabled()?
KerberosAuthenticationHandler.TYPE: PseudoAuthenticationHandler.TYPE);
// if not set, enable anonymous for pseudo authentication
if (p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED) == null) {
p.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
}
//set cookie path
p.setProperty(COOKIE_PATH, "/");
return p;
}
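  // Illustrative example (principal value is a placeholder): with CONF_PREFIX being
  // "dfs.web.authentication.", a filter init parameter such as
  //   dfs.web.authentication.kerberos.principal = HTTP/_HOST@EXAMPLE.COM
  // is returned by this method as the Hadoop-Auth property
  //   kerberos.principal = HTTP/_HOST@EXAMPLE.COM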
@Override
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain filterChain) throws IOException, ServletException {
final HttpServletRequest httpRequest = toLowerCase((HttpServletRequest)request);
final String tokenString = httpRequest.getParameter(DelegationParam.NAME);
if (tokenString != null) {
      //The token is present in the URL, so it will be used for
      //authentication; bypass Kerberos authentication.
filterChain.doFilter(httpRequest, response);
return;
}
super.doFilter(httpRequest, response, filterChain);
}
private static HttpServletRequest toLowerCase(final HttpServletRequest request) {
@SuppressWarnings("unchecked")
final Map<String, String[]> original = (Map<String, String[]>)request.getParameterMap();
if (!ParamFilter.containsUpperCase(original.keySet())) {
return request;
}
final Map<String, List<String>> m = new HashMap<String, List<String>>();
for(Map.Entry<String, String[]> entry : original.entrySet()) {
final String key = StringUtils.toLowerCase(entry.getKey());
List<String> strings = m.get(key);
if (strings == null) {
strings = new ArrayList<String>();
m.put(key, strings);
}
for(String v : entry.getValue()) {
strings.add(v);
}
}
return new HttpServletRequestWrapper(request) {
private Map<String, String[]> parameters = null;
@Override
public Map<String, String[]> getParameterMap() {
if (parameters == null) {
parameters = new HashMap<String, String[]>();
for(Map.Entry<String, List<String>> entry : m.entrySet()) {
final List<String> a = entry.getValue();
parameters.put(entry.getKey(), a.toArray(new String[a.size()]));
}
}
return parameters;
}
@Override
public String getParameter(String name) {
final List<String> a = m.get(name);
return a == null? null: a.get(0);
}
@Override
public String[] getParameterValues(String name) {
return getParameterMap().get(name);
}
@Override
public Enumeration<String> getParameterNames() {
final Iterator<String> i = m.keySet().iterator();
return new Enumeration<String>() {
@Override
public boolean hasMoreElements() {
return i.hasNext();
}
@Override
public String nextElement() {
return i.next();
}
};
}
};
}
}
| 5,612 | 35.686275 | 92 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HsftpFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
/**
* An implementation of a protocol for accessing filesystems over HTTPS. The
* following implementation provides a limited, read-only interface to a
* filesystem over HTTPS.
*
* @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
* @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HsftpFileSystem extends HftpFileSystem {
/**
* Return the protocol scheme for the FileSystem.
* <p/>
*
* @return <code>hsftp</code>
*/
@Override
public String getScheme() {
return WebHdfsConstants.HSFTP_SCHEME;
}
/**
* Return the underlying protocol that is used to talk to the namenode.
*/
@Override
protected String getUnderlyingProtocol() {
return "https";
}
@Override
protected void initTokenAspect() {
tokenAspect = new TokenAspect<HsftpFileSystem>(this, tokenServiceName,
WebHdfsConstants.HSFTP_TOKEN_KIND);
}
@Override
protected int getDefaultPort() {
return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT);
}
}
| 2,242 | 31.507246 | 77 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.StringUtils;
import org.codehaus.jackson.map.ObjectMapper;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.*;
/** JSON Utilities */
public class JsonUtil {
private static final Object[] EMPTY_OBJECT_ARRAY = {};
/** Convert a token object to a Json string. */
public static String toJsonString(final Token<? extends TokenIdentifier> token
) throws IOException {
return toJsonString(Token.class, toJsonMap(token));
}
private static Map<String, Object> toJsonMap(
final Token<? extends TokenIdentifier> token) throws IOException {
if (token == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("urlString", token.encodeToUrlString());
return m;
}
/** Convert an exception object to a Json string. */
public static String toJsonString(final Exception e) {
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("exception", e.getClass().getSimpleName());
m.put("message", e.getMessage());
m.put("javaClassName", e.getClass().getName());
return toJsonString(RemoteException.class, m);
}
private static String toJsonString(final Class<?> clazz, final Object value) {
return toJsonString(clazz.getSimpleName(), value);
}
/** Convert a key-value pair to a Json string. */
public static String toJsonString(final String key, final Object value) {
final Map<String, Object> m = new TreeMap<String, Object>();
m.put(key, value);
ObjectMapper mapper = new ObjectMapper();
try {
return mapper.writeValueAsString(m);
} catch (IOException ignored) {
}
return null;
}
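  // Illustrative example: the key-value form wraps a single entry in a JSON object,
  // so a hypothetical call toJsonString("boolean", true) yields {"boolean":true}.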
  /** Convert an FsPermission object to a string. */
private static String toString(final FsPermission permission) {
return String.format("%o", permission.toShort());
}
  /** Convert an HdfsFileStatus object to a Json string. */
public static String toJsonString(final HdfsFileStatus status,
boolean includeType) {
if (status == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("pathSuffix", status.getLocalName());
m.put("type", WebHdfsConstants.PathType.valueOf(status));
if (status.isSymlink()) {
m.put("symlink", status.getSymlink());
}
m.put("length", status.getLen());
m.put("owner", status.getOwner());
m.put("group", status.getGroup());
FsPermission perm = status.getPermission();
m.put("permission", toString(perm));
if (perm.getAclBit()) {
m.put("aclBit", true);
}
if (perm.getEncryptedBit()) {
m.put("encBit", true);
}
m.put("accessTime", status.getAccessTime());
m.put("modificationTime", status.getModificationTime());
m.put("blockSize", status.getBlockSize());
m.put("replication", status.getReplication());
m.put("fileId", status.getFileId());
m.put("childrenNum", status.getChildrenNum());
m.put("storagePolicy", status.getStoragePolicy());
ObjectMapper mapper = new ObjectMapper();
try {
return includeType ?
toJsonString(FileStatus.class, m) : mapper.writeValueAsString(m);
} catch (IOException ignored) {
}
return null;
}
/** Convert an ExtendedBlock to a Json map. */
private static Map<String, Object> toJsonMap(final ExtendedBlock extendedblock) {
if (extendedblock == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("blockPoolId", extendedblock.getBlockPoolId());
m.put("blockId", extendedblock.getBlockId());
m.put("numBytes", extendedblock.getNumBytes());
m.put("generationStamp", extendedblock.getGenerationStamp());
return m;
}
/** Convert a DatanodeInfo to a Json map. */
static Map<String, Object> toJsonMap(final DatanodeInfo datanodeinfo) {
if (datanodeinfo == null) {
return null;
}
// TODO: Fix storageID
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("ipAddr", datanodeinfo.getIpAddr());
// 'name' is equivalent to ipAddr:xferPort. Older clients (1.x, 0.23.x)
    // expect this instead of the two fields.
m.put("name", datanodeinfo.getXferAddr());
m.put("hostName", datanodeinfo.getHostName());
m.put("storageID", datanodeinfo.getDatanodeUuid());
m.put("xferPort", datanodeinfo.getXferPort());
m.put("infoPort", datanodeinfo.getInfoPort());
m.put("infoSecurePort", datanodeinfo.getInfoSecurePort());
m.put("ipcPort", datanodeinfo.getIpcPort());
m.put("capacity", datanodeinfo.getCapacity());
m.put("dfsUsed", datanodeinfo.getDfsUsed());
m.put("remaining", datanodeinfo.getRemaining());
m.put("blockPoolUsed", datanodeinfo.getBlockPoolUsed());
m.put("cacheCapacity", datanodeinfo.getCacheCapacity());
m.put("cacheUsed", datanodeinfo.getCacheUsed());
m.put("lastUpdate", datanodeinfo.getLastUpdate());
m.put("lastUpdateMonotonic", datanodeinfo.getLastUpdateMonotonic());
m.put("xceiverCount", datanodeinfo.getXceiverCount());
m.put("networkLocation", datanodeinfo.getNetworkLocation());
m.put("adminState", datanodeinfo.getAdminState().name());
return m;
}
/** Convert a DatanodeInfo[] to a Json array. */
private static Object[] toJsonArray(final DatanodeInfo[] array) {
if (array == null) {
return null;
} else if (array.length == 0) {
return EMPTY_OBJECT_ARRAY;
} else {
final Object[] a = new Object[array.length];
for(int i = 0; i < array.length; i++) {
a[i] = toJsonMap(array[i]);
}
return a;
}
}
/** Convert a LocatedBlock to a Json map. */
private static Map<String, Object> toJsonMap(final LocatedBlock locatedblock
) throws IOException {
if (locatedblock == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("blockToken", toJsonMap(locatedblock.getBlockToken()));
m.put("isCorrupt", locatedblock.isCorrupt());
m.put("startOffset", locatedblock.getStartOffset());
m.put("block", toJsonMap(locatedblock.getBlock()));
m.put("locations", toJsonArray(locatedblock.getLocations()));
m.put("cachedLocations", toJsonArray(locatedblock.getCachedLocations()));
return m;
}
/** Convert a LocatedBlock[] to a Json array. */
private static Object[] toJsonArray(final List<LocatedBlock> array
) throws IOException {
if (array == null) {
return null;
} else if (array.size() == 0) {
return EMPTY_OBJECT_ARRAY;
} else {
final Object[] a = new Object[array.size()];
for(int i = 0; i < array.size(); i++) {
a[i] = toJsonMap(array.get(i));
}
return a;
}
}
/** Convert LocatedBlocks to a Json string. */
public static String toJsonString(final LocatedBlocks locatedblocks
) throws IOException {
if (locatedblocks == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("fileLength", locatedblocks.getFileLength());
m.put("isUnderConstruction", locatedblocks.isUnderConstruction());
m.put("locatedBlocks", toJsonArray(locatedblocks.getLocatedBlocks()));
m.put("lastLocatedBlock", toJsonMap(locatedblocks.getLastLocatedBlock()));
m.put("isLastBlockComplete", locatedblocks.isLastBlockComplete());
return toJsonString(LocatedBlocks.class, m);
}
/** Convert a ContentSummary to a Json string. */
public static String toJsonString(final ContentSummary contentsummary) {
if (contentsummary == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("length", contentsummary.getLength());
m.put("fileCount", contentsummary.getFileCount());
m.put("directoryCount", contentsummary.getDirectoryCount());
m.put("quota", contentsummary.getQuota());
m.put("spaceConsumed", contentsummary.getSpaceConsumed());
m.put("spaceQuota", contentsummary.getSpaceQuota());
return toJsonString(ContentSummary.class, m);
}
  /** Convert an MD5MD5CRC32FileChecksum to a Json string. */
public static String toJsonString(final MD5MD5CRC32FileChecksum checksum) {
if (checksum == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("algorithm", checksum.getAlgorithmName());
m.put("length", checksum.getLength());
m.put("bytes", StringUtils.byteToHexString(checksum.getBytes()));
return toJsonString(FileChecksum.class, m);
}
  /** Convert an AclStatus object to a Json string. */
public static String toJsonString(final AclStatus status) {
if (status == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("owner", status.getOwner());
m.put("group", status.getGroup());
m.put("stickyBit", status.isStickyBit());
final List<String> stringEntries = new ArrayList<>();
for (AclEntry entry : status.getEntries()) {
stringEntries.add(entry.toString());
}
m.put("entries", stringEntries);
FsPermission perm = status.getPermission();
if (perm != null) {
m.put("permission", toString(perm));
if (perm.getAclBit()) {
m.put("aclBit", true);
}
if (perm.getEncryptedBit()) {
m.put("encBit", true);
}
}
final Map<String, Map<String, Object>> finalMap =
new TreeMap<String, Map<String, Object>>();
finalMap.put(AclStatus.class.getSimpleName(), m);
ObjectMapper mapper = new ObjectMapper();
try {
return mapper.writeValueAsString(finalMap);
} catch (IOException ignored) {
}
return null;
}
private static Map<String, Object> toJsonMap(final XAttr xAttr,
final XAttrCodec encoding) throws IOException {
if (xAttr == null) {
return null;
}
final Map<String, Object> m = new TreeMap<String, Object>();
m.put("name", XAttrHelper.getPrefixName(xAttr));
m.put("value", xAttr.getValue() != null ?
XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
return m;
}
private static Object[] toJsonArray(final List<XAttr> array,
final XAttrCodec encoding) throws IOException {
if (array == null) {
return null;
} else if (array.size() == 0) {
return EMPTY_OBJECT_ARRAY;
} else {
final Object[] a = new Object[array.size()];
for(int i = 0; i < array.size(); i++) {
a[i] = toJsonMap(array.get(i), encoding);
}
return a;
}
}
public static String toJsonString(final List<XAttr> xAttrs,
final XAttrCodec encoding) throws IOException {
final Map<String, Object> finalMap = new TreeMap<String, Object>();
finalMap.put("XAttrs", toJsonArray(xAttrs, encoding));
ObjectMapper mapper = new ObjectMapper();
return mapper.writeValueAsString(finalMap);
}
public static String toJsonString(final List<XAttr> xAttrs)
throws IOException {
final List<String> names = Lists.newArrayListWithCapacity(xAttrs.size());
for (XAttr xAttr : xAttrs) {
names.add(XAttrHelper.getPrefixName(xAttr));
}
ObjectMapper mapper = new ObjectMapper();
String ret = mapper.writeValueAsString(names);
final Map<String, Object> finalMap = new TreeMap<String, Object>();
finalMap.put("XAttrNames", ret);
return mapper.writeValueAsString(finalMap);
}
}
| 12,695 | 34.563025 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ParamFilter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.net.URI;
import java.util.List;
import java.util.Map;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.UriBuilder;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerRequestFilter;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import com.sun.jersey.spi.container.ResourceFilter;
import org.apache.hadoop.util.StringUtils;
/**
 * A filter that converts parameter names to lower case
 * so that they are treated as case insensitive.
*/
public class ParamFilter implements ResourceFilter {
private static final ContainerRequestFilter LOWER_CASE
= new ContainerRequestFilter() {
@Override
public ContainerRequest filter(final ContainerRequest request) {
final MultivaluedMap<String, String> parameters = request.getQueryParameters();
if (containsUpperCase(parameters.keySet())) {
//rebuild URI
final URI lower = rebuildQuery(request.getRequestUri(), parameters);
request.setUris(request.getBaseUri(), lower);
}
return request;
}
};
@Override
public ContainerRequestFilter getRequestFilter() {
return LOWER_CASE;
}
@Override
public ContainerResponseFilter getResponseFilter() {
return null;
}
/** Do the strings contain upper case letters? */
static boolean containsUpperCase(final Iterable<String> strings) {
for(String s : strings) {
for(int i = 0; i < s.length(); i++) {
if (Character.isUpperCase(s.charAt(i))) {
return true;
}
}
}
return false;
}
/** Rebuild the URI query with lower case parameter names. */
private static URI rebuildQuery(final URI uri,
final MultivaluedMap<String, String> parameters) {
UriBuilder b = UriBuilder.fromUri(uri).replaceQuery("");
for(Map.Entry<String, List<String>> e : parameters.entrySet()) {
final String key = StringUtils.toLowerCase(e.getKey());
for(String v : e.getValue()) {
b = b.queryParam(key, v);
}
}
return b.build();
}
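  // Illustrative example (paths and values are hypothetical): a request such as
  //   /webhdfs/v1/foo?Op=LISTSTATUS&User.Name=alice
  // is rewritten by this filter to
  //   /webhdfs/v1/foo?op=LISTSTATUS&user.name=alice
  // i.e. only the parameter names are lower-cased; values are left untouched.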
}
| 2,919 | 32.953488 | 85 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.ConnectException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.TimeZone;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.DelegationTokenRenewer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ServletUtil;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.XMLReader;
import org.xml.sax.helpers.DefaultHandler;
import org.xml.sax.helpers.XMLReaderFactory;
/**
* An implementation of a protocol for accessing filesystems over HTTP.
* The following implementation provides a limited, read-only interface
* to a filesystem over HTTP.
* @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
* @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class HftpFileSystem extends FileSystem
implements DelegationTokenRenewer.Renewable, TokenAspect.TokenManagementDelegator {
static {
HttpURLConnection.setFollowRedirects(true);
}
URLConnectionFactory connectionFactory;
protected UserGroupInformation ugi;
private URI hftpURI;
protected URI nnUri;
public static final String HFTP_TIMEZONE = "UTC";
public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";
protected TokenAspect<? extends HftpFileSystem> tokenAspect;
private Token<?> delegationToken;
private Token<?> renewToken;
protected Text tokenServiceName;
@Override
public URI getCanonicalUri() {
return super.getCanonicalUri();
}
public static final SimpleDateFormat getDateFormat() {
final SimpleDateFormat df = new SimpleDateFormat(HFTP_DATE_FORMAT);
df.setTimeZone(TimeZone.getTimeZone(HFTP_TIMEZONE));
return df;
}
protected static final ThreadLocal<SimpleDateFormat> df =
new ThreadLocal<SimpleDateFormat>() {
@Override
protected SimpleDateFormat initialValue() {
return getDateFormat();
}
};
@Override
protected int getDefaultPort() {
return getConf().getInt(HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
}
/**
* We generate the address with one of the following ports, in
* order of preference.
   * 1. Port from the hftp URI, e.g. hftp://namenode:4000/ will return 4000.
   * 2. Port configured via DFS_NAMENODE_HTTP_PORT_KEY.
   * 3. DFS_NAMENODE_HTTP_PORT_DEFAULT, i.e. 50070.
   *
   * @param uri the URI whose authority supplies the namenode address
*/
protected InetSocketAddress getNamenodeAddr(URI uri) {
    // use the authority so that a user-supplied URI can override the port
return NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
}
protected URI getNamenodeUri(URI uri) {
return DFSUtil.createUri(getUnderlyingProtocol(), getNamenodeAddr(uri));
}
/**
   * See the documentation of {@link #getNamenodeAddr(URI)} for the logic
   * behind selecting the canonical service name.
   * @return the canonical service name of this filesystem's namenode
*/
@Override
public String getCanonicalServiceName() {
return SecurityUtil.buildTokenService(nnUri).toString();
}
@Override
protected URI canonicalizeUri(URI uri) {
return NetUtils.getCanonicalUri(uri, getDefaultPort());
}
/**
* Return the protocol scheme for the FileSystem.
* <p/>
*
* @return <code>hftp</code>
*/
@Override
public String getScheme() {
return WebHdfsConstants.HFTP_SCHEME;
}
/**
   * Initialize tokenAspect. This method is intended to
   * be overridden by HsftpFileSystem.
*/
protected void initTokenAspect() {
tokenAspect = new TokenAspect<HftpFileSystem>(this, tokenServiceName, WebHdfsConstants.HFTP_TOKEN_KIND);
}
@Override
public void initialize(final URI name, final Configuration conf)
throws IOException {
super.initialize(name, conf);
setConf(conf);
this.connectionFactory = URLConnectionFactory
.newDefaultURLConnectionFactory(conf);
this.ugi = UserGroupInformation.getCurrentUser();
this.nnUri = getNamenodeUri(name);
this.tokenServiceName = SecurityUtil.buildTokenService(nnUri);
try {
this.hftpURI = new URI(name.getScheme(), name.getAuthority(),
null, null, null);
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
initTokenAspect();
if (UserGroupInformation.isSecurityEnabled()) {
tokenAspect.initDelegationToken(ugi);
}
}
@Override
public Token<?> getRenewToken() {
return renewToken;
}
/**
* Return the underlying protocol that is used to talk to the namenode.
*/
protected String getUnderlyingProtocol() {
return "http";
}
@Override
public synchronized <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
/**
* XXX The kind of the token has been changed by DelegationTokenFetcher. We
     * use the token for renewal, since the reflection utilities need the value
     * of the kind field to correctly renew the token.
     *
     * For other operations, however, the client has to send an
     * HDFS_DELEGATION_KIND token over the wire so that it can talk to Hadoop
* 0.20.203 clusters. Later releases fix this problem. See HDFS-5440 for
* more details.
*/
renewToken = token;
delegationToken = new Token<T>(token);
delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
}
@Override
public synchronized Token<?> getDelegationToken(final String renewer)
throws IOException {
try {
// Renew TGT if needed
UserGroupInformation connectUgi = ugi.getRealUser();
final String proxyUser = connectUgi == null ? null : ugi
.getShortUserName();
if (connectUgi == null) {
connectUgi = ugi;
}
return connectUgi.doAs(new PrivilegedExceptionAction<Token<?>>() {
@Override
public Token<?> run() throws IOException {
Credentials c;
try {
c = DelegationTokenFetcher.getDTfromRemote(connectionFactory,
nnUri, renewer, proxyUser);
} catch (IOException e) {
if (e.getCause() instanceof ConnectException) {
LOG.warn("Couldn't connect to " + nnUri +
", assuming security is disabled");
return null;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Exception getting delegation token", e);
}
throw e;
}
for (Token<? extends TokenIdentifier> t : c.getAllTokens()) {
if(LOG.isDebugEnabled()) {
LOG.debug("Got dt for " + getUri() + ";t.service="
+t.getService());
}
return t;
}
return null;
}
});
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
@Override
public URI getUri() {
return hftpURI;
}
/**
* Return a URL pointing to given path on the namenode.
*
* @param path to obtain the URL for
* @param query string to append to the path
* @return namenode URL referring to the given path
* @throws IOException on error constructing the URL
*/
protected URL getNamenodeURL(String path, String query) throws IOException {
final URL url = new URL(getUnderlyingProtocol(), nnUri.getHost(),
nnUri.getPort(), path + '?' + query);
if (LOG.isTraceEnabled()) {
LOG.trace("url=" + url);
}
return url;
}
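  // Illustrative example (host, port and path are hypothetical): for a namenode at
  // nn.example.com:50070 over plain HTTP, a call such as
  //   getNamenodeURL("/data/user/alice/f.txt", "ugi=alice,users")
  // yields
  //   http://nn.example.com:50070/data/user/alice/f.txt?ugi=alice,users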
/**
* Get encoded UGI parameter string for a URL.
*
* @return user_shortname,group1,group2...
*/
private String getEncodedUgiParameter() {
StringBuilder ugiParameter = new StringBuilder(
ServletUtil.encodeQueryValue(ugi.getShortUserName()));
for(String g: ugi.getGroupNames()) {
ugiParameter.append(",");
ugiParameter.append(ServletUtil.encodeQueryValue(g));
}
return ugiParameter.toString();
}
/**
* Open an HTTP connection to the namenode to read file data and metadata.
* @param path The path component of the URL
* @param query The query component of the URL
*/
protected HttpURLConnection openConnection(String path, String query)
throws IOException {
query = addDelegationTokenParam(query);
final URL url = getNamenodeURL(path, query);
final HttpURLConnection connection;
connection = (HttpURLConnection)connectionFactory.openConnection(url);
connection.setRequestMethod("GET");
connection.connect();
return connection;
}
protected String addDelegationTokenParam(String query) throws IOException {
String tokenString = null;
if (UserGroupInformation.isSecurityEnabled()) {
synchronized (this) {
tokenAspect.ensureTokenInitialized();
if (delegationToken != null) {
tokenString = delegationToken.encodeToUrlString();
return (query + JspHelper.getDelegationTokenUrlParam(tokenString));
}
}
}
return query;
}
static class RangeHeaderUrlOpener extends ByteRangeInputStream.URLOpener {
private final URLConnectionFactory connFactory;
RangeHeaderUrlOpener(URLConnectionFactory connFactory, final URL url) {
super(url);
this.connFactory = connFactory;
}
protected HttpURLConnection openConnection() throws IOException {
return (HttpURLConnection)connFactory.openConnection(url);
}
/** Use HTTP Range header for specifying offset. */
@Override
protected HttpURLConnection connect(final long offset,
final boolean resolved) throws IOException {
final HttpURLConnection conn = openConnection();
conn.setRequestMethod("GET");
if (offset != 0L) {
conn.setRequestProperty("Range", "bytes=" + offset + "-");
}
conn.connect();
//Expects HTTP_OK or HTTP_PARTIAL response codes.
final int code = conn.getResponseCode();
if (offset != 0L && code != HttpURLConnection.HTTP_PARTIAL) {
throw new IOException("HTTP_PARTIAL expected, received " + code);
} else if (offset == 0L && code != HttpURLConnection.HTTP_OK) {
throw new IOException("HTTP_OK expected, received " + code);
}
return conn;
}
}
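  // Illustrative note: a non-zero offset is requested via an HTTP Range header, e.g.
  // a hypothetical connect(1024L, false) sends "Range: bytes=1024-" and requires an
  // HTTP 206 (HTTP_PARTIAL) response, while a zero offset expects HTTP 200 (HTTP_OK).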
static class RangeHeaderInputStream extends ByteRangeInputStream {
RangeHeaderInputStream(RangeHeaderUrlOpener o, RangeHeaderUrlOpener r)
throws IOException {
super(o, r);
}
RangeHeaderInputStream(URLConnectionFactory connFactory, final URL url)
throws IOException {
this(new RangeHeaderUrlOpener(connFactory, url),
new RangeHeaderUrlOpener(connFactory, null));
}
@Override
protected URL getResolvedUrl(final HttpURLConnection connection) {
return connection.getURL();
}
}
@Override
public FSDataInputStream open(Path f, int buffersize) throws IOException {
f = f.makeQualified(getUri(), getWorkingDirectory());
String path = "/data" + ServletUtil.encodePath(f.toUri().getPath());
String query = addDelegationTokenParam("ugi=" + getEncodedUgiParameter());
URL u = getNamenodeURL(path, query);
return new FSDataInputStream(new RangeHeaderInputStream(connectionFactory, u));
}
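  // Illustrative note (path is hypothetical): opening new Path("/user/alice/f.txt")
  // issues a GET against "/data/user/alice/f.txt" on the namenode, with the ugi
  // parameter (and, when security is enabled, the delegation token) in the query.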
@Override
public void close() throws IOException {
super.close();
tokenAspect.removeRenewAction();
}
/** Class to parse and store a listing reply from the server. */
class LsParser extends DefaultHandler {
final ArrayList<FileStatus> fslist = new ArrayList<FileStatus>();
@Override
public void startElement(String ns, String localname, String qname,
Attributes attrs) throws SAXException {
if ("listing".equals(qname)) return;
if (!"file".equals(qname) && !"directory".equals(qname)) {
if (RemoteException.class.getSimpleName().equals(qname)) {
throw new SAXException(RemoteException.valueOf(attrs));
}
throw new SAXException("Unrecognized entry: " + qname);
}
long modif;
long atime = 0;
try {
final SimpleDateFormat ldf = df.get();
modif = ldf.parse(attrs.getValue("modified")).getTime();
String astr = attrs.getValue("accesstime");
if (astr != null) {
atime = ldf.parse(astr).getTime();
}
} catch (ParseException e) { throw new SAXException(e); }
FileStatus fs = "file".equals(qname)
? new FileStatus(
Long.parseLong(attrs.getValue("size")), false,
Short.valueOf(attrs.getValue("replication")).shortValue(),
Long.parseLong(attrs.getValue("blocksize")),
modif, atime, FsPermission.valueOf(attrs.getValue("permission")),
attrs.getValue("owner"), attrs.getValue("group"),
HftpFileSystem.this.makeQualified(
new Path(getUri().toString(), attrs.getValue("path"))))
: new FileStatus(0L, true, 0, 0L,
modif, atime, FsPermission.valueOf(attrs.getValue("permission")),
attrs.getValue("owner"), attrs.getValue("group"),
HftpFileSystem.this.makeQualified(
new Path(getUri().toString(), attrs.getValue("path"))));
fslist.add(fs);
}
private void fetchList(String path, boolean recur) throws IOException {
try {
XMLReader xr = XMLReaderFactory.createXMLReader();
xr.setContentHandler(this);
HttpURLConnection connection = openConnection(
"/listPaths" + ServletUtil.encodePath(path),
"ugi=" + getEncodedUgiParameter() + (recur ? "&recursive=yes" : ""));
InputStream resp = connection.getInputStream();
xr.parse(new InputSource(resp));
} catch(SAXException e) {
final Exception embedded = e.getException();
if (embedded != null && embedded instanceof IOException) {
throw (IOException)embedded;
}
throw new IOException("invalid xml directory content", e);
}
}
public FileStatus getFileStatus(Path f) throws IOException {
fetchList(f.toUri().getPath(), false);
if (fslist.size() == 0) {
throw new FileNotFoundException("File does not exist: " + f);
}
return fslist.get(0);
}
public FileStatus[] listStatus(Path f, boolean recur) throws IOException {
fetchList(f.toUri().getPath(), recur);
if (fslist.size() > 0 && (fslist.size() != 1 || fslist.get(0).isDirectory())) {
fslist.remove(0);
}
return fslist.toArray(new FileStatus[0]);
}
public FileStatus[] listStatus(Path f) throws IOException {
return listStatus(f, false);
}
}
@Override
public FileStatus[] listStatus(Path f) throws IOException {
LsParser lsparser = new LsParser();
return lsparser.listStatus(f);
}
@Override
public FileStatus getFileStatus(Path f) throws IOException {
LsParser lsparser = new LsParser();
return lsparser.getFileStatus(f);
}
private class ChecksumParser extends DefaultHandler {
private FileChecksum filechecksum;
@Override
public void startElement(String ns, String localname, String qname,
Attributes attrs) throws SAXException {
if (!MD5MD5CRC32FileChecksum.class.getName().equals(qname)) {
if (RemoteException.class.getSimpleName().equals(qname)) {
throw new SAXException(RemoteException.valueOf(attrs));
}
throw new SAXException("Unrecognized entry: " + qname);
}
filechecksum = MD5MD5CRC32FileChecksum.valueOf(attrs);
}
private FileChecksum getFileChecksum(String f) throws IOException {
final HttpURLConnection connection = openConnection(
"/fileChecksum" + ServletUtil.encodePath(f),
"ugi=" + getEncodedUgiParameter());
try {
final XMLReader xr = XMLReaderFactory.createXMLReader();
xr.setContentHandler(this);
xr.parse(new InputSource(connection.getInputStream()));
} catch(SAXException e) {
final Exception embedded = e.getException();
if (embedded != null && embedded instanceof IOException) {
throw (IOException)embedded;
}
throw new IOException("invalid xml directory content", e);
} finally {
connection.disconnect();
}
return filechecksum;
}
}
@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
final String s = makeQualified(f).toUri().getPath();
return new ChecksumParser().getFileChecksum(s);
}
@Override
public Path getWorkingDirectory() {
return new Path("/").makeQualified(getUri(), null);
}
@Override
public void setWorkingDirectory(Path f) { }
/** This optional operation is not yet supported. */
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
throw new IOException("Not supported");
}
@Override
public FSDataOutputStream create(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication,
long blockSize, Progressable progress) throws IOException {
throw new IOException("Not supported");
}
@Override
public boolean rename(Path src, Path dst) throws IOException {
throw new IOException("Not supported");
}
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
throw new IOException("Not supported");
}
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
throw new IOException("Not supported");
}
/**
* A parser for parsing {@link ContentSummary} xml.
*/
private class ContentSummaryParser extends DefaultHandler {
private ContentSummary contentsummary;
@Override
public void startElement(String ns, String localname, String qname,
Attributes attrs) throws SAXException {
if (!ContentSummary.class.getName().equals(qname)) {
if (RemoteException.class.getSimpleName().equals(qname)) {
throw new SAXException(RemoteException.valueOf(attrs));
}
throw new SAXException("Unrecognized entry: " + qname);
}
contentsummary = toContentSummary(attrs);
}
/**
* Connect to the name node and get content summary.
* @param path The path
* @return The content summary for the path.
* @throws IOException
*/
private ContentSummary getContentSummary(String path) throws IOException {
final HttpURLConnection connection = openConnection(
"/contentSummary" + ServletUtil.encodePath(path),
"ugi=" + getEncodedUgiParameter());
InputStream in = null;
try {
in = connection.getInputStream();
final XMLReader xr = XMLReaderFactory.createXMLReader();
xr.setContentHandler(this);
xr.parse(new InputSource(in));
} catch(FileNotFoundException fnfe) {
//the server may not support getContentSummary
return null;
} catch(SAXException saxe) {
final Exception embedded = saxe.getException();
if (embedded != null && embedded instanceof IOException) {
throw (IOException)embedded;
}
throw new IOException("Invalid xml format", saxe);
} finally {
if (in != null) {
in.close();
}
connection.disconnect();
}
return contentsummary;
}
}
/** Return the object represented in the attributes. */
private static ContentSummary toContentSummary(Attributes attrs
) throws SAXException {
final String length = attrs.getValue("length");
final String fileCount = attrs.getValue("fileCount");
final String directoryCount = attrs.getValue("directoryCount");
final String quota = attrs.getValue("quota");
final String spaceConsumed = attrs.getValue("spaceConsumed");
final String spaceQuota = attrs.getValue("spaceQuota");
if (length == null
|| fileCount == null
|| directoryCount == null
|| quota == null
|| spaceConsumed == null
|| spaceQuota == null) {
return null;
}
try {
return new ContentSummary(
Long.parseLong(length),
Long.parseLong(fileCount),
Long.parseLong(directoryCount),
Long.parseLong(quota),
Long.parseLong(spaceConsumed),
Long.parseLong(spaceQuota));
} catch(Exception e) {
throw new SAXException("Invalid attributes: length=" + length
+ ", fileCount=" + fileCount
+ ", directoryCount=" + directoryCount
+ ", quota=" + quota
+ ", spaceConsumed=" + spaceConsumed
+ ", spaceQuota=" + spaceQuota, e);
}
}
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
final String s = makeQualified(f).toUri().getPath();
final ContentSummary cs = new ContentSummaryParser().getContentSummary(s);
return cs != null? cs: super.getContentSummary(f);
}
@SuppressWarnings("unchecked")
@Override
public long renewDelegationToken(final Token<?> token) throws IOException {
// update the kerberos credentials, if they are coming from a keytab
UserGroupInformation connectUgi = ugi.getRealUser();
if (connectUgi == null) {
connectUgi = ugi;
}
try {
return connectUgi.doAs(new PrivilegedExceptionAction<Long>() {
@Override
public Long run() throws Exception {
InetSocketAddress serviceAddr = SecurityUtil
.getTokenServiceAddr(token);
return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
(Token<DelegationTokenIdentifier>) token);
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
}
@SuppressWarnings("unchecked")
@Override
public void cancelDelegationToken(final Token<?> token) throws IOException {
UserGroupInformation connectUgi = ugi.getRealUser();
if (connectUgi == null) {
connectUgi = ugi;
}
try {
connectUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
InetSocketAddress serviceAddr = SecurityUtil
.getTokenServiceAddr(token);
DelegationTokenFetcher.cancelDelegationToken(connectionFactory,
DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
(Token<DelegationTokenIdentifier>) token);
return null;
}
});
} catch (InterruptedException e) {
throw new IOException(e);
}
}
}
| 25,003 | 33.205198 | 108 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
import java.io.IOException;
import java.lang.reflect.Type;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.core.Context;
import javax.ws.rs.ext.Provider;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import com.sun.jersey.api.core.HttpContext;
import com.sun.jersey.core.spi.component.ComponentContext;
import com.sun.jersey.core.spi.component.ComponentScope;
import com.sun.jersey.server.impl.inject.AbstractHttpContextInjectable;
import com.sun.jersey.spi.inject.Injectable;
import com.sun.jersey.spi.inject.InjectableProvider;
/** Inject user information into HTTP operations. */
@Provider
public class UserProvider
extends AbstractHttpContextInjectable<UserGroupInformation>
implements InjectableProvider<Context, Type> {
@Context HttpServletRequest request;
@Context ServletContext servletcontext;
@Override
public UserGroupInformation getValue(final HttpContext context) {
final Configuration conf = (Configuration) servletcontext
.getAttribute(JspHelper.CURRENT_CONF);
try {
return JspHelper.getUGI(servletcontext, request, conf,
AuthenticationMethod.KERBEROS, false);
} catch (IOException e) {
throw new SecurityException(
SecurityUtil.FAILED_TO_GET_UGI_MSG_HEADER + " " + e, e);
}
}
@Override
public ComponentScope getScope() {
return ComponentScope.PerRequest;
}
@Override
public Injectable<UserGroupInformation> getInjectable(
final ComponentContext componentContext, final Context context,
final Type type) {
return type.equals(UserGroupInformation.class)? this : null;
}
}
| 2,739 | 36.534247 | 76 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
import java.io.FileNotFoundException;
import java.io.IOException;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import javax.ws.rs.ext.Provider;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.web.JsonUtil;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import com.google.common.annotations.VisibleForTesting;
import com.sun.jersey.api.ParamException;
import com.sun.jersey.api.container.ContainerException;
/** Handle exceptions. */
@Provider
public class ExceptionHandler implements ExceptionMapper<Exception> {
public static final Log LOG = LogFactory.getLog(ExceptionHandler.class);
private static Exception toCause(Exception e) {
final Throwable t = e.getCause();
if (e instanceof SecurityException) {
      // For the issue reported in HDFS-6475: if the SecurityException's cause
      // is an InvalidToken, and the InvalidToken's cause is a StandbyException,
      // return the StandbyException; otherwise, leave the exception as is,
      // since those cases are handled elsewhere. See HDFS-6588.
if (t != null && t instanceof InvalidToken) {
final Throwable t1 = t.getCause();
if (t1 != null && t1 instanceof StandbyException) {
e = (StandbyException)t1;
}
}
} else {
if (t != null && t instanceof Exception) {
e = (Exception)t;
}
}
return e;
}
private @Context HttpServletResponse response;
@Override
public Response toResponse(Exception e) {
if (LOG.isTraceEnabled()) {
LOG.trace("GOT EXCEPITION", e);
}
//clear content type
response.setContentType(null);
//Convert exception
if (e instanceof ParamException) {
final ParamException paramexception = (ParamException)e;
e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
+ paramexception.getParameterName() + "\": "
+ e.getCause().getMessage(), e);
}
if (e instanceof ContainerException) {
e = toCause(e);
}
if (e instanceof RemoteException) {
e = ((RemoteException)e).unwrapRemoteException();
}
if (e instanceof SecurityException) {
e = toCause(e);
}
//Map response status
final Response.Status s;
if (e instanceof SecurityException) {
s = Response.Status.FORBIDDEN;
} else if (e instanceof AuthorizationException) {
s = Response.Status.FORBIDDEN;
} else if (e instanceof FileNotFoundException) {
s = Response.Status.NOT_FOUND;
} else if (e instanceof IOException) {
s = Response.Status.FORBIDDEN;
} else if (e instanceof UnsupportedOperationException) {
s = Response.Status.BAD_REQUEST;
} else if (e instanceof IllegalArgumentException) {
s = Response.Status.BAD_REQUEST;
} else {
LOG.warn("INTERNAL_SERVER_ERROR", e);
s = Response.Status.INTERNAL_SERVER_ERROR;
}
final String js = JsonUtil.toJsonString(e);
return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js).build();
}
@VisibleForTesting
public void initResponse(HttpServletResponse response) {
this.response = response;
}
}
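/**
 * Illustrative sketch, not part of the original file: the instanceof chain in
 * toResponse() above is order-sensitive.  FileNotFoundException is itself an
 * IOException, but because it is tested before the generic IOException branch
 * it maps to 404 NOT_FOUND rather than 403 FORBIDDEN.  The hypothetical helper
 * below restates only that mapping so the precedence can be tried in isolation.
 */
class ExceptionStatusSketch {
  static Response.Status statusOf(Exception e) {
    if (e instanceof SecurityException) return Response.Status.FORBIDDEN;
    if (e instanceof AuthorizationException) return Response.Status.FORBIDDEN;
    if (e instanceof FileNotFoundException) return Response.Status.NOT_FOUND;
    if (e instanceof IOException) return Response.Status.FORBIDDEN;
    if (e instanceof UnsupportedOperationException) return Response.Status.BAD_REQUEST;
    if (e instanceof IllegalArgumentException) return Response.Status.BAD_REQUEST;
    return Response.Status.INTERNAL_SERVER_ERROR;
  }
  public static void main(String[] args) {
    // Should print NOT_FOUND, then FORBIDDEN.
    System.out.println(statusOf(new FileNotFoundException("/no/such/file")));
    System.out.println(statusOf(new IOException("generic I/O failure")));
  }
}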
| 4,346 | 33.776 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenServiceParam.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
import org.apache.hadoop.hdfs.web.resources.StringParam;
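/** Token service name parameter. */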
public class TokenServiceParam extends StringParam {
/** Parameter name */
public static final String NAME = "service";
/** Default parameter value. */
public static final String DEFAULT = NULL;
private static final StringParam.Domain DOMAIN = new StringParam.Domain(NAME, null);
/**
* Constructor.
* @param str a string representation of the parameter value.
*/
public TokenServiceParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
}
@Override
public String getName() {
return NAME;
}
}
| 1,481 | 31.933333 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/NamenodeAddressParam.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
/** Namenode RPC address parameter. */
public class NamenodeAddressParam extends StringParam {
/** Parameter name. */
public static final String NAME = "namenoderpcaddress";
/** Default parameter value. */
public static final String DEFAULT = "";
private static final Domain DOMAIN = new Domain(NAME, null);
/**
* Constructor.
* @param str a string representation of the parameter value.
*/
public NamenodeAddressParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT)? null: DOMAIN.parse(str));
}
/**
* Construct an object using the RPC address of the given namenode.
*/
public NamenodeAddressParam(final NameNode namenode) {
super(DOMAIN, namenode.getTokenServiceName());
}
@Override
public String getName() {
return NAME;
}
}
| 1,717 | 33.36 | 79 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UriFsPathParam.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
/** The FileSystem path parameter. */
public class UriFsPathParam extends StringParam {
/** Parameter name. */
public static final String NAME = "path";
private static final Domain DOMAIN = new Domain(NAME, null);
/**
* Constructor.
* @param str a string representation of the parameter value.
*/
public UriFsPathParam(String str) {
super(DOMAIN, str);
}
@Override
public String getName() {
return NAME;
}
/** @return the absolute path. */
public final String getAbsolutePath() {
final String path = getValue(); //The first / has been stripped out.
return path == null? null: "/" + path;
}
}
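/**
 * Illustrative sketch, not part of the original file: the path arrives in
 * UriFsPathParam with its leading slash already stripped, and getAbsolutePath()
 * puts it back.  The class name and path below are hypothetical.
 */
class UriFsPathParamSketch {
  public static void main(String[] args) {
    // Should print "/user/alice/data" for the stripped path "user/alice/data".
    System.out.println(new UriFsPathParam("user/alice/data").getAbsolutePath());
  }
}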
| 1,499 | 31.608696 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenKindParam.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
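/** Token kind parameter. */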
public class TokenKindParam extends StringParam {
/** Parameter name */
public static final String NAME = "kind";
/** Default parameter value. */
public static final String DEFAULT = NULL;
private static final StringParam.Domain DOMAIN = new StringParam.Domain(NAME, null);
/**
* Constructor.
* @param str a string representation of the parameter value.
*/
public TokenKindParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
}
@Override
public String getName() {
return NAME;
}
}
| 1,414 | 31.906977 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ClientMmap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.shortcircuit;
import org.apache.hadoop.classification.InterfaceAudience;
import java.io.Closeable;
import java.nio.MappedByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* A reference to a memory-mapped region used by an HDFS client.
*/
@InterfaceAudience.Private
public class ClientMmap implements Closeable {
static final Log LOG = LogFactory.getLog(ClientMmap.class);
/**
* A reference to the block replica which this mmap relates to.
*/
private ShortCircuitReplica replica;
/**
* The java ByteBuffer object.
*/
private final MappedByteBuffer map;
/**
* Whether or not this ClientMmap anchors the replica into memory while
* it exists. Closing an anchored ClientMmap unanchors the replica.
*/
private final boolean anchored;
ClientMmap(ShortCircuitReplica replica, MappedByteBuffer map,
boolean anchored) {
this.replica = replica;
this.map = map;
this.anchored = anchored;
}
/**
* Close the ClientMmap object.
*/
@Override
public void close() {
if (replica != null) {
if (anchored) {
replica.removeNoChecksumAnchor();
}
replica.unref();
}
replica = null;
}
public MappedByteBuffer getMappedByteBuffer() {
return map;
}
}
| 2,147 | 27.64 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.shortcircuit;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* A ShortCircuitReplica object contains file descriptors for a block that
* we are reading via short-circuit local reads.
*
* The file descriptors can be shared between multiple threads because
* all the operations we perform are stateless-- i.e., we use pread
* instead of read, to avoid using the shared position state.
*/
@InterfaceAudience.Private
public class ShortCircuitReplica {
public static final Log LOG = LogFactory.getLog(ShortCircuitCache.class);
/**
* Identifies this ShortCircuitReplica object.
*/
final ExtendedBlockId key;
/**
* The block data input stream.
*/
private final FileInputStream dataStream;
/**
* The block metadata input stream.
*
* TODO: make this nullable if the file has no checksums on disk.
*/
private final FileInputStream metaStream;
/**
* Block metadata header.
*/
private final BlockMetadataHeader metaHeader;
/**
* The cache we belong to.
*/
private final ShortCircuitCache cache;
/**
* Monotonic time at which the replica was created.
*/
private final long creationTimeMs;
/**
* If non-null, the shared memory slot associated with this replica.
*/
private final Slot slot;
/**
* Current mmap state.
*
* Protected by the cache lock.
*/
Object mmapData;
/**
* True if this replica has been purged from the cache; false otherwise.
*
* Protected by the cache lock.
*/
boolean purged = false;
/**
* Number of external references to this replica. Replicas are referenced
* by the cache, BlockReaderLocal instances, and by ClientMmap instances.
* The number starts at 2 because when we create a replica, it is referenced
* by both the cache and the requester.
*
* Protected by the cache lock.
*/
int refCount = 2;
/**
* The monotonic time in nanoseconds at which the replica became evictable, or
* null if it is not evictable.
*
* Protected by the cache lock.
*/
private Long evictableTimeNs = null;
public ShortCircuitReplica(ExtendedBlockId key,
FileInputStream dataStream, FileInputStream metaStream,
ShortCircuitCache cache, long creationTimeMs, Slot slot) throws IOException {
this.key = key;
this.dataStream = dataStream;
this.metaStream = metaStream;
this.metaHeader =
BlockMetadataHeader.preadHeader(metaStream.getChannel());
if (metaHeader.getVersion() != 1) {
throw new IOException("invalid metadata header version " +
metaHeader.getVersion() + ". Can only handle version 1.");
}
this.cache = cache;
this.creationTimeMs = creationTimeMs;
this.slot = slot;
}
/**
* Decrement the reference count.
*/
public void unref() {
cache.unref(this);
}
/**
* Check if the replica is stale.
*
* Must be called with the cache lock held.
*/
boolean isStale() {
if (slot != null) {
// Check staleness by looking at the shared memory area we use to
// communicate with the DataNode.
boolean stale = !slot.isValid();
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": checked shared memory segment. isStale=" + stale);
}
return stale;
} else {
// Fall back to old, time-based staleness method.
long deltaMs = Time.monotonicNow() - creationTimeMs;
long staleThresholdMs = cache.getStaleThresholdMs();
if (deltaMs > staleThresholdMs) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + " is stale because it's " + deltaMs +
" ms old, and staleThresholdMs = " + staleThresholdMs);
}
return true;
} else {
if (LOG.isTraceEnabled()) {
LOG.trace(this + " is not stale because it's only " + deltaMs +
" ms old, and staleThresholdMs = " + staleThresholdMs);
}
return false;
}
}
}
/**
* Try to add a no-checksum anchor to our shared memory slot.
*
* It is only possible to add this anchor when the block is mlocked on the Datanode.
* The DataNode will not munlock the block until the number of no-checksum anchors
* for the block reaches zero.
*
* This method does not require any synchronization.
*
* @return True if we successfully added a no-checksum anchor.
*/
public boolean addNoChecksumAnchor() {
if (slot == null) {
return false;
}
boolean result = slot.addAnchor();
if (LOG.isTraceEnabled()) {
if (result) {
LOG.trace(this + ": added no-checksum anchor to slot " + slot);
} else {
LOG.trace(this + ": could not add no-checksum anchor to slot " + slot);
}
}
return result;
}
/**
* Remove a no-checksum anchor for our shared memory slot.
*
* This method does not require any synchronization.
*/
public void removeNoChecksumAnchor() {
if (slot != null) {
slot.removeAnchor();
}
}
/**
* Check if the replica has an associated mmap that has been fully loaded.
*
* Must be called with the cache lock held.
*/
@VisibleForTesting
public boolean hasMmap() {
return ((mmapData != null) && (mmapData instanceof MappedByteBuffer));
}
/**
* Free the mmap associated with this replica.
*
* Must be called with the cache lock held.
*/
void munmap() {
MappedByteBuffer mmap = (MappedByteBuffer)mmapData;
NativeIO.POSIX.munmap(mmap);
mmapData = null;
}
/**
* Close the replica.
*
* Must be called after there are no more references to the replica in the
* cache or elsewhere.
*/
void close() {
String suffix = "";
Preconditions.checkState(refCount == 0,
"tried to close replica with refCount %d: %s", refCount, this);
refCount = -1;
Preconditions.checkState(purged,
"tried to close unpurged replica %s", this);
if (hasMmap()) {
munmap();
if (LOG.isTraceEnabled()) {
suffix += " munmapped.";
}
}
IOUtils.cleanup(LOG, dataStream, metaStream);
if (slot != null) {
cache.scheduleSlotReleaser(slot);
if (LOG.isTraceEnabled()) {
suffix += " scheduling " + slot + " for later release.";
}
}
if (LOG.isTraceEnabled()) {
LOG.trace("closed " + this + suffix);
}
}
public FileInputStream getDataStream() {
return dataStream;
}
public FileInputStream getMetaStream() {
return metaStream;
}
public BlockMetadataHeader getMetaHeader() {
return metaHeader;
}
public ExtendedBlockId getKey() {
return key;
}
public ClientMmap getOrCreateClientMmap(boolean anchor) {
return cache.getOrCreateClientMmap(this, anchor);
}
MappedByteBuffer loadMmapInternal() {
try {
FileChannel channel = dataStream.getChannel();
MappedByteBuffer mmap = channel.map(MapMode.READ_ONLY, 0,
Math.min(Integer.MAX_VALUE, channel.size()));
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": created mmap of size " + channel.size());
}
return mmap;
} catch (IOException e) {
LOG.warn(this + ": mmap error", e);
return null;
} catch (RuntimeException e) {
LOG.warn(this + ": mmap error", e);
return null;
}
}
/**
* Get the evictable time in nanoseconds.
*
* Note: you must hold the cache lock to call this function.
*
* @return the evictable time in nanoseconds.
*/
public Long getEvictableTimeNs() {
return evictableTimeNs;
}
/**
* Set the evictable time in nanoseconds.
*
* Note: you must hold the cache lock to call this function.
*
* @param evictableTimeNs The evictable time in nanoseconds, or null
* to set no evictable time.
*/
void setEvictableTimeNs(Long evictableTimeNs) {
this.evictableTimeNs = evictableTimeNs;
}
@VisibleForTesting
public Slot getSlot() {
return slot;
}
/**
* Convert the replica to a string for debugging purposes.
* Note that we can't take the lock here.
*/
@Override
public String toString() {
return new StringBuilder().append("ShortCircuitReplica{").
append("key=").append(key).
append(", metaHeader.version=").append(metaHeader.getVersion()).
append(", metaHeader.checksum=").append(metaHeader.getChecksum()).
append(", ident=").append("0x").
append(Integer.toHexString(System.identityHashCode(this))).
append(", creationTimeMs=").append(creationTimeMs).
append("}").toString();
}
}
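/**
 * Illustrative sketch, not part of the original file: the class comment above
 * notes that the replica's file descriptors can be shared across threads because
 * every read is positional ("pread" style).  The hypothetical helper below shows
 * what that means with plain java.nio: FileChannel.read(ByteBuffer, position)
 * never touches the channel's shared position, so concurrent readers do not
 * interfere with each other.
 */
class PositionalReadSketch {
  static int readFully(FileChannel channel, long offset, int len)
      throws IOException {
    java.nio.ByteBuffer buf = java.nio.ByteBuffer.allocate(len);
    int n = 0;
    while (buf.hasRemaining()) {
      // Positional read: no seek, no shared file-position state is modified.
      int r = channel.read(buf, offset + n);
      if (r < 0) {
        break; // reached end of file
      }
      n += r;
    }
    return n;
  }
}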
| 10,096 | 27.848571 | 86 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.shortcircuit;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.hdfs.net.DomainPeer;
import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.EndpointShmManager;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.DomainSocketWatcher;
import com.google.common.base.Preconditions;
/**
* DfsClientShm is a subclass of ShortCircuitShm which is used by the
* DfsClient.
* When the UNIX domain socket associated with this shared memory segment
* closes unexpectedly, we mark the slots inside this segment as disconnected.
* ShortCircuitReplica objects that contain disconnected slots are stale,
* and will not be used to service new reads or mmap operations.
* However, in-progress read or mmap operations will continue to proceed.
* Once the last slot is deallocated, the segment can be safely munmapped.
*
* Slots may also become stale because the associated replica has been deleted
* on the DataNode. In this case, the DataNode will clear the 'valid' bit.
* The client will then see these slots as stale (see
* #{ShortCircuitReplica#isStale}).
*/
public class DfsClientShm extends ShortCircuitShm
implements DomainSocketWatcher.Handler {
/**
* The EndpointShmManager associated with this shared memory segment.
*/
private final EndpointShmManager manager;
/**
* The UNIX domain socket associated with this DfsClientShm.
* We rely on the DomainSocketWatcher to close the socket associated with
* this DomainPeer when necessary.
*/
private final DomainPeer peer;
/**
* True if this shared memory segment has lost its connection to the
* DataNode.
*
* {@link DfsClientShm#handle} sets this to true.
*/
private boolean disconnected = false;
DfsClientShm(ShmId shmId, FileInputStream stream, EndpointShmManager manager,
DomainPeer peer) throws IOException {
super(shmId, stream);
this.manager = manager;
this.peer = peer;
}
public EndpointShmManager getEndpointShmManager() {
return manager;
}
public DomainPeer getPeer() {
return peer;
}
/**
* Determine if the shared memory segment is disconnected from the DataNode.
*
* This must be called with the DfsClientShmManager lock held.
*
* @return True if the shared memory segment is stale.
*/
public synchronized boolean isDisconnected() {
return disconnected;
}
/**
* Handle the closure of the UNIX domain socket associated with this shared
* memory segment by marking this segment as stale.
*
* If there are no slots associated with this shared memory segment, it will
* be freed immediately in this function.
*/
@Override
public boolean handle(DomainSocket sock) {
manager.unregisterShm(getShmId());
synchronized (this) {
Preconditions.checkState(!disconnected);
disconnected = true;
boolean hadSlots = false;
for (Iterator<Slot> iter = slotIterator(); iter.hasNext(); ) {
Slot slot = iter.next();
slot.makeInvalid();
hadSlots = true;
}
if (!hadSlots) {
free();
}
}
return true;
}
}
| 4,024 | 32.541667 | 82 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.shortcircuit;
import java.io.FileInputStream;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.BitSet;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Random;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import sun.misc.Unsafe;
import com.google.common.base.Preconditions;
import com.google.common.collect.ComparisonChain;
import com.google.common.primitives.Ints;
/**
* A shared memory segment used to implement short-circuit reads.
*/
public class ShortCircuitShm {
private static final Log LOG = LogFactory.getLog(ShortCircuitShm.class);
protected static final int BYTES_PER_SLOT = 64;
private static final Unsafe unsafe = safetyDance();
private static Unsafe safetyDance() {
try {
Field f = Unsafe.class.getDeclaredField("theUnsafe");
f.setAccessible(true);
return (Unsafe)f.get(null);
} catch (Throwable e) {
LOG.error("failed to load misc.Unsafe", e);
}
return null;
}
/**
* Calculate the usable size of a shared memory segment.
* We round down to a multiple of the slot size and do some validation.
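   * For example, with 64-byte slots a 1000-byte segment yields 15 slots, so
   * only the first 960 bytes are usable.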
*
* @param stream The stream we're using.
* @return The usable size of the shared memory segment.
*/
private static int getUsableLength(FileInputStream stream)
throws IOException {
int intSize = Ints.checkedCast(stream.getChannel().size());
int slots = intSize / BYTES_PER_SLOT;
if (slots == 0) {
throw new IOException("size of shared memory segment was " +
intSize + ", but that is not enough to hold even one slot.");
}
return slots * BYTES_PER_SLOT;
}
/**
* Identifies a DfsClientShm.
*/
public static class ShmId implements Comparable<ShmId> {
private static final Random random = new Random();
private final long hi;
private final long lo;
/**
* Generate a random ShmId.
*
* We generate ShmIds randomly to prevent a malicious client from
* successfully guessing one and using that to interfere with another
* client.
*/
public static ShmId createRandom() {
return new ShmId(random.nextLong(), random.nextLong());
}
public ShmId(long hi, long lo) {
this.hi = hi;
this.lo = lo;
}
public long getHi() {
return hi;
}
public long getLo() {
return lo;
}
@Override
public boolean equals(Object o) {
if ((o == null) || (o.getClass() != this.getClass())) {
return false;
}
ShmId other = (ShmId)o;
return new EqualsBuilder().
append(hi, other.hi).
append(lo, other.lo).
isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder().
append(this.hi).
append(this.lo).
toHashCode();
}
@Override
public String toString() {
return String.format("%016x%016x", hi, lo);
}
@Override
public int compareTo(ShmId other) {
return ComparisonChain.start().
compare(hi, other.hi).
compare(lo, other.lo).
result();
}
};
/**
* Uniquely identifies a slot.
*/
public static class SlotId {
private final ShmId shmId;
private final int slotIdx;
public SlotId(ShmId shmId, int slotIdx) {
this.shmId = shmId;
this.slotIdx = slotIdx;
}
public ShmId getShmId() {
return shmId;
}
public int getSlotIdx() {
return slotIdx;
}
@Override
public boolean equals(Object o) {
if ((o == null) || (o.getClass() != this.getClass())) {
return false;
}
SlotId other = (SlotId)o;
return new EqualsBuilder().
append(shmId, other.shmId).
append(slotIdx, other.slotIdx).
isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder().
append(this.shmId).
append(this.slotIdx).
toHashCode();
}
@Override
public String toString() {
return String.format("SlotId(%s:%d)", shmId.toString(), slotIdx);
}
}
public class SlotIterator implements Iterator<Slot> {
int slotIdx = -1;
@Override
public boolean hasNext() {
synchronized (ShortCircuitShm.this) {
return allocatedSlots.nextSetBit(slotIdx + 1) != -1;
}
}
@Override
public Slot next() {
synchronized (ShortCircuitShm.this) {
int nextSlotIdx = allocatedSlots.nextSetBit(slotIdx + 1);
if (nextSlotIdx == -1) {
throw new NoSuchElementException();
}
slotIdx = nextSlotIdx;
return slots[nextSlotIdx];
}
}
@Override
public void remove() {
throw new UnsupportedOperationException("SlotIterator " +
"doesn't support removal");
}
}
/**
* A slot containing information about a replica.
*
* The format is:
* word 0
* bit 0:32 Slot flags (see below).
* bit 33:63 Anchor count.
* word 1:7
* Reserved for future use, such as statistics.
* Padding is also useful for avoiding false sharing.
*
* Little-endian versus big-endian is not relevant here since both the client
* and the server reside on the same computer and use the same orientation.
*/
public class Slot {
/**
* Flag indicating that the slot is valid.
*
* The DFSClient sets this flag when it allocates a new slot within one of
* its shared memory regions.
*
* The DataNode clears this flag when the replica associated with this slot
* is no longer valid. The client itself also clears this flag when it
* believes that the DataNode is no longer using this slot to communicate.
*/
private static final long VALID_FLAG = 1L<<63;
/**
* Flag indicating that the slot can be anchored.
*/
private static final long ANCHORABLE_FLAG = 1L<<62;
/**
* The slot address in memory.
*/
private final long slotAddress;
/**
* BlockId of the block this slot is used for.
*/
private final ExtendedBlockId blockId;
Slot(long slotAddress, ExtendedBlockId blockId) {
this.slotAddress = slotAddress;
this.blockId = blockId;
}
/**
* Get the short-circuit memory segment associated with this Slot.
*
* @return The enclosing short-circuit memory segment.
*/
public ShortCircuitShm getShm() {
return ShortCircuitShm.this;
}
/**
* Get the ExtendedBlockId associated with this slot.
*
* @return The ExtendedBlockId of this slot.
*/
public ExtendedBlockId getBlockId() {
return blockId;
}
/**
* Get the SlotId of this slot, containing both shmId and slotIdx.
*
* @return The SlotId of this slot.
*/
public SlotId getSlotId() {
return new SlotId(getShmId(), getSlotIdx());
}
/**
* Get the Slot index.
*
* @return The index of this slot.
*/
public int getSlotIdx() {
return Ints.checkedCast(
(slotAddress - baseAddress) / BYTES_PER_SLOT);
}
/**
* Clear the slot.
*/
void clear() {
unsafe.putLongVolatile(null, this.slotAddress, 0);
}
private boolean isSet(long flag) {
long prev = unsafe.getLongVolatile(null, this.slotAddress);
return (prev & flag) != 0;
}
private void setFlag(long flag) {
long prev;
do {
prev = unsafe.getLongVolatile(null, this.slotAddress);
if ((prev & flag) != 0) {
return;
}
} while (!unsafe.compareAndSwapLong(null, this.slotAddress,
prev, prev | flag));
}
private void clearFlag(long flag) {
long prev;
do {
prev = unsafe.getLongVolatile(null, this.slotAddress);
if ((prev & flag) == 0) {
return;
}
} while (!unsafe.compareAndSwapLong(null, this.slotAddress,
prev, prev & (~flag)));
}
public boolean isValid() {
return isSet(VALID_FLAG);
}
public void makeValid() {
setFlag(VALID_FLAG);
}
public void makeInvalid() {
clearFlag(VALID_FLAG);
}
public boolean isAnchorable() {
return isSet(ANCHORABLE_FLAG);
}
public void makeAnchorable() {
setFlag(ANCHORABLE_FLAG);
}
public void makeUnanchorable() {
clearFlag(ANCHORABLE_FLAG);
}
public boolean isAnchored() {
long prev = unsafe.getLongVolatile(null, this.slotAddress);
if ((prev & VALID_FLAG) == 0) {
// Slot is no longer valid.
return false;
}
return ((prev & 0x7fffffff) != 0);
}
/**
* Try to add an anchor for a given slot.
*
* When a slot is anchored, we know that the block it refers to is resident
* in memory.
*
* @return True if the slot is anchored.
*/
public boolean addAnchor() {
long prev;
do {
prev = unsafe.getLongVolatile(null, this.slotAddress);
if ((prev & VALID_FLAG) == 0) {
// Slot is no longer valid.
return false;
}
if ((prev & ANCHORABLE_FLAG) == 0) {
// Slot can't be anchored right now.
return false;
}
if ((prev & 0x7fffffff) == 0x7fffffff) {
// Too many other threads have anchored the slot (2 billion?)
return false;
}
} while (!unsafe.compareAndSwapLong(null, this.slotAddress,
prev, prev + 1));
return true;
}
/**
* Remove an anchor for a given slot.
*/
public void removeAnchor() {
long prev;
do {
prev = unsafe.getLongVolatile(null, this.slotAddress);
Preconditions.checkState((prev & 0x7fffffff) != 0,
"Tried to remove anchor for slot " + slotAddress +", which was " +
"not anchored.");
} while (!unsafe.compareAndSwapLong(null, this.slotAddress,
prev, prev - 1));
}
@Override
public String toString() {
return "Slot(slotIdx=" + getSlotIdx() + ", shm=" + getShm() + ")";
}
}
/**
* ID for this SharedMemorySegment.
*/
private final ShmId shmId;
/**
* The base address of the memory-mapped file.
*/
private final long baseAddress;
/**
* The mmapped length of the shared memory segment
*/
private final int mmappedLength;
/**
* The slots associated with this shared memory segment.
* slot[i] contains the slot at offset i * BYTES_PER_SLOT,
* or null if that slot is not allocated.
*/
private final Slot slots[];
/**
* A bitset where each bit represents a slot which is in use.
*/
private final BitSet allocatedSlots;
/**
* Create the ShortCircuitShm.
*
* @param shmId The ID to use.
* @param stream The stream that we're going to use to create this
* shared memory segment.
*
* Although this is a FileInputStream, we are going to
* assume that the underlying file descriptor is writable
* as well as readable. It would be more appropriate to use
* a RandomAccessFile here, but that class does not have
* any public accessor which returns a FileDescriptor,
* unlike FileInputStream.
*/
public ShortCircuitShm(ShmId shmId, FileInputStream stream)
throws IOException {
if (!NativeIO.isAvailable()) {
throw new UnsupportedOperationException("NativeIO is not available.");
}
if (Shell.WINDOWS) {
throw new UnsupportedOperationException(
"DfsClientShm is not yet implemented for Windows.");
}
if (unsafe == null) {
throw new UnsupportedOperationException(
"can't use DfsClientShm because we failed to " +
"load misc.Unsafe.");
}
this.shmId = shmId;
this.mmappedLength = getUsableLength(stream);
this.baseAddress = POSIX.mmap(stream.getFD(),
POSIX.MMAP_PROT_READ | POSIX.MMAP_PROT_WRITE, true, mmappedLength);
this.slots = new Slot[mmappedLength / BYTES_PER_SLOT];
this.allocatedSlots = new BitSet(slots.length);
if (LOG.isTraceEnabled()) {
LOG.trace("creating " + this.getClass().getSimpleName() +
"(shmId=" + shmId +
", mmappedLength=" + mmappedLength +
", baseAddress=" + String.format("%x", baseAddress) +
", slots.length=" + slots.length + ")");
}
}
public final ShmId getShmId() {
return shmId;
}
/**
* Determine if this shared memory object is empty.
*
* @return True if the shared memory object is empty.
*/
synchronized final public boolean isEmpty() {
return allocatedSlots.nextSetBit(0) == -1;
}
/**
* Determine if this shared memory object is full.
*
* @return True if the shared memory object is full.
*/
synchronized final public boolean isFull() {
return allocatedSlots.nextClearBit(0) >= slots.length;
}
/**
* Calculate the base address of a slot.
*
* @param slotIdx Index of the slot.
* @return The base address of the slot.
*/
private final long calculateSlotAddress(int slotIdx) {
long offset = slotIdx;
offset *= BYTES_PER_SLOT;
return this.baseAddress + offset;
}
/**
* Allocate a new slot and register it.
*
* This function chooses an empty slot, initializes it, and then returns
* the relevant Slot object.
*
* @return The new slot.
*/
synchronized public final Slot allocAndRegisterSlot(
ExtendedBlockId blockId) {
int idx = allocatedSlots.nextClearBit(0);
if (idx >= slots.length) {
throw new RuntimeException(this + ": no more slots are available.");
}
allocatedSlots.set(idx, true);
Slot slot = new Slot(calculateSlotAddress(idx), blockId);
slot.clear();
slot.makeValid();
slots[idx] = slot;
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": allocAndRegisterSlot " + idx + ": allocatedSlots=" + allocatedSlots +
StringUtils.getStackTrace(Thread.currentThread()));
}
return slot;
}
synchronized public final Slot getSlot(int slotIdx)
throws InvalidRequestException {
if (!allocatedSlots.get(slotIdx)) {
throw new InvalidRequestException(this + ": slot " + slotIdx +
" does not exist.");
}
return slots[slotIdx];
}
/**
* Register a slot.
*
* This function looks at a slot which has already been initialized (by
* another process), and registers it with us. Then, it returns the
* relevant Slot object.
*
* @return The slot.
*
* @throws InvalidRequestException
* If the slot index we're trying to allocate has not been
* initialized, or is already in use.
*/
synchronized public final Slot registerSlot(int slotIdx,
ExtendedBlockId blockId) throws InvalidRequestException {
if (slotIdx < 0) {
throw new InvalidRequestException(this + ": invalid negative slot " +
"index " + slotIdx);
}
if (slotIdx >= slots.length) {
throw new InvalidRequestException(this + ": invalid slot " +
"index " + slotIdx);
}
if (allocatedSlots.get(slotIdx)) {
throw new InvalidRequestException(this + ": slot " + slotIdx +
" is already in use.");
}
Slot slot = new Slot(calculateSlotAddress(slotIdx), blockId);
if (!slot.isValid()) {
throw new InvalidRequestException(this + ": slot " + slotIdx +
" is not marked as valid.");
}
slots[slotIdx] = slot;
allocatedSlots.set(slotIdx, true);
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": registerSlot " + slotIdx + ": allocatedSlots=" + allocatedSlots +
StringUtils.getStackTrace(Thread.currentThread()));
}
return slot;
}
/**
* Unregisters a slot.
*
   * This doesn't alter the contents of the slot.  It just means that this
   * segment no longer tracks the slot as allocated.
   *
* @param slotIdx Index of the slot to unregister.
*/
synchronized public final void unregisterSlot(int slotIdx) {
Preconditions.checkState(allocatedSlots.get(slotIdx),
"tried to unregister slot " + slotIdx + ", which was not registered.");
allocatedSlots.set(slotIdx, false);
slots[slotIdx] = null;
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": unregisterSlot " + slotIdx);
}
}
/**
* Iterate over all allocated slots.
*
   * Note that this method isn't safe if slots are allocated or freed while
   * iterating; each call to hasNext() and next() synchronizes on the segment,
   * but the iteration as a whole does not see a consistent snapshot.
   *
* @return The slot iterator.
*/
public SlotIterator slotIterator() {
return new SlotIterator();
}
public void free() {
try {
POSIX.munmap(baseAddress, mmappedLength);
} catch (IOException e) {
LOG.warn(this + ": failed to munmap", e);
}
LOG.trace(this + ": freed");
}
@Override
public String toString() {
return this.getClass().getSimpleName() + "(" + shmId + ")";
}
}
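/**
 * Illustrative sketch, not part of the original file: Slot packs two flag bits
 * and a 31-bit anchor count into a single 64-bit word and updates it with
 * compare-and-swap loops over sun.misc.Unsafe.  The hypothetical class below
 * reproduces the same layout and CAS pattern with a plain AtomicLong so it can
 * be exercised without a shared memory segment.
 */
class SlotWordSketch {
  // Same bit assignments as ShortCircuitShm.Slot above.
  static final long VALID_FLAG = 1L << 63;
  static final long ANCHORABLE_FLAG = 1L << 62;
  static final long ANCHOR_COUNT_MASK = 0x7fffffffL;
  private final java.util.concurrent.atomic.AtomicLong word =
      new java.util.concurrent.atomic.AtomicLong(0);
  void setFlag(long flag) {
    long prev;
    do {
      prev = word.get();
      if ((prev & flag) != 0) {
        return; // flag already set; nothing to do
      }
    } while (!word.compareAndSet(prev, prev | flag));
  }
  boolean addAnchor() {
    long prev;
    do {
      prev = word.get();
      if ((prev & VALID_FLAG) == 0) {
        return false; // slot is no longer valid
      }
      if ((prev & ANCHORABLE_FLAG) == 0) {
        return false; // anchoring is not allowed for this slot
      }
      if ((prev & ANCHOR_COUNT_MASK) == ANCHOR_COUNT_MASK) {
        return false; // anchor counter is saturated
      }
    } while (!word.compareAndSet(prev, prev + 1));
    return true;
  }
}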
| 18,338 | 27.344668 | 95 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.shortcircuit;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.concurrent.TimeUnit;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.io.IOUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.util.PerformanceAdvisory;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
public class DomainSocketFactory {
private static final Log LOG = LogFactory.getLog(DomainSocketFactory.class);
public enum PathState {
UNUSABLE(false, false),
SHORT_CIRCUIT_DISABLED(true, false),
VALID(true, true);
PathState(boolean usableForDataTransfer, boolean usableForShortCircuit) {
this.usableForDataTransfer = usableForDataTransfer;
this.usableForShortCircuit = usableForShortCircuit;
}
public boolean getUsableForDataTransfer() {
return usableForDataTransfer;
}
public boolean getUsableForShortCircuit() {
return usableForShortCircuit;
}
private final boolean usableForDataTransfer;
private final boolean usableForShortCircuit;
}
public static class PathInfo {
private final static PathInfo NOT_CONFIGURED =
new PathInfo("", PathState.UNUSABLE);
final private String path;
final private PathState state;
PathInfo(String path, PathState state) {
this.path = path;
this.state = state;
}
public String getPath() {
return path;
}
public PathState getPathState() {
return state;
}
@Override
public String toString() {
return new StringBuilder().append("PathInfo{path=").append(path).
append(", state=").append(state).append("}").toString();
}
}
/**
* Information about domain socket paths.
*/
final Cache<String, PathState> pathMap =
CacheBuilder.newBuilder()
.expireAfterWrite(10, TimeUnit.MINUTES)
.build();
public DomainSocketFactory(ShortCircuitConf conf) {
final String feature;
if (conf.isShortCircuitLocalReads() && (!conf.isUseLegacyBlockReaderLocal())) {
feature = "The short-circuit local reads feature";
} else if (conf.isDomainSocketDataTraffic()) {
feature = "UNIX domain socket data traffic";
} else {
feature = null;
}
if (feature == null) {
PerformanceAdvisory.LOG.debug(
"Both short-circuit local reads and UNIX domain socket are disabled.");
} else {
if (conf.getDomainSocketPath().isEmpty()) {
throw new HadoopIllegalArgumentException(feature + " is enabled but "
+ DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
} else if (DomainSocket.getLoadingFailureReason() != null) {
LOG.warn(feature + " cannot be used because "
+ DomainSocket.getLoadingFailureReason());
} else {
LOG.debug(feature + " is enabled.");
}
}
}
/**
* Get information about a domain socket path.
*
* @param addr The inet address to use.
* @param conf The client configuration.
*
* @return Information about the socket path.
*/
public PathInfo getPathInfo(InetSocketAddress addr, ShortCircuitConf conf) {
// If there is no domain socket path configured, we can't use domain
// sockets.
if (conf.getDomainSocketPath().isEmpty()) return PathInfo.NOT_CONFIGURED;
// If we can't do anything with the domain socket, don't create it.
if (!conf.isDomainSocketDataTraffic() &&
(!conf.isShortCircuitLocalReads() || conf.isUseLegacyBlockReaderLocal())) {
return PathInfo.NOT_CONFIGURED;
}
// If the DomainSocket code is not loaded, we can't create
// DomainSocket objects.
if (DomainSocket.getLoadingFailureReason() != null) {
return PathInfo.NOT_CONFIGURED;
}
// UNIX domain sockets can only be used to talk to local peers
if (!DFSClient.isLocalAddress(addr)) return PathInfo.NOT_CONFIGURED;
String escapedPath = DomainSocket.getEffectivePath(
conf.getDomainSocketPath(), addr.getPort());
PathState status = pathMap.getIfPresent(escapedPath);
if (status == null) {
return new PathInfo(escapedPath, PathState.VALID);
} else {
return new PathInfo(escapedPath, status);
}
}
public DomainSocket createSocket(PathInfo info, int socketTimeout) {
Preconditions.checkArgument(info.getPathState() != PathState.UNUSABLE);
boolean success = false;
DomainSocket sock = null;
try {
sock = DomainSocket.connect(info.getPath());
sock.setAttribute(DomainSocket.RECEIVE_TIMEOUT, socketTimeout);
success = true;
} catch (IOException e) {
LOG.warn("error creating DomainSocket", e);
// fall through
} finally {
if (!success) {
if (sock != null) {
IOUtils.closeQuietly(sock);
}
pathMap.put(info.getPath(), PathState.UNUSABLE);
sock = null;
}
}
return sock;
}
public void disableShortCircuitForPath(String path) {
pathMap.put(path, PathState.SHORT_CIRCUIT_DISABLED);
}
public void disableDomainSocketPath(String path) {
pathMap.put(path, PathState.UNUSABLE);
}
@VisibleForTesting
public void clearPathMap() {
pathMap.invalidateAll();
}
}
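/**
 * Illustrative sketch, not part of the original file: pathMap above is a Guava
 * cache whose entries lapse ten minutes after they are written, so a socket
 * path that was marked UNUSABLE or SHORT_CIRCUIT_DISABLED is retried once the
 * entry expires (getPathInfo() treats a cache miss as VALID).  The class name
 * and the socket path below are hypothetical.
 */
class PathStateCacheSketch {
  public static void main(String[] args) {
    Cache<String, DomainSocketFactory.PathState> cache = CacheBuilder.newBuilder()
        .expireAfterWrite(10, TimeUnit.MINUTES)
        .build();
    cache.put("/var/run/hdfs/dn_socket.50010",
        DomainSocketFactory.PathState.UNUSABLE);
    // Until the entry expires, lookups keep returning UNUSABLE; after expiry,
    // getIfPresent() returns null and the path would be treated as VALID again.
    System.out.println(cache.getIfPresent("/var/run/hdfs/dn_socket.50010"));
  }
}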
| 6,471 | 32.189744 | 83 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplicaInfo.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.shortcircuit;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
public final class ShortCircuitReplicaInfo {
private final ShortCircuitReplica replica;
private final InvalidToken exc;
public ShortCircuitReplicaInfo() {
this.replica = null;
this.exc = null;
}
public ShortCircuitReplicaInfo(ShortCircuitReplica replica) {
this.replica = replica;
this.exc = null;
}
public ShortCircuitReplicaInfo(InvalidToken exc) {
this.replica = null;
this.exc = exc;
}
public ShortCircuitReplica getReplica() {
return replica;
}
public InvalidToken getInvalidTokenException() {
return exc;
}
public String toString() {
StringBuilder builder = new StringBuilder();
String prefix = "";
builder.append("ShortCircuitReplicaInfo{");
if (replica != null) {
builder.append(prefix).append(replica);
prefix = ", ";
}
if (exc != null) {
builder.append(prefix).append(exc);
prefix = ", ";
}
builder.append("}");
return builder.toString();
}
}
| 1,904 | 28.765625 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.shortcircuit;
import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.MappedByteBuffer;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.lang.mutable.MutableBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
import org.apache.hadoop.hdfs.net.DomainPeer;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.DomainSocketWatcher;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Waitable;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* The ShortCircuitCache tracks things which the client needs to access
* HDFS block files via short-circuit.
*
* These things include: memory-mapped regions, file descriptors, and shared
* memory areas for communicating with the DataNode.
*/
@InterfaceAudience.Private
public class ShortCircuitCache implements Closeable {
public static final Log LOG = LogFactory.getLog(ShortCircuitCache.class);
/**
* Expiry thread which makes sure that the file descriptors get closed
* after a while.
*/
private class CacheCleaner implements Runnable, Closeable {
private ScheduledFuture<?> future;
/**
* Run the CacheCleaner thread.
*
* Whenever a thread requests a ShortCircuitReplica object, we will make
* sure it gets one. That ShortCircuitReplica object can then be re-used
* when another thread requests a ShortCircuitReplica object for the same
* block. So in that sense, there is no maximum size to the cache.
*
* However, when a ShortCircuitReplica object is unreferenced by the
* thread(s) that are using it, it becomes evictable. There are two
* separate eviction lists-- one for mmaped objects, and another for
* non-mmaped objects. We do this in order to avoid having the regular
* files kick the mmaped files out of the cache too quickly. Reusing
* an already-existing mmap gives a huge performance boost, since the
* page table entries don't have to be re-populated. Both the mmap
* and non-mmap evictable lists have maximum sizes and maximum lifespans.
*/
@Override
public void run() {
ShortCircuitCache.this.lock.lock();
try {
if (ShortCircuitCache.this.closed) return;
long curMs = Time.monotonicNow();
if (LOG.isDebugEnabled()) {
LOG.debug(this + ": cache cleaner running at " + curMs);
}
int numDemoted = demoteOldEvictableMmaped(curMs);
int numPurged = 0;
Long evictionTimeNs = Long.valueOf(0);
while (true) {
Entry<Long, ShortCircuitReplica> entry =
evictable.ceilingEntry(evictionTimeNs);
if (entry == null) break;
evictionTimeNs = entry.getKey();
long evictionTimeMs =
TimeUnit.MILLISECONDS.convert(evictionTimeNs, TimeUnit.NANOSECONDS);
if (evictionTimeMs + maxNonMmappedEvictableLifespanMs >= curMs) break;
ShortCircuitReplica replica = entry.getValue();
if (LOG.isTraceEnabled()) {
LOG.trace("CacheCleaner: purging " + replica + ": " +
StringUtils.getStackTrace(Thread.currentThread()));
}
purge(replica);
numPurged++;
}
if (LOG.isDebugEnabled()) {
LOG.debug(this + ": finishing cache cleaner run started at " +
curMs + ". Demoted " + numDemoted + " mmapped replicas; " +
"purged " + numPurged + " replicas.");
}
} finally {
ShortCircuitCache.this.lock.unlock();
}
}
@Override
public void close() throws IOException {
if (future != null) {
future.cancel(false);
}
}
public void setFuture(ScheduledFuture<?> future) {
this.future = future;
}
/**
* Get the rate at which this cleaner thread should be scheduled.
*
* We do this by taking the minimum expiration time and dividing by 4.
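     * For example, if the shorter of the two lifespans is five minutes, the
     * cleaner runs roughly every 75 seconds.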
*
* @return the rate in milliseconds at which this thread should be
* scheduled.
*/
public long getRateInMs() {
long minLifespanMs =
Math.min(maxNonMmappedEvictableLifespanMs,
maxEvictableMmapedLifespanMs);
long sampleTimeMs = minLifespanMs / 4;
return (sampleTimeMs < 1) ? 1 : sampleTimeMs;
}
}
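  /**
   * Illustrative sketch, not part of the original class: the eviction lists
   * declared further down are TreeMaps keyed by a unique insertion time in
   * nanoseconds, and the cleaner above walks them oldest-first with
   * ceilingEntry() until it reaches an entry that is still young enough to
   * keep.  The hypothetical helper below shows that walk on a plain
   * TreeMap<Long, String>.
   */
  private static class EvictionWalkSketch {
    static int purgeOlderThan(TreeMap<Long, String> evictable, long cutoffNs) {
      int purged = 0;
      Long key = Long.valueOf(0);
      while (true) {
        Entry<Long, String> entry = evictable.ceilingEntry(key);
        if (entry == null || entry.getKey() >= cutoffNs) {
          break; // remaining entries were inserted after the cutoff
        }
        key = entry.getKey();
        evictable.remove(key); // stand-in for purge(replica)
        purged++;
      }
      return purged;
    }
  }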
/**
* A task which asks the DataNode to release a short-circuit shared memory
* slot. If successful, this will tell the DataNode to stop monitoring
* changes to the mlock status of the replica associated with the slot.
* It will also allow us (the client) to re-use this slot for another
* replica. If we can't communicate with the DataNode for some reason,
* we tear down the shared memory segment to avoid being in an inconsistent
* state.
*/
private class SlotReleaser implements Runnable {
/**
* The slot that we need to release.
*/
private final Slot slot;
SlotReleaser(Slot slot) {
this.slot = slot;
}
@Override
public void run() {
if (LOG.isTraceEnabled()) {
LOG.trace(ShortCircuitCache.this + ": about to release " + slot);
}
final DfsClientShm shm = (DfsClientShm)slot.getShm();
final DomainSocket shmSock = shm.getPeer().getDomainSocket();
DomainSocket sock = null;
DataOutputStream out = null;
final String path = shmSock.getPath();
boolean success = false;
try {
sock = DomainSocket.connect(path);
out = new DataOutputStream(
new BufferedOutputStream(sock.getOutputStream()));
new Sender(out).releaseShortCircuitFds(slot.getSlotId());
DataInputStream in = new DataInputStream(sock.getInputStream());
ReleaseShortCircuitAccessResponseProto resp =
ReleaseShortCircuitAccessResponseProto.parseFrom(
PBHelper.vintPrefixed(in));
if (resp.getStatus() != Status.SUCCESS) {
String error = resp.hasError() ? resp.getError() : "(unknown)";
throw new IOException(resp.getStatus().toString() + ": " + error);
}
if (LOG.isTraceEnabled()) {
LOG.trace(ShortCircuitCache.this + ": released " + slot);
}
success = true;
} catch (IOException e) {
LOG.error(ShortCircuitCache.this + ": failed to release " +
"short-circuit shared memory slot " + slot + " by sending " +
"ReleaseShortCircuitAccessRequestProto to " + path +
". Closing shared memory segment.", e);
} finally {
if (success) {
shmManager.freeSlot(slot);
} else {
shm.getEndpointShmManager().shutdown(shm);
}
IOUtils.cleanup(LOG, sock, out);
}
}
}
public interface ShortCircuitReplicaCreator {
/**
* Attempt to create a ShortCircuitReplica object.
*
* This callback will be made without holding any locks.
*
* @return a non-null ShortCircuitReplicaInfo object.
*/
ShortCircuitReplicaInfo createShortCircuitReplicaInfo();
}
/**
* Lock protecting the cache.
*/
private final ReentrantLock lock = new ReentrantLock();
/**
* The executor service that runs the cacheCleaner.
*/
private final ScheduledThreadPoolExecutor cleanerExecutor
= new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder().
setDaemon(true).setNameFormat("ShortCircuitCache_Cleaner").
build());
/**
   * The executor service that runs the SlotReleaser.
*/
private final ScheduledThreadPoolExecutor releaserExecutor
= new ScheduledThreadPoolExecutor(1, new ThreadFactoryBuilder().
setDaemon(true).setNameFormat("ShortCircuitCache_SlotReleaser").
build());
/**
* A map containing all ShortCircuitReplicaInfo objects, organized by Key.
* ShortCircuitReplicaInfo objects may contain a replica, or an InvalidToken
* exception.
*/
private final HashMap<ExtendedBlockId, Waitable<ShortCircuitReplicaInfo>>
replicaInfoMap = new HashMap<ExtendedBlockId,
Waitable<ShortCircuitReplicaInfo>>();
/**
* The CacheCleaner. We don't create this and schedule it until it becomes
* necessary.
*/
private CacheCleaner cacheCleaner;
/**
* Tree of evictable elements.
*
* Maps (unique) insertion time in nanoseconds to the element.
*/
private final TreeMap<Long, ShortCircuitReplica> evictable =
new TreeMap<Long, ShortCircuitReplica>();
/**
* Maximum total size of the cache, including both mmapped and
   * non-mmapped elements.
*/
private final int maxTotalSize;
/**
* Non-mmaped elements older than this will be closed.
*/
private long maxNonMmappedEvictableLifespanMs;
/**
* Tree of mmaped evictable elements.
*
* Maps (unique) insertion time in nanoseconds to the element.
*/
private final TreeMap<Long, ShortCircuitReplica> evictableMmapped =
new TreeMap<Long, ShortCircuitReplica>();
/**
* Maximum number of mmaped evictable elements.
*/
private int maxEvictableMmapedSize;
/**
* Mmaped elements older than this will be closed.
*/
private final long maxEvictableMmapedLifespanMs;
/**
* The minimum number of milliseconds we'll wait after an unsuccessful
* mmap attempt before trying again.
*/
private final long mmapRetryTimeoutMs;
/**
* How long we will keep replicas in the cache before declaring them
* to be stale.
*/
private final long staleThresholdMs;
/**
* True if the ShortCircuitCache is closed.
*/
private boolean closed = false;
/**
* Number of existing mmaps associated with this cache.
*/
private int outstandingMmapCount = 0;
/**
* Manages short-circuit shared memory segments for the client.
*/
private final DfsClientShmManager shmManager;
public static ShortCircuitCache fromConf(ShortCircuitConf conf) {
return new ShortCircuitCache(
conf.getShortCircuitStreamsCacheSize(),
conf.getShortCircuitStreamsCacheExpiryMs(),
conf.getShortCircuitMmapCacheSize(),
conf.getShortCircuitMmapCacheExpiryMs(),
conf.getShortCircuitMmapCacheRetryTimeout(),
conf.getShortCircuitCacheStaleThresholdMs(),
conf.getShortCircuitSharedMemoryWatcherInterruptCheckMs());
}
public ShortCircuitCache(int maxTotalSize, long maxNonMmappedEvictableLifespanMs,
int maxEvictableMmapedSize, long maxEvictableMmapedLifespanMs,
long mmapRetryTimeoutMs, long staleThresholdMs, int shmInterruptCheckMs) {
Preconditions.checkArgument(maxTotalSize >= 0);
this.maxTotalSize = maxTotalSize;
Preconditions.checkArgument(maxNonMmappedEvictableLifespanMs >= 0);
this.maxNonMmappedEvictableLifespanMs = maxNonMmappedEvictableLifespanMs;
Preconditions.checkArgument(maxEvictableMmapedSize >= 0);
this.maxEvictableMmapedSize = maxEvictableMmapedSize;
Preconditions.checkArgument(maxEvictableMmapedLifespanMs >= 0);
this.maxEvictableMmapedLifespanMs = maxEvictableMmapedLifespanMs;
this.mmapRetryTimeoutMs = mmapRetryTimeoutMs;
this.staleThresholdMs = staleThresholdMs;
DfsClientShmManager shmManager = null;
if ((shmInterruptCheckMs > 0) &&
(DomainSocketWatcher.getLoadingFailureReason() == null)) {
try {
shmManager = new DfsClientShmManager(shmInterruptCheckMs);
} catch (IOException e) {
LOG.error("failed to create ShortCircuitShmManager", e);
}
}
this.shmManager = shmManager;
}
public long getStaleThresholdMs() {
return staleThresholdMs;
}
/**
* Increment the reference count of a replica, and remove it from any free
* list it may be in.
*
* You must hold the cache lock while calling this function.
*
* @param replica The replica we're removing.
*/
private void ref(ShortCircuitReplica replica) {
lock.lock();
try {
Preconditions.checkArgument(replica.refCount > 0,
"can't ref %s because its refCount reached %d", replica,
replica.refCount);
Long evictableTimeNs = replica.getEvictableTimeNs();
replica.refCount++;
if (evictableTimeNs != null) {
String removedFrom = removeEvictable(replica);
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": " + removedFrom +
" no longer contains " + replica + ". refCount " +
(replica.refCount - 1) + " -> " + replica.refCount +
StringUtils.getStackTrace(Thread.currentThread()));
}
} else if (LOG.isTraceEnabled()) {
LOG.trace(this + ": replica refCount " +
(replica.refCount - 1) + " -> " + replica.refCount +
StringUtils.getStackTrace(Thread.currentThread()));
}
} finally {
lock.unlock();
}
}
/**
* Unreference a replica.
*
* You must hold the cache lock while calling this function.
*
* @param replica The replica being unreferenced.
*/
void unref(ShortCircuitReplica replica) {
lock.lock();
try {
// If the replica is stale or unusable, but we haven't purged it yet,
// let's do that. It would be a shame to evict a non-stale replica so
// that we could put a stale or unusable one into the cache.
if (!replica.purged) {
String purgeReason = null;
if (!replica.getDataStream().getChannel().isOpen()) {
purgeReason = "purging replica because its data channel is closed.";
} else if (!replica.getMetaStream().getChannel().isOpen()) {
purgeReason = "purging replica because its meta channel is closed.";
} else if (replica.isStale()) {
purgeReason = "purging replica because it is stale.";
}
if (purgeReason != null) {
if (LOG.isDebugEnabled()) {
LOG.debug(this + ": " + purgeReason);
}
purge(replica);
}
}
String addedString = "";
boolean shouldTrimEvictionMaps = false;
int newRefCount = --replica.refCount;
if (newRefCount == 0) {
// Close replica, since there are no remaining references to it.
Preconditions.checkArgument(replica.purged,
"Replica %s reached a refCount of 0 without being purged", replica);
replica.close();
} else if (newRefCount == 1) {
Preconditions.checkState(null == replica.getEvictableTimeNs(),
"Replica %s had a refCount higher than 1, " +
"but was still evictable (evictableTimeNs = %d)",
replica, replica.getEvictableTimeNs());
if (!replica.purged) {
// Add the replica to the end of an eviction list.
// Eviction lists are sorted by time.
if (replica.hasMmap()) {
insertEvictable(System.nanoTime(), replica, evictableMmapped);
addedString = "added to evictableMmapped, ";
} else {
insertEvictable(System.nanoTime(), replica, evictable);
addedString = "added to evictable, ";
}
shouldTrimEvictionMaps = true;
}
} else {
Preconditions.checkArgument(replica.refCount >= 0,
"replica's refCount went negative (refCount = %d" +
" for %s)", replica.refCount, replica);
}
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": unref replica " + replica +
": " + addedString + " refCount " +
(newRefCount + 1) + " -> " + newRefCount +
StringUtils.getStackTrace(Thread.currentThread()));
}
if (shouldTrimEvictionMaps) {
trimEvictionMaps();
}
} finally {
lock.unlock();
}
}
/**
* Demote old evictable mmaps into the regular eviction map.
*
* You must hold the cache lock while calling this function.
*
* @param now Current time in monotonic milliseconds.
* @return Number of replicas demoted.
*/
private int demoteOldEvictableMmaped(long now) {
int numDemoted = 0;
boolean needMoreSpace = false;
Long evictionTimeNs = Long.valueOf(0);
while (true) {
Entry<Long, ShortCircuitReplica> entry =
evictableMmapped.ceilingEntry(evictionTimeNs);
if (entry == null) break;
evictionTimeNs = entry.getKey();
long evictionTimeMs =
TimeUnit.MILLISECONDS.convert(evictionTimeNs, TimeUnit.NANOSECONDS);
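// If this entry has not yet outlived the mmap lifespan, only keep demoting
// while the mmapped eviction list is still over its size limit.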
if (evictionTimeMs + maxEvictableMmapedLifespanMs >= now) {
if (evictableMmapped.size() < maxEvictableMmapedSize) {
break;
}
needMoreSpace = true;
}
ShortCircuitReplica replica = entry.getValue();
if (LOG.isTraceEnabled()) {
String rationale = needMoreSpace ? "because we need more space" :
"because it's too old";
LOG.trace("demoteOldEvictable: demoting " + replica + ": " +
rationale + ": " +
StringUtils.getStackTrace(Thread.currentThread()));
}
removeEvictable(replica, evictableMmapped);
munmap(replica);
insertEvictable(evictionTimeNs, replica, evictable);
numDemoted++;
}
return numDemoted;
}
/**
* Trim the eviction lists.
*/
private void trimEvictionMaps() {
long now = Time.monotonicNow();
demoteOldEvictableMmaped(now);
while (true) {
long evictableSize = evictable.size();
long evictableMmappedSize = evictableMmapped.size();
if (evictableSize + evictableMmappedSize <= maxTotalSize) {
return;
}
ShortCircuitReplica replica;
if (evictableSize == 0) {
replica = evictableMmapped.firstEntry().getValue();
} else {
replica = evictable.firstEntry().getValue();
}
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": trimEvictionMaps is purging " + replica +
StringUtils.getStackTrace(Thread.currentThread()));
}
purge(replica);
}
}
/**
* Munmap a replica, updating outstandingMmapCount.
*
* @param replica The replica to munmap.
*/
private void munmap(ShortCircuitReplica replica) {
replica.munmap();
outstandingMmapCount--;
}
/**
* Remove a replica from an evictable map.
*
* @param replica The replica to remove.
* @return The map it was removed from.
*/
private String removeEvictable(ShortCircuitReplica replica) {
if (replica.hasMmap()) {
removeEvictable(replica, evictableMmapped);
return "evictableMmapped";
} else {
removeEvictable(replica, evictable);
return "evictable";
}
}
/**
* Remove a replica from an evictable map.
*
* @param replica The replica to remove.
* @param map The map to remove it from.
*/
private void removeEvictable(ShortCircuitReplica replica,
TreeMap<Long, ShortCircuitReplica> map) {
Long evictableTimeNs = replica.getEvictableTimeNs();
Preconditions.checkNotNull(evictableTimeNs);
ShortCircuitReplica removed = map.remove(evictableTimeNs);
Preconditions.checkState(removed == replica,
"failed to make %s unevictable", replica);
replica.setEvictableTimeNs(null);
}
/**
* Insert a replica into an evictable map.
*
* If an element already exists with this eviction time, we add a nanosecond
* to it until we find an unused key.
*
* @param evictionTimeNs The eviction time in absolute nanoseconds.
* @param replica The replica to insert.
* @param map The map to insert it into.
*/
private void insertEvictable(Long evictionTimeNs,
ShortCircuitReplica replica, TreeMap<Long, ShortCircuitReplica> map) {
while (map.containsKey(evictionTimeNs)) {
evictionTimeNs++;
}
Preconditions.checkState(null == replica.getEvictableTimeNs());
replica.setEvictableTimeNs(evictionTimeNs);
map.put(evictionTimeNs, replica);
}
/**
* Purge a replica from the cache.
*
* This doesn't necessarily close the replica, since there may be
* outstanding references to it. However, it does mean the cache won't
* hand it out to anyone after this.
*
* You must hold the cache lock while calling this function.
*
* @param replica The replica being removed.
*/
private void purge(ShortCircuitReplica replica) {
boolean removedFromInfoMap = false;
String evictionMapName = null;
Preconditions.checkArgument(!replica.purged);
replica.purged = true;
Waitable<ShortCircuitReplicaInfo> val = replicaInfoMap.get(replica.key);
if (val != null) {
ShortCircuitReplicaInfo info = val.getVal();
if ((info != null) && (info.getReplica() == replica)) {
replicaInfoMap.remove(replica.key);
removedFromInfoMap = true;
}
}
Long evictableTimeNs = replica.getEvictableTimeNs();
if (evictableTimeNs != null) {
evictionMapName = removeEvictable(replica);
}
if (LOG.isTraceEnabled()) {
StringBuilder builder = new StringBuilder();
builder.append(this).append(": purged ").
append(replica).append(" from the cache.");
if (removedFromInfoMap) {
builder.append(" Removed from the replicaInfoMap.");
}
if (evictionMapName != null) {
builder.append(" Removed from ").append(evictionMapName);
}
LOG.trace(builder.toString());
}
unref(replica);
}
/**
* Fetch or create a replica.
*
* You must hold the cache lock while calling this function.
*
* @param key Key to use for lookup.
* @param creator Replica creator callback. Will be called without
* the cache lock being held.
*
* @return Null if no replica could be found or created.
* The replica, otherwise.
*/
public ShortCircuitReplicaInfo fetchOrCreate(ExtendedBlockId key,
ShortCircuitReplicaCreator creator) {
Waitable<ShortCircuitReplicaInfo> newWaitable = null;
lock.lock();
try {
ShortCircuitReplicaInfo info = null;
do {
if (closed) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": can't fetchOrCreate " + key +
" because the cache is closed.");
}
return null;
}
Waitable<ShortCircuitReplicaInfo> waitable = replicaInfoMap.get(key);
if (waitable != null) {
try {
info = fetch(key, waitable);
} catch (RetriableException e) {
if (LOG.isDebugEnabled()) {
LOG.debug(this + ": retrying " + e.getMessage());
}
continue;
}
}
} while (false);
if (info != null) return info;
// We need to load the replica ourselves.
newWaitable = new Waitable<ShortCircuitReplicaInfo>(lock.newCondition());
replicaInfoMap.put(key, newWaitable);
} finally {
lock.unlock();
}
return create(key, creator, newWaitable);
}
/**
* Fetch an existing ReplicaInfo object.
*
* @param key The key that we're using.
* @param waitable The waitable object to wait on.
* @return The existing ReplicaInfo object, or null if there is
* none.
*
* @throws RetriableException If the caller needs to retry.
*/
private ShortCircuitReplicaInfo fetch(ExtendedBlockId key,
Waitable<ShortCircuitReplicaInfo> waitable) throws RetriableException {
// Another thread is already in the process of loading this
// ShortCircuitReplica. So we simply wait for it to complete.
ShortCircuitReplicaInfo info;
try {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": found waitable for " + key);
}
info = waitable.await();
} catch (InterruptedException e) {
LOG.info(this + ": interrupted while waiting for " + key);
Thread.currentThread().interrupt();
throw new RetriableException("interrupted");
}
if (info.getInvalidTokenException() != null) {
LOG.info(this + ": could not get " + key + " due to InvalidToken " +
"exception.", info.getInvalidTokenException());
return info;
}
ShortCircuitReplica replica = info.getReplica();
if (replica == null) {
LOG.warn(this + ": failed to get " + key);
return info;
}
if (replica.purged) {
// Ignore replicas that have already been purged from the cache.
throw new RetriableException("Ignoring purged replica " +
replica + ". Retrying.");
}
// Check if the replica is stale before using it.
// If it is, purge it and retry.
if (replica.isStale()) {
LOG.info(this + ": got stale replica " + replica + ". Removing " +
"this replica from the replicaInfoMap and retrying.");
// Remove the cache's reference to the replica. This may or may not
// trigger a close.
purge(replica);
throw new RetriableException("ignoring stale replica " + replica);
}
ref(replica);
return info;
}
private ShortCircuitReplicaInfo create(ExtendedBlockId key,
ShortCircuitReplicaCreator creator,
Waitable<ShortCircuitReplicaInfo> newWaitable) {
// Handle loading a new replica.
ShortCircuitReplicaInfo info = null;
try {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": loading " + key);
}
info = creator.createShortCircuitReplicaInfo();
} catch (RuntimeException e) {
LOG.warn(this + ": failed to load " + key, e);
}
if (info == null) info = new ShortCircuitReplicaInfo();
lock.lock();
try {
if (info.getReplica() != null) {
// On success, make sure the cache cleaner thread is running.
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": successfully loaded " + info.getReplica());
}
startCacheCleanerThreadIfNeeded();
// Note: new ShortCircuitReplicas start with a refCount of 2,
// indicating that both this cache and whoever requested the
// creation of the replica hold a reference. So we don't need
// to increment the reference count here.
} else {
// On failure, remove the waitable from the replicaInfoMap.
Waitable<ShortCircuitReplicaInfo> waitableInMap = replicaInfoMap.get(key);
if (waitableInMap == newWaitable) replicaInfoMap.remove(key);
if (info.getInvalidTokenException() != null) {
LOG.info(this + ": could not load " + key + " due to InvalidToken " +
"exception.", info.getInvalidTokenException());
} else {
LOG.warn(this + ": failed to load " + key);
}
}
newWaitable.provide(info);
} finally {
lock.unlock();
}
return info;
}
private void startCacheCleanerThreadIfNeeded() {
if (cacheCleaner == null) {
cacheCleaner = new CacheCleaner();
long rateMs = cacheCleaner.getRateInMs();
ScheduledFuture<?> future =
cleanerExecutor.scheduleAtFixedRate(cacheCleaner, rateMs, rateMs,
TimeUnit.MILLISECONDS);
cacheCleaner.setFuture(future);
if (LOG.isDebugEnabled()) {
LOG.debug(this + ": starting cache cleaner thread which will run " +
"every " + rateMs + " ms");
}
}
}
ClientMmap getOrCreateClientMmap(ShortCircuitReplica replica,
boolean anchored) {
Condition newCond;
lock.lock();
try {
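// replica.mmapData is effectively a tri-state field: a MappedByteBuffer once
// the mmap exists, a Long holding the monotonic time of the last failed mmap
// attempt, or a Condition while another thread is creating the mmap.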
while (replica.mmapData != null) {
if (replica.mmapData instanceof MappedByteBuffer) {
ref(replica);
MappedByteBuffer mmap = (MappedByteBuffer)replica.mmapData;
return new ClientMmap(replica, mmap, anchored);
} else if (replica.mmapData instanceof Long) {
long lastAttemptTimeMs = (Long)replica.mmapData;
long delta = Time.monotonicNow() - lastAttemptTimeMs;
if (delta < mmapRetryTimeoutMs) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": can't create client mmap for " +
replica + " because we failed to " +
"create one just " + delta + "ms ago.");
}
return null;
}
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": retrying client mmap for " + replica +
", " + delta + " ms after the previous failure.");
}
} else if (replica.mmapData instanceof Condition) {
Condition cond = (Condition)replica.mmapData;
cond.awaitUninterruptibly();
} else {
Preconditions.checkState(false, "invalid mmapData type %s",
replica.mmapData.getClass().getName());
}
}
newCond = lock.newCondition();
replica.mmapData = newCond;
} finally {
lock.unlock();
}
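// Perform the actual mmap without holding the cache lock. Other threads
// asking for the same replica will wait on newCond until we publish a result.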
MappedByteBuffer map = replica.loadMmapInternal();
lock.lock();
try {
if (map == null) {
replica.mmapData = Long.valueOf(Time.monotonicNow());
newCond.signalAll();
return null;
} else {
outstandingMmapCount++;
replica.mmapData = map;
ref(replica);
newCond.signalAll();
return new ClientMmap(replica, map, anchored);
}
} finally {
lock.unlock();
}
}
/**
* Close the cache and free all associated resources.
*/
@Override
public void close() {
try {
lock.lock();
if (closed) return;
closed = true;
LOG.info(this + ": closing");
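// Zeroing these limits means any remaining eviction pass will treat every
// cached entry as expired.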
maxNonMmappedEvictableLifespanMs = 0;
maxEvictableMmapedSize = 0;
// Close and join cacheCleaner thread.
IOUtils.cleanup(LOG, cacheCleaner);
// Purge all replicas.
while (true) {
Entry<Long, ShortCircuitReplica> entry = evictable.firstEntry();
if (entry == null) break;
purge(entry.getValue());
}
while (true) {
Entry<Long, ShortCircuitReplica> entry = evictableMmapped.firstEntry();
if (entry == null) break;
purge(entry.getValue());
}
} finally {
lock.unlock();
}
releaserExecutor.shutdown();
cleanerExecutor.shutdown();
// wait for existing tasks to terminate
try {
if (!releaserExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
LOG.error("Forcing SlotReleaserThreadPool to shutdown!");
releaserExecutor.shutdownNow();
}
} catch (InterruptedException e) {
releaserExecutor.shutdownNow();
Thread.currentThread().interrupt();
LOG.error("Interrupted while waiting for SlotReleaserThreadPool "
+ "to terminate", e);
}
// wait for existing tasks to terminate
try {
if (!cleanerExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
LOG.error("Forcing CleanerThreadPool to shutdown!");
cleanerExecutor.shutdownNow();
}
} catch (InterruptedException e) {
cleanerExecutor.shutdownNow();
Thread.currentThread().interrupt();
LOG.error("Interrupted while waiting for CleanerThreadPool "
+ "to terminate", e);
}
IOUtils.cleanup(LOG, shmManager);
}
@VisibleForTesting // ONLY for testing
public interface CacheVisitor {
void visit(int numOutstandingMmaps,
Map<ExtendedBlockId, ShortCircuitReplica> replicas,
Map<ExtendedBlockId, InvalidToken> failedLoads,
Map<Long, ShortCircuitReplica> evictable,
Map<Long, ShortCircuitReplica> evictableMmapped);
}
@VisibleForTesting // ONLY for testing
public void accept(CacheVisitor visitor) {
lock.lock();
try {
Map<ExtendedBlockId, ShortCircuitReplica> replicas =
new HashMap<ExtendedBlockId, ShortCircuitReplica>();
Map<ExtendedBlockId, InvalidToken> failedLoads =
new HashMap<ExtendedBlockId, InvalidToken>();
for (Entry<ExtendedBlockId, Waitable<ShortCircuitReplicaInfo>> entry :
replicaInfoMap.entrySet()) {
Waitable<ShortCircuitReplicaInfo> waitable = entry.getValue();
if (waitable.hasVal()) {
if (waitable.getVal().getReplica() != null) {
replicas.put(entry.getKey(), waitable.getVal().getReplica());
} else {
// The exception may be null here, indicating a failed load that
// isn't the result of an invalid block token.
failedLoads.put(entry.getKey(),
waitable.getVal().getInvalidTokenException());
}
}
}
if (LOG.isDebugEnabled()) {
StringBuilder builder = new StringBuilder();
builder.append("visiting ").append(visitor.getClass().getName()).
append("with outstandingMmapCount=").append(outstandingMmapCount).
append(", replicas=");
String prefix = "";
for (Entry<ExtendedBlockId, ShortCircuitReplica> entry : replicas.entrySet()) {
builder.append(prefix).append(entry.getValue());
prefix = ",";
}
prefix = "";
builder.append(", failedLoads=");
for (Entry<ExtendedBlockId, InvalidToken> entry : failedLoads.entrySet()) {
builder.append(prefix).append(entry.getValue());
prefix = ",";
}
prefix = "";
builder.append(", evictable=");
for (Entry<Long, ShortCircuitReplica> entry : evictable.entrySet()) {
builder.append(prefix).append(entry.getKey()).
append(":").append(entry.getValue());
prefix = ",";
}
prefix = "";
builder.append(", evictableMmapped=");
for (Entry<Long, ShortCircuitReplica> entry : evictableMmapped.entrySet()) {
builder.append(prefix).append(entry.getKey()).
append(":").append(entry.getValue());
prefix = ",";
}
LOG.debug(builder.toString());
}
visitor.visit(outstandingMmapCount, replicas, failedLoads,
evictable, evictableMmapped);
} finally {
lock.unlock();
}
}
@Override
public String toString() {
return "ShortCircuitCache(0x" +
Integer.toHexString(System.identityHashCode(this)) + ")";
}
/**
* Allocate a new shared memory slot.
*
* @param datanode The datanode to allocate a shm slot with.
* @param peer A peer connected to the datanode.
* @param usedPeer Will be set to true if we use up the provided peer.
* @param blockId The block id and block pool id of the block we're
* allocating this slot for.
* @param clientName The name of the DFSClient allocating the shared
* memory.
* @return Null if short-circuit shared memory is disabled;
* a short-circuit memory slot otherwise.
* @throws IOException An exception if there was an error talking to
* the datanode.
*/
public Slot allocShmSlot(DatanodeInfo datanode,
DomainPeer peer, MutableBoolean usedPeer,
ExtendedBlockId blockId, String clientName) throws IOException {
if (shmManager != null) {
return shmManager.allocSlot(datanode, peer, usedPeer,
blockId, clientName);
} else {
return null;
}
}
/**
* Free a slot immediately.
*
* ONLY use this if the DataNode is not yet aware of the slot.
*
* @param slot The slot to free.
*/
public void freeSlot(Slot slot) {
Preconditions.checkState(shmManager != null);
slot.makeInvalid();
shmManager.freeSlot(slot);
}
/**
* Schedule a shared memory slot to be released.
*
* @param slot The slot to release.
*/
public void scheduleSlotReleaser(Slot slot) {
Preconditions.checkState(shmManager != null);
releaserExecutor.execute(new SlotReleaser(slot));
}
@VisibleForTesting
public DfsClientShmManager getDfsClientShmManager() {
return shmManager;
}
}
| 37,961 | 34.511693 | 103 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.shortcircuit;
import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map.Entry;
import java.util.TreeMap;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.lang.mutable.MutableBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.net.DomainPeer;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.DomainSocketWatcher;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
/**
* Manages short-circuit memory segments for an HDFS client.
*
* Clients are responsible for requesting and releasing shared memory segments used
* for communicating with the DataNode. The client will try to allocate new slots
* in the set of existing segments, falling back to getting a new segment from the
* DataNode via {@link DataTransferProtocol#requestShortCircuitShm}.
*
* The counterpart to this class on the DataNode is {@link ShortCircuitRegistry}.
* See {@link ShortCircuitRegistry} for more information on the communication protocol.
*/
@InterfaceAudience.Private
public class DfsClientShmManager implements Closeable {
private static final Log LOG = LogFactory.getLog(DfsClientShmManager.class);
/**
* Manages short-circuit memory segments that pertain to a given DataNode.
*/
class EndpointShmManager {
/**
* The datanode we're managing.
*/
private final DatanodeInfo datanode;
/**
* Shared memory segments which have no empty slots.
*
* Protected by the manager lock.
*/
private final TreeMap<ShmId, DfsClientShm> full =
new TreeMap<ShmId, DfsClientShm>();
/**
* Shared memory segments which have at least one empty slot.
*
* Protected by the manager lock.
*/
private final TreeMap<ShmId, DfsClientShm> notFull =
new TreeMap<ShmId, DfsClientShm>();
/**
* True if this datanode doesn't support short-circuit shared memory
* segments.
*
* Protected by the manager lock.
*/
private boolean disabled = false;
/**
* True if we're in the process of loading a shared memory segment from
* this DataNode.
*
* Protected by the manager lock.
*/
private boolean loading = false;
EndpointShmManager (DatanodeInfo datanode) {
this.datanode = datanode;
}
/**
* Pull a slot out of a preexisting shared memory segment.
*
* Must be called with the manager lock held.
*
* @param blockId The blockId to put inside the Slot object.
*
* @return null if none of our shared memory segments contain a
* free slot; the slot object otherwise.
*/
private Slot allocSlotFromExistingShm(ExtendedBlockId blockId) {
if (notFull.isEmpty()) {
return null;
}
Entry<ShmId, DfsClientShm> entry = notFull.firstEntry();
DfsClientShm shm = entry.getValue();
ShmId shmId = shm.getShmId();
Slot slot = shm.allocAndRegisterSlot(blockId);
if (shm.isFull()) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": pulled the last slot " + slot.getSlotIdx() +
" out of " + shm);
}
DfsClientShm removedShm = notFull.remove(shmId);
Preconditions.checkState(removedShm == shm);
full.put(shmId, shm);
} else {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": pulled slot " + slot.getSlotIdx() +
" out of " + shm);
}
}
return slot;
}
/**
* Ask the DataNode for a new shared memory segment. This function must be
* called with the manager lock held. We will release the lock while
* communicating with the DataNode.
*
* @param clientName The current client name.
* @param peer The peer to use to talk to the DataNode.
*
* @return Null if the DataNode does not support shared memory
* segments, or experienced an error creating the
* shm. The shared memory segment itself on success.
* @throws IOException If there was an error communicating over the socket.
* We will not throw an IOException unless the socket
* itself (or the network) is the problem.
*/
private DfsClientShm requestNewShm(String clientName, DomainPeer peer)
throws IOException {
final DataOutputStream out =
new DataOutputStream(
new BufferedOutputStream(peer.getOutputStream()));
new Sender(out).requestShortCircuitShm(clientName);
ShortCircuitShmResponseProto resp =
ShortCircuitShmResponseProto.parseFrom(
PBHelper.vintPrefixed(peer.getInputStream()));
String error = resp.hasError() ? resp.getError() : "(unknown)";
switch (resp.getStatus()) {
case SUCCESS:
DomainSocket sock = peer.getDomainSocket();
byte buf[] = new byte[1];
FileInputStream fis[] = new FileInputStream[1];
if (sock.recvFileInputStreams(fis, buf, 0, buf.length) < 0) {
throw new EOFException("got EOF while trying to transfer the " +
"file descriptor for the shared memory segment.");
}
if (fis[0] == null) {
throw new IOException("the datanode " + datanode + " failed to " +
"pass a file descriptor for the shared memory segment.");
}
try {
DfsClientShm shm =
new DfsClientShm(PBHelper.convert(resp.getId()),
fis[0], this, peer);
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": createNewShm: created " + shm);
}
return shm;
} finally {
IOUtils.cleanup(LOG, fis[0]);
}
case ERROR_UNSUPPORTED:
// The DataNode just does not support short-circuit shared memory
// access, and we should stop asking.
LOG.info(this + ": datanode does not support short-circuit " +
"shared memory access: " + error);
disabled = true;
return null;
default:
// The datanode experienced some kind of unexpected error when trying to
// create the short-circuit shared memory segment.
LOG.warn(this + ": error requesting short-circuit shared memory " +
"access: " + error);
return null;
}
}
/**
* Allocate a new shared memory slot connected to this datanode.
*
* Must be called with the EndpointShmManager lock held.
*
* @param peer The peer to use to talk to the DataNode.
* @param usedPeer (out param) Will be set to true if we used the peer.
* When a peer is used, this manager takes ownership of it and the caller must not close it.
*
* @param clientName The client name.
* @param blockId The block ID to use.
* @return null if the DataNode does not support shared memory
* segments, or experienced an error creating the
* shm. The shared memory segment itself on success.
* @throws IOException If there was an error communicating over the socket.
*/
Slot allocSlot(DomainPeer peer, MutableBoolean usedPeer,
String clientName, ExtendedBlockId blockId) throws IOException {
while (true) {
if (closed) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": the DfsClientShmManager has been closed.");
}
return null;
}
if (disabled) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": shared memory segment access is disabled.");
}
return null;
}
// Try to use an existing slot.
Slot slot = allocSlotFromExistingShm(blockId);
if (slot != null) {
return slot;
}
// There are no free slots. If someone is loading more slots, wait
// for that to finish.
if (loading) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": waiting for loading to finish...");
}
finishedLoading.awaitUninterruptibly();
} else {
// Otherwise, load the slot ourselves.
loading = true;
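// Drop the manager lock while doing network I/O with the DataNode; other
// callers will block on finishedLoading until we signal below.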
lock.unlock();
DfsClientShm shm;
try {
shm = requestNewShm(clientName, peer);
if (shm == null) continue;
// See #{DfsClientShmManager#domainSocketWatcher} for details
// about why we do this before retaking the manager lock.
domainSocketWatcher.add(peer.getDomainSocket(), shm);
// The DomainPeer is now our responsibility, and should not be
// closed by the caller.
usedPeer.setValue(true);
} finally {
lock.lock();
loading = false;
finishedLoading.signalAll();
}
if (shm.isDisconnected()) {
// If the peer closed immediately after the shared memory segment
// was created, the DomainSocketWatcher callback might already have
// fired and marked the shm as disconnected. In this case, we
// obviously don't want to add the SharedMemorySegment to our list
// of valid not-full segments.
if (LOG.isDebugEnabled()) {
LOG.debug(this + ": the UNIX domain socket associated with " +
"this short-circuit memory closed before we could make " +
"use of the shm.");
}
} else {
notFull.put(shm.getShmId(), shm);
}
}
}
}
/**
* Stop tracking a slot.
*
* Must be called with the EndpointShmManager lock held.
*
* @param slot The slot to release.
*/
void freeSlot(Slot slot) {
DfsClientShm shm = (DfsClientShm)slot.getShm();
shm.unregisterSlot(slot.getSlotIdx());
if (shm.isDisconnected()) {
// Stale shared memory segments should not be tracked here.
Preconditions.checkState(!full.containsKey(shm.getShmId()));
Preconditions.checkState(!notFull.containsKey(shm.getShmId()));
if (shm.isEmpty()) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": freeing empty stale " + shm);
}
shm.free();
}
} else {
ShmId shmId = shm.getShmId();
full.remove(shmId); // The shm can't be full if we just freed a slot.
if (shm.isEmpty()) {
notFull.remove(shmId);
// If the shared memory segment is now empty, we call shutdown(2) on
// the UNIX domain socket associated with it. The DomainSocketWatcher,
// which is watching this socket, will call DfsClientShm#handle,
// cleaning up this shared memory segment.
//
// See #{DfsClientShmManager#domainSocketWatcher} for details about why
// we don't want to call DomainSocketWatcher#remove directly here.
//
// Note that we could experience 'fragmentation' here, where the
// DFSClient allocates a bunch of slots in different shared memory
// segments, and then frees most of them, but never fully empties out
// any segment. We make some attempt to avoid this fragmentation by
// always allocating new slots out of the shared memory segment with the
// lowest ID, but it could still occur. In most workloads,
// fragmentation should not be a major concern, since it doesn't impact
// peak file descriptor usage or the speed of allocation.
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": shutting down UNIX domain socket for " +
"empty " + shm);
}
shutdown(shm);
} else {
notFull.put(shmId, shm);
}
}
}
/**
* Unregister a shared memory segment.
*
* Once a segment is unregistered, we will not allocate any more slots
* inside that segment.
*
* The DomainSocketWatcher calls this while holding the DomainSocketWatcher
* lock.
*
* @param shmId The ID of the shared memory segment to unregister.
*/
void unregisterShm(ShmId shmId) {
lock.lock();
try {
full.remove(shmId);
notFull.remove(shmId);
} finally {
lock.unlock();
}
}
@Override
public String toString() {
return String.format("EndpointShmManager(%s, parent=%s)",
datanode, DfsClientShmManager.this);
}
PerDatanodeVisitorInfo getVisitorInfo() {
return new PerDatanodeVisitorInfo(full, notFull, disabled);
}
final void shutdown(DfsClientShm shm) {
try {
shm.getPeer().getDomainSocket().shutdown();
} catch (IOException e) {
LOG.warn(this + ": error shutting down shm: got IOException calling " +
"shutdown(SHUT_RDWR)", e);
}
}
}
private boolean closed = false;
private final ReentrantLock lock = new ReentrantLock();
/**
* A condition variable which is signalled when we finish loading a segment
* from the Datanode.
*/
private final Condition finishedLoading = lock.newCondition();
/**
* Information about each Datanode.
*/
private final HashMap<DatanodeInfo, EndpointShmManager> datanodes =
new HashMap<DatanodeInfo, EndpointShmManager>(1);
/**
* The DomainSocketWatcher which keeps track of the UNIX domain socket
* associated with each shared memory segment.
*
* Note: because the DomainSocketWatcher makes callbacks into this
* DfsClientShmManager object, you MUST NOT attempt to take the
* DomainSocketWatcher lock while holding the DfsClientShmManager lock,
* or else deadlock might result. This means that most DomainSocketWatcher
* methods are off-limits unless you release the manager lock first.
*/
private final DomainSocketWatcher domainSocketWatcher;
DfsClientShmManager(int interruptCheckPeriodMs) throws IOException {
this.domainSocketWatcher = new DomainSocketWatcher(interruptCheckPeriodMs,
"client");
}
public Slot allocSlot(DatanodeInfo datanode, DomainPeer peer,
MutableBoolean usedPeer, ExtendedBlockId blockId,
String clientName) throws IOException {
lock.lock();
try {
if (closed) {
LOG.trace(this + ": the DfsClientShmManager isclosed.");
return null;
}
EndpointShmManager shmManager = datanodes.get(datanode);
if (shmManager == null) {
shmManager = new EndpointShmManager(datanode);
datanodes.put(datanode, shmManager);
}
return shmManager.allocSlot(peer, usedPeer, clientName, blockId);
} finally {
lock.unlock();
}
}
public void freeSlot(Slot slot) {
lock.lock();
try {
DfsClientShm shm = (DfsClientShm)slot.getShm();
shm.getEndpointShmManager().freeSlot(slot);
} finally {
lock.unlock();
}
}
@VisibleForTesting
public static class PerDatanodeVisitorInfo {
public final TreeMap<ShmId, DfsClientShm> full;
public final TreeMap<ShmId, DfsClientShm> notFull;
public final boolean disabled;
PerDatanodeVisitorInfo(TreeMap<ShmId, DfsClientShm> full,
TreeMap<ShmId, DfsClientShm> notFull, boolean disabled) {
this.full = full;
this.notFull = notFull;
this.disabled = disabled;
}
}
@VisibleForTesting
public interface Visitor {
void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
throws IOException;
}
@VisibleForTesting
public void visit(Visitor visitor) throws IOException {
lock.lock();
try {
HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info =
new HashMap<DatanodeInfo, PerDatanodeVisitorInfo>();
for (Entry<DatanodeInfo, EndpointShmManager> entry :
datanodes.entrySet()) {
info.put(entry.getKey(), entry.getValue().getVisitorInfo());
}
visitor.visit(info);
} finally {
lock.unlock();
}
}
/**
* Close the DfsClientShmManager.
*/
@Override
public void close() throws IOException {
lock.lock();
try {
if (closed) return;
closed = true;
} finally {
lock.unlock();
}
// When closed, the domainSocketWatcher will issue callbacks that mark
// all the outstanding DfsClientShm segments as stale.
IOUtils.cleanup(LOG, domainSocketWatcher);
}
@Override
public String toString() {
return String.format("ShortCircuitShmManager(%08x)",
System.identityHashCode(this));
}
@VisibleForTesting
public DomainSocketWatcher getDomainSocketWatcher() {
return domainSocketWatcher;
}
}
| 18,613 | 35.143689 | 93 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
/** Read-write lock interface. */
public interface RwLock {
/** Acquire read lock. */
public void readLock();
/** Release read lock. */
public void readUnlock();
/** Check if the current thread holds read lock. */
public boolean hasReadLock();
/** Acquire write lock. */
public void writeLock();
/** Acquire write lock, unless interrupted while waiting */
void writeLockInterruptibly() throws InterruptedException;
/** Release write lock. */
public void writeUnlock();
/** Check if the current thread holds write lock. */
public boolean hasWriteLock();
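// Typical usage sketch (illustrative, not part of the original interface):
//
//   rwLock.readLock();
//   try {
//     ... read shared state ...
//   } finally {
//     rwLock.readUnlock();
//   }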
}
| 1,439 | 32.488372 | 75 |
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.Time;
import com.google.common.base.Preconditions;
/**
* Manage byte array creation and release.
*/
@InterfaceAudience.Private
public abstract class ByteArrayManager {
static final Log LOG = LogFactory.getLog(ByteArrayManager.class);
private static final ThreadLocal<StringBuilder> debugMessage = new ThreadLocal<StringBuilder>() {
protected StringBuilder initialValue() {
return new StringBuilder();
}
};
private static void logDebugMessage() {
final StringBuilder b = debugMessage.get();
LOG.debug(b);
b.setLength(0);
}
static final int MIN_ARRAY_LENGTH = 32;
static final byte[] EMPTY_BYTE_ARRAY = {};
/**
* @return the least power of two greater than or equal to n, i.e. return
* the least integer x with x >= n and x a power of two.
*
* @throws HadoopIllegalArgumentException
* if n <= 0.
*/
public static int leastPowerOfTwo(final int n) {
if (n <= 0) {
throw new HadoopIllegalArgumentException("n = " + n + " <= 0");
}
final int highestOne = Integer.highestOneBit(n);
if (highestOne == n) {
return n; // n is a power of two.
}
final int roundUp = highestOne << 1;
if (roundUp < 0) {
final long overflow = ((long) highestOne) << 1;
throw new ArithmeticException(
"Overflow: for n = " + n + ", the least power of two (the least"
+ " integer x with x >= n and x a power of two) = "
+ overflow + " > Integer.MAX_VALUE = " + Integer.MAX_VALUE);
}
return roundUp;
}
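// Worked examples (illustrative, not part of the original source):
//   leastPowerOfTwo(1)  == 1
//   leastPowerOfTwo(33) == 64
//   leastPowerOfTwo(64) == 64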
/**
* A counter with a time stamp so that it is reset automatically
* if there is no increment for the time period.
*/
static class Counter {
private final long countResetTimePeriodMs;
private long count = 0L;
private long timestamp = Time.monotonicNow();
Counter(long countResetTimePeriodMs) {
this.countResetTimePeriodMs = countResetTimePeriodMs;
}
synchronized long getCount() {
return count;
}
/**
* Increment the counter, and reset it if there is no increment
* for a certain time period.
*
* @return the new count.
*/
synchronized long increment() {
final long now = Time.monotonicNow();
if (now - timestamp > countResetTimePeriodMs) {
count = 0; // reset the counter
}
timestamp = now;
return ++count;
}
}
/** A map from integers to counters. */
static class CounterMap {
/** @see ByteArrayManager.Conf#countResetTimePeriodMs */
private final long countResetTimePeriodMs;
private final Map<Integer, Counter> map = new HashMap<Integer, Counter>();
private CounterMap(long countResetTimePeriodMs) {
this.countResetTimePeriodMs = countResetTimePeriodMs;
}
/**
* @return the counter for the given key;
* and create a new counter if it does not exist.
*/
synchronized Counter get(final Integer key, final boolean createIfNotExist) {
Counter count = map.get(key);
if (count == null && createIfNotExist) {
count = new Counter(countResetTimePeriodMs);
map.put(key, count);
}
return count;
}
synchronized void clear() {
map.clear();
}
}
/** Manage byte arrays with the same fixed length. */
static class FixedLengthManager {
private final int byteArrayLength;
private final int maxAllocated;
private final Queue<byte[]> freeQueue = new LinkedList<byte[]>();
private int numAllocated = 0;
FixedLengthManager(int arrayLength, int maxAllocated) {
this.byteArrayLength = arrayLength;
this.maxAllocated = maxAllocated;
}
/**
* Allocate a byte array.
*
* If the number of allocated arrays >= maximum, the current thread is
* blocked until the number of allocated arrays drops to below the maximum.
*
* The byte array allocated by this method must be returned for recycling
* via the {@link FixedLengthManager#recycle(byte[])} method.
*/
synchronized byte[] allocate() throws InterruptedException {
if (LOG.isDebugEnabled()) {
debugMessage.get().append(", ").append(this);
}
for(; numAllocated >= maxAllocated;) {
if (LOG.isDebugEnabled()) {
debugMessage.get().append(": wait ...");
logDebugMessage();
}
wait();
if (LOG.isDebugEnabled()) {
debugMessage.get().append("wake up: ").append(this);
}
}
numAllocated++;
final byte[] array = freeQueue.poll();
if (LOG.isDebugEnabled()) {
debugMessage.get().append(", recycled? ").append(array != null);
}
return array != null? array : new byte[byteArrayLength];
}
/**
* Recycle the given byte array, which must have the same length as the
* array length managed by this object.
*
* The byte array may or may not be allocated
* by the {@link FixedLengthManager#allocate()} method.
*/
synchronized int recycle(byte[] array) {
Preconditions.checkNotNull(array);
Preconditions.checkArgument(array.length == byteArrayLength);
if (LOG.isDebugEnabled()) {
debugMessage.get().append(", ").append(this);
}
notify();
numAllocated--;
if (numAllocated < 0) {
// it is possible to drop below 0 since
// some byte arrays may not be created by the allocate() method.
numAllocated = 0;
}
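// Only keep the array if the free queue has not already reached the number of
// arrays that could still be handed out; beyond that, let the GC reclaim it.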
if (freeQueue.size() < maxAllocated - numAllocated) {
if (LOG.isDebugEnabled()) {
debugMessage.get().append(", freeQueue.offer");
}
freeQueue.offer(array);
}
return freeQueue.size();
}
@Override
public synchronized String toString() {
return "[" + byteArrayLength + ": " + numAllocated + "/"
+ maxAllocated + ", free=" + freeQueue.size() + "]";
}
}
/** A map from array lengths to byte array managers. */
static class ManagerMap {
private final int countLimit;
private final Map<Integer, FixedLengthManager> map = new HashMap<Integer, FixedLengthManager>();
ManagerMap(int countLimit) {
this.countLimit = countLimit;
}
/** @return the manager for the given array length. */
synchronized FixedLengthManager get(final Integer arrayLength,
final boolean createIfNotExist) {
FixedLengthManager manager = map.get(arrayLength);
if (manager == null && createIfNotExist) {
manager = new FixedLengthManager(arrayLength, countLimit);
map.put(arrayLength, manager);
}
return manager;
}
synchronized void clear() {
map.clear();
}
}
public static class Conf {
/**
* The count threshold for each array length so that a manager is created
* only after the allocation count exceeds the threshold.
*/
private final int countThreshold;
/**
* The maximum number of arrays allowed for each array length.
*/
private final int countLimit;
/**
* The time period in milliseconds that the allocation count for each array
* length is reset to zero if there is no increment.
*/
private final long countResetTimePeriodMs;
public Conf(int countThreshold, int countLimit, long countResetTimePeriodMs) {
this.countThreshold = countThreshold;
this.countLimit = countLimit;
this.countResetTimePeriodMs = countResetTimePeriodMs;
}
}
/**
* Create a byte array for the given length, where the length of
* the returned array is larger than or equal to the given length.
*
* The current thread may be blocked if some resource is unavailable.
*
* The byte array created by this method must be released
* via the {@link ByteArrayManager#release(byte[])} method.
*
* @return a byte array with length larger than or equal to the given length.
*/
public abstract byte[] newByteArray(int size) throws InterruptedException;
/**
* Release the given byte array.
*
* The byte array may or may not be created
* by the {@link ByteArrayManager#newByteArray(int)} method.
*
* @return the number of free array.
*/
public abstract int release(byte[] array);
public static ByteArrayManager newInstance(Conf conf) {
return conf == null? new NewByteArrayWithoutLimit(): new Impl(conf);
}
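// Illustrative usage sketch (not part of the original source); the Conf values
// below are made-up examples, not recommended defaults:
//
//   ByteArrayManager bam = ByteArrayManager.newInstance(
//       new ByteArrayManager.Conf(128, 2048, 10000));
//   byte[] buf = bam.newByteArray(4096);  // may block once 2048 arrays of
//                                         // this length are outstanding
//   try {
//     ... fill and use buf ...
//   } finally {
//     bam.release(buf);                   // return it for recycling
//   }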
/**
* A dummy implementation which simply calls new byte[].
*/
static class NewByteArrayWithoutLimit extends ByteArrayManager {
@Override
public byte[] newByteArray(int size) throws InterruptedException {
return new byte[size];
}
@Override
public int release(byte[] array) {
return 0;
}
}
/**
* Manage byte array allocation and provide a mechanism for recycling the byte
* array objects.
*/
static class Impl extends ByteArrayManager {
private final Conf conf;
private final CounterMap counters;
private final ManagerMap managers;
Impl(Conf conf) {
this.conf = conf;
this.counters = new CounterMap(conf.countResetTimePeriodMs);
this.managers = new ManagerMap(conf.countLimit);
}
/**
* Allocate a byte array, where the length of the allocated array
* is the least power of two of the given length
* unless the given length is less than {@link #MIN_ARRAY_LENGTH}.
* In such case, the returned array length is equal to {@link #MIN_ARRAY_LENGTH}.
*
* If the number of allocated arrays exceeds the capacity,
* the current thread is blocked until
* the number of allocated arrays drops to below the capacity.
*
* The byte array allocated by this method must be returned for recycling
* via the {@link Impl#release(byte[])} method.
*
* @return a byte array with length larger than or equal to the given length.
*/
@Override
public byte[] newByteArray(final int arrayLength) throws InterruptedException {
Preconditions.checkArgument(arrayLength >= 0);
if (LOG.isDebugEnabled()) {
debugMessage.get().append("allocate(").append(arrayLength).append(")");
}
final byte[] array;
if (arrayLength == 0) {
array = EMPTY_BYTE_ARRAY;
} else {
final int powerOfTwo = arrayLength <= MIN_ARRAY_LENGTH?
MIN_ARRAY_LENGTH: leastPowerOfTwo(arrayLength);
final long count = counters.get(powerOfTwo, true).increment();
final boolean aboveThreshold = count > conf.countThreshold;
// create a new manager only if the count is above threshold.
final FixedLengthManager manager = managers.get(powerOfTwo, aboveThreshold);
if (LOG.isDebugEnabled()) {
debugMessage.get().append(": count=").append(count)
.append(aboveThreshold? ", aboveThreshold": ", belowThreshold");
}
array = manager != null? manager.allocate(): new byte[powerOfTwo];
}
if (LOG.isDebugEnabled()) {
debugMessage.get().append(", return byte[").append(array.length).append("]");
logDebugMessage();
}
return array;
}
/**
* Recycle the given byte array.
*
* The byte array may or may not be allocated
* by the {@link Impl#newByteArray(int)} method.
*
* This is a non-blocking call.
*/
@Override
public int release(final byte[] array) {
Preconditions.checkNotNull(array);
if (LOG.isDebugEnabled()) {
debugMessage.get().append("recycle: array.length=").append(array.length);
}
final int freeQueueSize;
if (array.length == 0) {
freeQueueSize = -1;
} else {
final FixedLengthManager manager = managers.get(array.length, false);
freeQueueSize = manager == null? -1: manager.recycle(array);
}
if (LOG.isDebugEnabled()) {
debugMessage.get().append(", freeQueueSize=").append(freeQueueSize);
logDebugMessage();
}
return freeQueueSize;
}
CounterMap getCounters() {
return counters;
}
ManagerMap getManagers() {
return managers;
}
}
}
| 13,311 | 30.770883 | 100 |
java
|